mirror of https://github.com/golang/go.git
[release-branch.go1.25] all: merge master (7fa2c73) into release-branch.go1.25
Merge List:

+ 2025-06-10 7fa2c736b3 os: disallow Root.Remove(".") on Plan 9, js, and Windows
+ 2025-06-10 281cfcfc1b runtime: handle system goroutines later in goroutine profiling
+ 2025-06-10 4f86f22671 testing/synctest, runtime: avoid panic when using linker-alloc WG from bubble
+ 2025-06-10 773701a853 internal/trace: pass GOTRACEBACK=crash to testprogs
+ 2025-06-10 fb0c27c514 os: do not follow dangling symlinks in Root when O_CREATE|O_EXCL on AIX
+ 2025-06-10 1cafdfb63b net/http: make the zero value of CrossOriginProtection work
+ 2025-06-10 a35701b352 cmd/dist: only install necessary tools when doing local test
+ 2025-06-10 a189516d3a runtime: don't do a direct G handoff in semrelease on systemstack
+ 2025-06-10 f18d046568 all.{bash,rc}: use "../bin/go tool dist" instead of "$GOTOOLDIR/dist" to print build info
+ 2025-06-09 ee7bfbdbcc cmd/compile/internal/ssa: fix PPC64 merging of (AND (S[RL]Dconst ...)
+ 2025-06-09 985d600f3a runtime: use small struct TestSynctest to ensure cleanups run
+ 2025-06-09 848a768ba7 runtime: clarify stack traces for bubbled goroutines
+ 2025-06-09 049a5e6036 runtime: return a different bubble deadlock error when main goroutine is done
+ 2025-06-09 ac1686752b cmd/internal/doc: increase version of pkgsite doc command that's run

Change-Id: Iba7b2c2f06e91a39fa039c08170e6054e50de3c6
commit 9546293d22
@@ -10,4 +10,4 @@ if [ ! -f make.bash ]; then
 fi
 . ./make.bash "$@" --no-banner
 bash run.bash --no-rebuild
-"$GOTOOLDIR/dist" banner # print build info
+../bin/go tool dist banner # print build info

@@ -13,4 +13,4 @@ if(! test -f make.rc){
 . ./make.rc --no-banner $*
 bind -b $GOROOT/bin /bin
 ./run.rc --no-rebuild
-$GOTOOLDIR/dist banner # print build info
+../bin/go tool dist banner # print build info
@@ -1583,6 +1583,11 @@ func GetPPC64Shiftme(auxint int64) int64 {
 // operation. Masks can also extend from the msb and wrap to
 // the lsb too. That is, the valid masks are 32 bit strings
 // of the form: 0..01..10..0 or 1..10..01..1 or 1...1
+//
+// Note: This ignores the upper 32 bits of the input. When a
+// zero extended result is desired (e.g a 64 bit result), the
+// user must verify the upper 32 bits are 0 and the mask is
+// contiguous (that is, non-wrapping).
 func isPPC64WordRotateMask(v64 int64) bool {
     // Isolate rightmost 1 (if none 0) and add.
     v := uint32(v64)

@@ -1593,6 +1598,16 @@ func isPPC64WordRotateMask(v64 int64) bool {
     return (v&vp == 0 || vn&vpn == 0) && v != 0
 }
 
+// Test if this mask is a valid, contiguous bitmask which can be
+// represented by a RLWNM mask and also clears the upper 32 bits
+// of the register.
+func isPPC64WordRotateMaskNonWrapping(v64 int64) bool {
+    // Isolate rightmost 1 (if none 0) and add.
+    v := uint32(v64)
+    vp := (v & -v) + v
+    return (v&vp == 0) && v != 0 && uint64(uint32(v64)) == uint64(v64)
+}
+
 // Compress mask and shift into single value of the form
 // me | mb<<8 | rotate<<16 | nbits<<24 where me and mb can
 // be used to regenerate the input mask.

@@ -1702,7 +1717,7 @@ func mergePPC64AndSrdi(m, s int64) int64 {
     if rv&uint64(mask) != 0 {
         return 0
     }
-    if !isPPC64WordRotateMask(mask) {
+    if !isPPC64WordRotateMaskNonWrapping(mask) {
         return 0
     }
     return encodePPC64RotateMask((32-s)&31, mask, 32)

@@ -1717,7 +1732,7 @@ func mergePPC64AndSldi(m, s int64) int64 {
     if rv&uint64(mask) != 0 {
         return 0
     }
-    if !isPPC64WordRotateMask(mask) {
+    if !isPPC64WordRotateMaskNonWrapping(mask) {
         return 0
     }
     return encodePPC64RotateMask(s&31, mask, 32)
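The masking helpers above rely on a standard bit trick: adding a value's lowest set bit (v & -v) back to v collapses a single contiguous run of ones into a power of two, so v&vp == 0 exactly when the ones are contiguous. A standalone sketch of that check, separate from the compiler code above:

package main

import "fmt"

// isContiguousMask32 reports whether the low 32 bits of v64 form a single,
// non-wrapping run of ones (e.g. 0x00ff0000) and the upper 32 bits are zero.
func isContiguousMask32(v64 int64) bool {
    v := uint32(v64)
    vp := (v & -v) + v // add the lowest set bit back: a single run becomes a power of two (or 0)
    return v&vp == 0 && v != 0 && uint64(uint32(v64)) == uint64(v64)
}

func main() {
    for _, m := range []int64{0x00ff0000, 0x0000000f, 0xff00000f, 0} {
        fmt.Printf("%#010x contiguous: %v\n", m, isContiguousMask32(m))
    }
}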
@@ -1390,7 +1390,21 @@ func toolenv() []string {
     return env
 }
 
-var toolchain = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/link", "cmd/preprofile"}
+var (
+    toolchain = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/link", "cmd/preprofile"}
+
+    // Keep in sync with binExes in cmd/distpack/pack.go.
+    binExesIncludedInDistpack = []string{"cmd/go", "cmd/gofmt"}
+
+    // Keep in sync with the filter in cmd/distpack/pack.go.
+    toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/link", "cmd/preprofile", "cmd/vet"}
+
+    // We could install all tools in "cmd", but is unnecessary because we will
+    // remove them in distpack, so instead install the tools that will actually
+    // be included in distpack, which is a superset of toolchain. Not installing
+    // the tools will help us test what happens when the tools aren't present.
+    toolsToInstall = slices.Concat(binExesIncludedInDistpack, toolsIncludedInDistpack)
+)
 
 // The bootstrap command runs a build from scratch,
 // stopping at having installed the go_bootstrap command.

@@ -1456,11 +1470,6 @@ func cmdbootstrap() {
     // GOEXPERIMENT.
     os.Setenv("GOEXPERIMENT", "none")
 
-    if debug {
-        // cmd/buildid is used in debug mode.
-        toolchain = append(toolchain, "cmd/buildid")
-    }
-
     if isdir(pathf("%s/src/pkg", goroot)) {
         fatalf("\n\n"+
             "The Go package sources have moved to $GOROOT/src.\n"+

@@ -1589,18 +1598,6 @@ func cmdbootstrap() {
         os.Setenv("GOCACHE", oldgocache)
     }
 
-    // Keep in sync with binExes in cmd/distpack/pack.go.
-    binExesIncludedInDistpack := []string{"cmd/go", "cmd/gofmt"}
-
-    // Keep in sync with the filter in cmd/distpack/pack.go.
-    toolsIncludedInDistpack := []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/link", "cmd/preprofile", "cmd/vet"}
-
-    // We could install all tools in "cmd", but is unnecessary because we will
-    // remove them in distpack, so instead install the tools that will actually
-    // be included in distpack, which is a superset of toolchain. Not installing
-    // the tools will help us test what happens when the tools aren't present.
-    toolsToInstall := slices.Concat(binExesIncludedInDistpack, toolsIncludedInDistpack)
-
     if goos == oldgoos && goarch == oldgoarch {
         // Common case - not setting up for cross-compilation.
         timelog("build", "toolchain")
@@ -178,7 +178,7 @@ func (t *tester) run() {
         // otherwise relevant to the actual set of packages under test.
         goInstall(toolenv(), gorootBinGo, toolchain...)
         goInstall(toolenv(), gorootBinGo, toolchain...)
-        goInstall(toolenv(), gorootBinGo, "cmd")
+        goInstall(toolenv(), gorootBinGo, toolsToInstall...)
     }
 }
 
@@ -231,7 +231,7 @@ func doPkgsite(urlPath string) error {
         env = append(env, "GOPROXY="+goproxy)
     }
 
-    const version = "v0.0.0-20250520201116-40659211760d"
+    const version = "v0.0.0-20250608123103-82c52f1754cd"
     cmd := exec.Command("go", "run", "golang.org/x/pkgsite/cmd/internal/doc@"+version,
         "-gorepo", buildCtx.GOROOT,
         "-http", addr,
@@ -8,6 +8,7 @@
 package synctest
 
 import (
+    "internal/abi"
     "unsafe"
 )
 

@@ -22,14 +23,25 @@ func Wait()
 //go:linkname IsInBubble
 func IsInBubble() bool
 
-// Associate associates p with the current bubble.
-// It returns false if p has an existing association with a different bubble.
-func Associate[T any](p *T) (ok bool) {
-    return associate(unsafe.Pointer(p))
+// Association is the state of a pointer's bubble association.
+type Association int
+
+const (
+    Unbubbled     = Association(iota) // not associated with any bubble
+    CurrentBubble                     // associated with the current bubble
+    OtherBubble                       // associated with a different bubble
+)
+
+// Associate attempts to associate p with the current bubble.
+// It returns the new association status of p.
+func Associate[T any](p *T) Association {
+    // Ensure p escapes to permit us to attach a special to it.
+    escapedP := abi.Escape(p)
+    return Association(associate(unsafe.Pointer(escapedP)))
 }
 
 //go:linkname associate
-func associate(p unsafe.Pointer) bool
+func associate(p unsafe.Pointer) int
 
 // Disassociate disassociates p from any bubble.
 func Disassociate[T any](p *T) {
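The API change above replaces a boolean result with a three-valued Association so callers can tell "no association possible" apart from "already owned by a different bubble". A self-contained toy sketch of the same return-an-enum pattern (the registry type and names below are illustrative, not the runtime's implementation):

package main

import "fmt"

// Association mirrors the three-valued result introduced above.
type Association int

const (
    Unbubbled     Association = iota // no association was (or could be) made
    CurrentBubble                    // associated with the caller's bubble
    OtherBubble                      // already owned by a different bubble
)

// registry is a toy stand-in for the runtime's per-object ownership records.
type registry map[*int]int // object -> owning bubble ID

// associate tries to claim obj for bubble id and reports the resulting state.
func (r registry) associate(obj *int, id int) Association {
    if owner, ok := r[obj]; ok {
        if owner == id {
            return CurrentBubble
        }
        return OtherBubble
    }
    r[obj] = id
    return CurrentBubble
}

func main() {
    r := registry{}
    x := new(int)
    fmt.Println(r.associate(x, 1)) // first claim succeeds (CurrentBubble)
    fmt.Println(r.associate(x, 1)) // same bubble again (CurrentBubble)
    fmt.Println(r.associate(x, 2)) // owned elsewhere (OtherBubble)
}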
@@ -488,7 +488,7 @@ func TestDeadlockRoot(t *testing.T) {
 }
 
 func TestDeadlockChild(t *testing.T) {
-    defer wantPanic(t, "deadlock: all goroutines in bubble are blocked")
+    defer wantPanic(t, "deadlock: main bubble goroutine has exited but blocked goroutines remain")
     synctest.Run(func() {
         go func() {
             select {}

@@ -497,7 +497,7 @@ func TestDeadlockChild(t *testing.T) {
 }
 
 func TestDeadlockTicker(t *testing.T) {
-    defer wantPanic(t, "deadlock: all goroutines in bubble are blocked")
+    defer wantPanic(t, "deadlock: main bubble goroutine has exited but blocked goroutines remain")
    synctest.Run(func() {
         go func() {
             for range time.Tick(1 * time.Second) {
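For reference, the new message is what a user of the public testing/synctest package would see when the bubble's root function returns while a goroutine it started is still blocked with no way to make progress. A minimal sketch that deliberately triggers the condition, assuming the Go 1.25 testing/synctest API:

package example_test

import (
    "testing"
    "testing/synctest"
)

// TestLeakedBlockedGoroutine is expected to fail by panicking with
// "deadlock: main bubble goroutine has exited but blocked goroutines remain":
// the root function returns while the goroutine it started can never proceed.
func TestLeakedBlockedGoroutine(t *testing.T) {
    synctest.Test(t, func(t *testing.T) {
        go func() {
            select {} // durably blocked forever inside the bubble
        }()
        // Returning here ends the main bubble goroutine.
    })
}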
@@ -731,6 +731,34 @@ func TestWaitGroupMovedBetweenBubblesAfterWait(t *testing.T) {
     })
 }
 
+var testWaitGroupLinkerAllocatedWG sync.WaitGroup
+
+func TestWaitGroupLinkerAllocated(t *testing.T) {
+    synctest.Run(func() {
+        // This WaitGroup is probably linker-allocated and has no span,
+        // so we won't be able to add a special to it associating it with
+        // this bubble.
+        //
+        // Operations on it may not be durably blocking,
+        // but they shouldn't fail.
+        testWaitGroupLinkerAllocatedWG.Go(func() {})
+        testWaitGroupLinkerAllocatedWG.Wait()
+    })
+}
+
+var testWaitGroupHeapAllocatedWG = new(sync.WaitGroup)
+
+func TestWaitGroupHeapAllocated(t *testing.T) {
+    synctest.Run(func() {
+        // This package-scoped WaitGroup var should have been heap-allocated,
+        // so we can associate it with a bubble.
+        testWaitGroupHeapAllocatedWG.Add(1)
+        go testWaitGroupHeapAllocatedWG.Wait()
+        synctest.Wait()
+        testWaitGroupHeapAllocatedWG.Done()
+    })
+}
+
 func TestHappensBefore(t *testing.T) {
     // Use two parallel goroutines accessing different vars to ensure that
     // we correctly account for multiple goroutines in the bubble.
@@ -192,6 +192,11 @@ func Mkdirat(dirfd syscall.Handle, name string, mode uint32) error {
 }
 
 func Deleteat(dirfd syscall.Handle, name string, options uint32) error {
+    if name == "." {
+        // NtOpenFile's documentation isn't explicit about what happens when deleting ".".
+        // Make this an error consistent with that of POSIX.
+        return syscall.EINVAL
+    }
     objAttrs := &OBJECT_ATTRIBUTES{}
     if err := objAttrs.init(dirfd, name); err != nil {
         return err
@@ -600,6 +600,10 @@ func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace
         godebug += "," + extraGODEBUG
     }
     cmd.Env = append(cmd.Env, "GODEBUG="+godebug)
+    if _, ok := os.LookupEnv("GOTRACEBACK"); !ok {
+        // Unless overriden, set GOTRACEBACK=crash.
+        cmd.Env = append(cmd.Env, "GOTRACEBACK=crash")
+    }
 
     // Capture stdout and stderr.
     //
@@ -26,12 +26,15 @@ import (
 // Requests without Sec-Fetch-Site or Origin headers are currently assumed to be
 // either same-origin or non-browser requests, and are allowed.
 //
+// The zero value of CrossOriginProtection is valid and has no trusted origins
+// or bypass patterns.
+//
 // [Sec-Fetch-Site]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Sec-Fetch-Site
 // [Origin]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin
 // [Cross-Site Request Forgery (CSRF)]: https://developer.mozilla.org/en-US/docs/Web/Security/Attacks/CSRF
 // [safe methods]: https://developer.mozilla.org/en-US/docs/Glossary/Safe/HTTP
 type CrossOriginProtection struct {
-    bypass    *ServeMux
+    bypass    atomic.Pointer[ServeMux]
     trustedMu sync.RWMutex
     trusted   map[string]bool
     deny      atomic.Pointer[Handler]

@@ -39,10 +42,7 @@ type CrossOriginProtection struct {
 
 // NewCrossOriginProtection returns a new [CrossOriginProtection] value.
 func NewCrossOriginProtection() *CrossOriginProtection {
-    return &CrossOriginProtection{
-        bypass:  NewServeMux(),
-        trusted: make(map[string]bool),
-    }
+    return &CrossOriginProtection{}
 }
 
 // AddTrustedOrigin allows all requests with an [Origin] header

@@ -70,6 +70,9 @@ func (c *CrossOriginProtection) AddTrustedOrigin(origin string) error {
     }
     c.trustedMu.Lock()
     defer c.trustedMu.Unlock()
+    if c.trusted == nil {
+        c.trusted = make(map[string]bool)
+    }
     c.trusted[origin] = true
     return nil
 }

@@ -82,7 +85,21 @@ var noopHandler = HandlerFunc(func(w ResponseWriter, r *Request) {})
 // AddInsecureBypassPattern can be called concurrently with other methods
 // or request handling, and applies to future requests.
 func (c *CrossOriginProtection) AddInsecureBypassPattern(pattern string) {
-    c.bypass.Handle(pattern, noopHandler)
+    var bypass *ServeMux
+
+    // Lazily initialize c.bypass
+    for {
+        bypass = c.bypass.Load()
+        if bypass != nil {
+            break
+        }
+        bypass = NewServeMux()
+        if c.bypass.CompareAndSwap(nil, bypass) {
+            break
+        }
+    }
+
+    bypass.Handle(pattern, noopHandler)
 }
 
 // SetDenyHandler sets a handler to invoke when a request is rejected.

@@ -149,9 +166,11 @@ func (c *CrossOriginProtection) Check(req *Request) error {
 // isRequestExempt checks the bypasses which require taking a lock, and should
 // be deferred until the last moment.
 func (c *CrossOriginProtection) isRequestExempt(req *Request) bool {
-    if _, pattern := c.bypass.Handler(req); pattern != "" {
-        // The request matches a bypass pattern.
-        return true
+    if bypass := c.bypass.Load(); bypass != nil {
+        if _, pattern := bypass.Handler(req); pattern != "" {
+            // The request matches a bypass pattern.
+            return true
+        }
     }
 
     c.trustedMu.RLock()
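The loop above is a small, reusable lazy-initialization pattern: Load first, and only on nil allocate a candidate and publish it with CompareAndSwap, retrying if another goroutine won the race. A standalone sketch of the same pattern with an illustrative type (not the net/http implementation):

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// lazyMap allocates its sync.Map the first time it is needed, so the zero
// value of lazyMap is ready to use, mirroring the zero-value goal above.
type lazyMap struct {
    m atomic.Pointer[sync.Map]
}

func (l *lazyMap) get() *sync.Map {
    for {
        if m := l.m.Load(); m != nil {
            return m // already initialized
        }
        m := new(sync.Map)
        if l.m.CompareAndSwap(nil, m) {
            return m // we published the instance
        }
        // Another goroutine published first; retry the Load.
    }
}

func main() {
    var l lazyMap // zero value works
    l.get().Store("k", "v")
    v, _ := l.get().Load("k")
    fmt.Println(v)
}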
@@ -8,6 +8,7 @@ package os
 
 import (
     "errors"
+    "internal/filepathlite"
     "internal/stringslite"
     "sync/atomic"
     "syscall"

@@ -173,6 +174,12 @@ func rootRemove(r *Root, name string) error {
     if err := checkPathEscapesLstat(r, name); err != nil {
         return &PathError{Op: "removeat", Path: name, Err: err}
     }
+    if endsWithDot(name) {
+        // We don't want to permit removing the root itself, so check for that.
+        if filepathlite.Clean(name) == "." {
+            return &PathError{Op: "removeat", Path: name, Err: errPathEscapes}
+        }
+    }
     if err := Remove(joinPath(r.root.name, name)); err != nil {
         return &PathError{Op: "removeat", Path: name, Err: underlyingError(err)}
     }
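Seen from the caller, the effect of this change (which the commit message says brings Plan 9, js, and Windows in line with the existing behavior) is that removing the root of an os.Root is rejected. A minimal sketch using the exported os.Root API:

package main

import (
    "fmt"
    "os"
)

func main() {
    dir, err := os.MkdirTemp("", "rootdemo")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    r, err := os.OpenRoot(dir)
    if err != nil {
        panic(err)
    }
    defer r.Close()

    // Removing "." would remove the root itself, which Root does not permit;
    // expect a *PathError rather than a deleted directory.
    if err := r.Remove("."); err != nil {
        fmt.Println("Remove(\".\") rejected:", err)
    }
}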
@@ -83,8 +83,18 @@ func rootOpenFileNolog(root *Root, name string, flag int, perm FileMode) (*File,
     fd, err := doInRoot(root, name, nil, func(parent int, name string) (fd int, err error) {
         ignoringEINTR(func() error {
             fd, err = unix.Openat(parent, name, syscall.O_NOFOLLOW|syscall.O_CLOEXEC|flag, uint32(perm))
-            if isNoFollowErr(err) || err == syscall.ENOTDIR {
-                err = checkSymlink(parent, name, err)
+            if err != nil {
+                // Never follow symlinks when O_CREATE|O_EXCL, no matter
+                // what error the OS returns.
+                isCreateExcl := flag&(O_CREATE|O_EXCL) == (O_CREATE | O_EXCL)
+                if !isCreateExcl && (isNoFollowErr(err) || err == syscall.ENOTDIR) {
+                    err = checkSymlink(parent, name, err)
+                }
+                // AIX returns ELOOP instead of EEXIST for a dangling symlink.
+                // Convert this to EEXIST so it matches ErrExists.
+                if isCreateExcl && err == syscall.ELOOP {
+                    err = syscall.EEXIST
+                }
             }
             return err
         })
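Normalizing the AIX error matters because callers typically test "already exists" with errors.Is(err, fs.ErrExist), which would not match ELOOP. A sketch of that caller-side check through os.Root.OpenFile (the file names are illustrative):

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

// createExclusive creates name inside r, failing if anything (including a
// dangling symlink) already occupies that name.
func createExclusive(r *os.Root, name string) error {
    f, err := r.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o644)
    if err != nil {
        if errors.Is(err, fs.ErrExist) {
            // With the change above, a dangling symlink on AIX lands here
            // as well, instead of surfacing ELOOP.
            return fmt.Errorf("%s already exists: %w", name, err)
        }
        return err
    }
    return f.Close()
}

func main() {
    dir, err := os.MkdirTemp("", "excl")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)
    r, err := os.OpenRoot(dir)
    if err != nil {
        panic(err)
    }
    defer r.Close()
    fmt.Println(createExclusive(r, "a.txt")) // <nil>
    fmt.Println(createExclusive(r, "a.txt")) // a.txt already exists: ...
}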
@@ -1911,3 +1911,9 @@ func (b BitCursor) Write(data *byte, cnt uintptr) {
 func (b BitCursor) Offset(cnt uintptr) BitCursor {
     return BitCursor{b: b.b.offset(cnt)}
 }
+
+const (
+    BubbleAssocUnbubbled     = bubbleAssocUnbubbled
+    BubbleAssocCurrentBubble = bubbleAssocCurrentBubble
+    BubbleAssocOtherBubble   = bubbleAssocOtherBubble
+)
@@ -1431,10 +1431,6 @@ func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
         // so here we check _Gdead first.
         return
     }
-    if isSystemGoroutine(gp1, false) {
-        // System goroutines should not appear in the profile.
-        return
-    }
 
     for {
         prev := gp1.goroutineProfiled.Load()

@@ -1472,6 +1468,17 @@ func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
 // stack), or from the scheduler in preparation to execute gp1 (running on the
 // system stack).
 func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
+    if isSystemGoroutine(gp1, false) {
+        // System goroutines should not appear in the profile.
+        // Check this here and not in tryRecordGoroutineProfile because isSystemGoroutine
+        // may change on a goroutine while it is executing, so while the scheduler might
+        // see a system goroutine, goroutineProfileWithLabelsConcurrent might not, and
+        // this inconsistency could cause invariants to be violated, such as trying to
+        // record the stack of a running goroutine below. In short, we still want system
+        // goroutines to participate in the same state machine on gp1.goroutineProfiled as
+        // everything else, we just don't record the stack in the profile.
+        return
+    }
     if readgstatus(gp1) == _Grunning {
         print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
         throw("cannot read stack of running goroutine")
@@ -1808,6 +1808,45 @@ func TestGoroutineProfileCoro(t *testing.T) {
     goroutineProf.WriteTo(io.Discard, 1)
 }
 
+// This test tries to provoke a situation wherein the finalizer goroutine is
+// erroneously inspected by the goroutine profiler in such a way that could
+// cause a crash. See go.dev/issue/74090.
+func TestGoroutineProfileIssue74090(t *testing.T) {
+    testenv.MustHaveParallelism(t)
+
+    goroutineProf := Lookup("goroutine")
+
+    // T is a pointer type so it won't be allocated by the tiny
+    // allocator, which can lead to its finalizer not being called
+    // during this test.
+    type T *byte
+    for range 10 {
+        // We use finalizers for this test because finalizers transition between
+        // system and user goroutine on each call, since there's substantially
+        // more work to do to set up a finalizer call. Cleanups, on the other hand,
+        // transition once for a whole batch, and so are less likely to trigger
+        // the failure. Under stress testing conditions this test fails approximately
+        // 5 times every 1000 executions on a 64 core machine without the appropriate
+        // fix, which is not ideal but if this test crashes at all, it's a clear
+        // signal that something is broken.
+        var objs []*T
+        for range 10000 {
+            obj := new(T)
+            runtime.SetFinalizer(obj, func(_ interface{}) {})
+            objs = append(objs, obj)
+        }
+        objs = nil
+
+        // Queue up all the finalizers.
+        runtime.GC()
+
+        // Try to run a goroutine profile concurrently with finalizer execution
+        // to trigger the bug.
+        var w strings.Builder
+        goroutineProf.WriteTo(&w, 1)
+    }
+}
+
 func BenchmarkGoroutine(b *testing.B) {
     withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
         return func(b *testing.B) {
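The profile exercised by this test is the ordinary goroutine profile; for reference, a minimal sketch of requesting one with the public runtime/pprof API:

package main

import (
    "os"
    "runtime/pprof"
    "time"
)

func main() {
    // Park a few goroutines so the profile has something to show.
    for range 3 {
        go func() { time.Sleep(time.Minute) }()
    }
    time.Sleep(10 * time.Millisecond)

    // debug=1 prints a human-readable summary; debug=2 dumps full stacks.
    pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
}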
@@ -1093,10 +1093,10 @@ const (
     waitReasonGCWeakToStrongWait    // "GC weak to strong wait"
     waitReasonSynctestRun           // "synctest.Run"
     waitReasonSynctestWait          // "synctest.Wait"
-    waitReasonSynctestChanReceive   // "chan receive (synctest)"
-    waitReasonSynctestChanSend      // "chan send (synctest)"
-    waitReasonSynctestSelect        // "select (synctest)"
-    waitReasonSynctestWaitGroupWait // "sync.WaitGroup.Wait (synctest)"
+    waitReasonSynctestChanReceive   // "chan receive (durable)"
+    waitReasonSynctestChanSend      // "chan send (durable)"
+    waitReasonSynctestSelect        // "select (durable)"
+    waitReasonSynctestWaitGroupWait // "sync.WaitGroup.Wait (durable)"
     waitReasonCleanupWait           // "cleanup wait"
 )
 

@@ -1143,10 +1143,10 @@ var waitReasonStrings = [...]string{
     waitReasonGCWeakToStrongWait:    "GC weak to strong wait",
     waitReasonSynctestRun:           "synctest.Run",
     waitReasonSynctestWait:          "synctest.Wait",
-    waitReasonSynctestChanReceive:   "chan receive (synctest)",
-    waitReasonSynctestChanSend:      "chan send (synctest)",
-    waitReasonSynctestSelect:        "select (synctest)",
-    waitReasonSynctestWaitGroupWait: "sync.WaitGroup.Wait (synctest)",
+    waitReasonSynctestChanReceive:   "chan receive (durable)",
+    waitReasonSynctestChanSend:      "chan send (durable)",
+    waitReasonSynctestSelect:        "select (durable)",
+    waitReasonSynctestWaitGroupWait: "sync.WaitGroup.Wait (durable)",
     waitReasonCleanupWait:           "cleanup wait",
 }
 

@@ -1207,12 +1207,12 @@ var isIdleInSynctest = [len(waitReasonStrings)]bool{
 }
 
 var (
-    allm          *m
-    gomaxprocs    int32
-    numCPUStartup int32
-    forcegc       forcegcstate
-    sched         schedt
-    newprocs      int32
+    allm          *m
+    gomaxprocs    int32
+    numCPUStartup int32
+    forcegc       forcegcstate
+    sched         schedt
+    newprocs      int32
 )
 
 var (
@@ -261,11 +261,13 @@ func semrelease1(addr *uint32, handoff bool, skipframes int) {
         s.ticket = 1
     }
     readyWithTime(s, 5+skipframes)
-    if s.ticket == 1 && getg().m.locks == 0 {
+    if s.ticket == 1 && getg().m.locks == 0 && getg() != getg().m.g0 {
         // Direct G handoff
+        //
         // readyWithTime has added the waiter G as runnext in the
         // current P; we now call the scheduler so that we start running
         // the waiter G immediately.
+        //
         // Note that waiter inherits our time slice: this is desirable
         // to avoid having a highly contended semaphore hog the P
         // indefinitely. goyield is like Gosched, but it emits a

@@ -275,9 +277,12 @@ func semrelease1(addr *uint32, handoff bool, skipframes int) {
         // the non-starving case it is possible for a different waiter
         // to acquire the semaphore while we are yielding/scheduling,
         // and this would be wasteful. We wait instead to enter starving
-        // regime, and then we start to do direct handoffs of ticket and
-        // P.
+        // regime, and then we start to do direct handoffs of ticket and P.
+        //
         // See issue 33747 for discussion.
+        //
+        // We don't handoff directly if we're holding locks or on the
+        // system stack, since it's not safe to enter the scheduler.
         goyield()
     }
 }
@@ -242,7 +242,13 @@ func synctestRun(f func()) {
         raceacquireg(gp, gp.bubble.raceaddr())
     }
     if total != 1 {
-        panic(synctestDeadlockError{bubble})
+        var reason string
+        if bubble.done {
+            reason = "deadlock: main bubble goroutine has exited but blocked goroutines remain"
+        } else {
+            reason = "deadlock: all goroutines in bubble are blocked"
+        }
+        panic(synctestDeadlockError{reason: reason, bubble: bubble})
     }
     if gp.timer != nil && gp.timer.isFake {
         // Verify that we haven't marked this goroutine's sleep timer as fake.

@@ -252,11 +258,12 @@ func synctestRun(f func()) {
 }
 
 type synctestDeadlockError struct {
+    reason string
     bubble *synctestBubble
 }
 
-func (synctestDeadlockError) Error() string {
-    return "deadlock: all goroutines in bubble are blocked"
+func (e synctestDeadlockError) Error() string {
+    return e.reason
 }
 
 func synctestidle_c(gp *g, _ unsafe.Pointer) bool {

@@ -356,6 +363,13 @@ type specialBubble struct {
     bubbleid uint64
 }
 
+// Keep these in sync with internal/synctest.
+const (
+    bubbleAssocUnbubbled     = iota // not associated with any bubble
+    bubbleAssocCurrentBubble        // associated with the current bubble
+    bubbleAssocOtherBubble          // associated with a different bubble
+)
+
 // getOrSetBubbleSpecial checks the special record for p's bubble membership.
 //
 // If add is true and p is not associated with any bubble,

@@ -364,10 +378,12 @@ type specialBubble struct {
 // It returns ok==true if p is associated with bubbleid
 // (including if a new association was added),
 // and ok==false if not.
-func getOrSetBubbleSpecial(p unsafe.Pointer, bubbleid uint64, add bool) (ok bool) {
+func getOrSetBubbleSpecial(p unsafe.Pointer, bubbleid uint64, add bool) (assoc int) {
     span := spanOfHeap(uintptr(p))
     if span == nil {
-        throw("getOrSetBubbleSpecial on invalid pointer")
+        // This is probably a package var.
+        // We can't attach a special to it, so always consider it unbubbled.
+        return bubbleAssocUnbubbled
     }
 
     // Ensure that the span is swept.

@@ -386,7 +402,11 @@ func getOrSetBubbleSpecial(p unsafe.Pointer, bubbleid uint64, add bool) (ok bool
         // p is already associated with a bubble.
         // Return true iff it's the same bubble.
         s := (*specialBubble)((unsafe.Pointer)(*iter))
-        ok = s.bubbleid == bubbleid
+        if s.bubbleid == bubbleid {
+            assoc = bubbleAssocCurrentBubble
+        } else {
+            assoc = bubbleAssocOtherBubble
+        }
     } else if add {
         // p is not associated with a bubble,
         // and we've been asked to add an association.

@@ -397,23 +417,23 @@ func getOrSetBubbleSpecial(p unsafe.Pointer, bubbleid uint64, add bool) (ok bool
         s.special.next = *iter
         *iter = (*special)(unsafe.Pointer(s))
         spanHasSpecials(span)
-        ok = true
+        assoc = bubbleAssocCurrentBubble
     } else {
         // p is not associated with a bubble.
-        ok = false
+        assoc = bubbleAssocUnbubbled
     }
 
     unlock(&span.speciallock)
     releasem(mp)
 
-    return ok
+    return assoc
 }
 
 // synctest_associate associates p with the current bubble.
 // It returns false if p is already associated with a different bubble.
 //
 //go:linkname synctest_associate internal/synctest.associate
-func synctest_associate(p unsafe.Pointer) (ok bool) {
+func synctest_associate(p unsafe.Pointer) int {
     return getOrSetBubbleSpecial(p, getg().bubble.id, true)
 }
 

@@ -428,5 +448,5 @@ func synctest_disassociate(p unsafe.Pointer) {
 //
 //go:linkname synctest_isAssociated internal/synctest.isAssociated
 func synctest_isAssociated(p unsafe.Pointer) bool {
-    return getOrSetBubbleSpecial(p, getg().bubble.id, false)
+    return getOrSetBubbleSpecial(p, getg().bubble.id, false) == bubbleAssocCurrentBubble
 }
@@ -5,6 +5,8 @@
 package runtime_test
 
 import (
+    "internal/synctest"
+    "runtime"
     "testing"
 )
 

@@ -15,3 +17,13 @@ func TestSynctest(t *testing.T) {
         t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want)
     }
 }
+
+// TestSynctestAssocConsts verifies that constants defined
+// in both runtime and internal/synctest match.
+func TestSynctestAssocConsts(t *testing.T) {
+    if runtime.BubbleAssocUnbubbled != synctest.Unbubbled ||
+        runtime.BubbleAssocCurrentBubble != synctest.CurrentBubble ||
+        runtime.BubbleAssocOtherBubble != synctest.OtherBubble {
+        t.Fatal("mismatch: runtime.BubbleAssoc? != synctest.*")
+    }
+}
@@ -9,6 +9,7 @@ import (
     "runtime"
     "runtime/metrics"
     "sync/atomic"
+    "unsafe"
 )
 
 // This program ensures system goroutines (GC workers, finalizer goroutine)

@@ -27,6 +28,11 @@ func numGCCycles() uint64 {
     return samples[0].Value.Uint64()
 }
 
+type T struct {
+    v int
+    p unsafe.Pointer
+}
+
 func main() {
     // Channels created by a finalizer and cleanup func registered within the bubble.
     var (

@@ -36,8 +42,8 @@ func main() {
     synctest.Run(func() {
         // Start the finalizer and cleanup goroutines.
         {
-            p := new(int)
-            runtime.SetFinalizer(p, func(*int) {
+            p := new(T)
+            runtime.SetFinalizer(p, func(*T) {
                 ch := make(chan struct{})
                 finalizerCh.Store(&ch)
             })

@@ -47,35 +53,35 @@ func main() {
             }, struct{}{})
         }
         startingCycles := numGCCycles()
-        ch1 := make(chan *int)
-        ch2 := make(chan *int)
+        ch1 := make(chan *T)
+        ch2 := make(chan *T)
         defer close(ch1)
         go func() {
-            for i := range ch1 {
-                v := *i + 1
-                ch2 <- &v
+            for range ch1 {
+                ch2 <- &T{}
             }
         }()
-        for {
+        const iterations = 1000
+        for range iterations {
             // Make a lot of short-lived allocations to get the GC working.
-            for i := 0; i < 1000; i++ {
-                v := new(int)
-                *v = i
+            for range 1000 {
+                v := new(T)
                 // Set finalizers on these values, just for added stress.
-                runtime.SetFinalizer(v, func(*int) {})
+                runtime.SetFinalizer(v, func(*T) {})
                 ch1 <- v
                 <-ch2
             }
 
             // If we've improperly put a GC goroutine into the synctest group,
             // this Wait is going to hang.
-            //synctest.Wait()
+            synctest.Wait()
 
             // End the test after a couple of GC cycles have passed.
             if numGCCycles()-startingCycles > 1 && finalizerCh.Load() != nil && cleanupCh.Load() != nil {
-                break
+                return
             }
         }
+        println("finalizers/cleanups failed to run after", iterations, "cycles")
     })
     // Close the channels created by the finalizer and cleanup func.
     // If the funcs improperly ran inside the bubble, these channels are bubbled
@@ -1248,6 +1248,13 @@ func goroutineheader(gp *g) {
     if isScan {
         print(" (scan)")
     }
+    if bubble := gp.bubble; bubble != nil &&
+        gp.waitreason.isIdleInSynctest() &&
+        !stringslite.HasSuffix(status, "(durable)") {
+        // If this isn't a status where the name includes a (durable)
+        // suffix to distinguish it from the non-durable form, add it here.
+        print(" (durable)")
+    }
     if waitfor >= 1 {
         print(", ", waitfor, " minutes")
     }

@@ -1255,11 +1262,7 @@ func goroutineheader(gp *g) {
         print(", locked to thread")
     }
     if bubble := gp.bubble; bubble != nil {
-        print(", synctest bubble ", bubble.root.goid, ", ")
-        if !gp.waitreason.isIdleInSynctest() {
-            print("not ")
-        }
-        print("durably blocked")
+        print(", synctest bubble ", bubble.id)
     }
     print("]:\n")
 }
@@ -83,13 +83,17 @@ func (wg *WaitGroup) Add(delta int) {
         race.Disable()
         defer race.Enable()
     }
+    bubbled := false
     if synctest.IsInBubble() {
         // If Add is called from within a bubble, then all Add calls must be made
         // from the same bubble.
-        if !synctest.Associate(wg) {
+        switch synctest.Associate(wg) {
+        case synctest.Unbubbled:
+        case synctest.OtherBubble:
             // wg is already associated with a different bubble.
             fatal("sync: WaitGroup.Add called from multiple synctest bubbles")
-        } else {
+        case synctest.CurrentBubble:
+            bubbled = true
             state := wg.state.Or(waitGroupBubbleFlag)
             if state != 0 && state&waitGroupBubbleFlag == 0 {
                 // Add has been called from outside this bubble.

@@ -98,7 +102,7 @@ func (wg *WaitGroup) Add(delta int) {
         }
     }
     state := wg.state.Add(uint64(delta) << 32)
-    if state&waitGroupBubbleFlag != 0 && !synctest.IsInBubble() {
+    if state&waitGroupBubbleFlag != 0 && !bubbled {
         // Add has been called from within a synctest bubble (and we aren't in one).
         fatal("sync: WaitGroup.Add called from inside and outside synctest bubble")
     }

@@ -116,7 +120,7 @@ func (wg *WaitGroup) Add(delta int) {
     if w != 0 && delta > 0 && v == int32(delta) {
         panic("sync: WaitGroup misuse: Add called concurrently with Wait")
     }
-    if v == 0 && state&waitGroupBubbleFlag != 0 {
+    if v == 0 && bubbled {
         // Disassociate the WaitGroup from its bubble.
         synctest.Disassociate(wg)
         if w == 0 {
@@ -93,6 +93,11 @@
 // A [sync.WaitGroup] becomes associated with a bubble on the first
 // call to Add or Go. Once a WaitGroup is associated with a bubble,
 // calling Add or Go from outside that bubble is a fatal error.
+// (As a technical limitation, a WaitGroup defined as a package
+// variable, such as "var wg sync.WaitGroup", cannot be associated
+// with a bubble and operations on it may not be durably blocking.
+// This limitation does not apply to a *WaitGroup stored in a
+// package variable, such as "var wg = new(sync.WaitGroup)".)
 //
 // [sync.Cond.Wait] is durably blocking. Waking a goroutine in a bubble
 // blocked on Cond.Wait from outside the bubble is a fatal error.
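A short illustration of the limitation documented above, using the Go 1.25 testing/synctest API; the comments restate the doc text rather than independently verified behavior:

package example_test

import (
    "sync"
    "testing"
    "testing/synctest"
)

// A plain package-level WaitGroup may be linker-allocated, so the runtime
// cannot attach a bubble association to it.
var plainWG sync.WaitGroup

// A *WaitGroup stored in a package variable is heap-allocated and can be
// associated with a bubble.
var ptrWG = new(sync.WaitGroup)

func TestWaitGroupBubbleAssociation(t *testing.T) {
    synctest.Test(t, func(t *testing.T) {
        // Works, but per the doc comment may not be durably blocking.
        plainWG.Add(1)
        go plainWG.Done()
        plainWG.Wait()

        // Bubble-associated: Wait blocks durably, so synctest.Wait can
        // observe the waiting goroutine deterministically before Done.
        ptrWG.Add(1)
        go ptrWG.Wait()
        synctest.Wait()
        ptrWG.Done()
    })
}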
@@ -529,6 +529,16 @@ func checkMergedShifts64(a [256]uint32, b [256]uint64, c [256]byte, v uint64) {
     b[1] = b[(v>>20)&0xFF]
     // ppc64x: "RLWNM", -"SLD"
     b[2] = b[((uint64((uint32(v) >> 21)) & 0x3f) << 4)]
+    // ppc64x: -"RLWNM"
+    b[3] = (b[3] << 24) & 0xFFFFFF000000
+    // ppc64x: "RLWNM\t[$]24, R[0-9]+, [$]0, [$]7,"
+    b[4] = (b[4] << 24) & 0xFF000000
+    // ppc64x: "RLWNM\t[$]24, R[0-9]+, [$]0, [$]7,"
+    b[5] = (b[5] << 24) & 0xFF00000F
+    // ppc64x: -"RLWNM"
+    b[6] = (b[6] << 0) & 0xFF00000F
+    // ppc64x: "RLWNM\t[$]4, R[0-9]+, [$]28, [$]31,"
+    b[7] = (b[7] >> 28) & 0xF
     // ppc64x: "RLWNM\t[$]11, R[0-9]+, [$]10, [$]15"
     c[0] = c[((v>>5)&0x3F)<<16]
     // ppc64x: "ANDCC\t[$]8064,"