diff --git a/src/net/net_fake.go b/src/net/net_fake.go
index 2d1e137b6d..f7eb28e01a 100644
--- a/src/net/net_fake.go
+++ b/src/net/net_fake.go
@@ -14,7 +14,6 @@ import (
 	"errors"
 	"io"
 	"os"
-	"runtime"
 	"sync"
 	"sync/atomic"
 	"syscall"
@@ -517,14 +516,6 @@ func (pq *packetQueue) send(dt *deadlineTimer, b []byte, from sockaddr, block bo
 		full = pq.full
 	}

-	// Before we check dt.expired, yield to other goroutines.
-	// This may help to prevent starvation of the goroutine that runs the
-	// deadlineTimer's time.After callback.
-	//
-	// TODO(#65178): Remove this when the runtime scheduler no longer starves
-	// runnable goroutines.
-	runtime.Gosched()
-
 	select {
 	case <-dt.expired:
 		return 0, os.ErrDeadlineExceeded
@@ -576,14 +567,6 @@ func (pq *packetQueue) recvfrom(dt *deadlineTimer, b []byte, wholePacket bool, c
 		empty = pq.empty
 	}

-	// Before we check dt.expired, yield to other goroutines.
-	// This may help to prevent starvation of the goroutine that runs the
-	// deadlineTimer's time.After callback.
-	//
-	// TODO(#65178): Remove this when the runtime scheduler no longer starves
-	// runnable goroutines.
-	runtime.Gosched()
-
 	select {
 	case <-dt.expired:
 		return 0, nil, os.ErrDeadlineExceeded
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index dc26cda992..d4919e56fd 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -167,7 +167,7 @@ func main() {
 	// Allow newproc to start new Ms.
 	mainStarted = true

-	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
+	if haveSysmon {
 		systemstack(func() {
 			newm(sysmon, nil, -1)
 		})
@@ -5941,6 +5941,11 @@ var forcegcperiod int64 = 2 * 60 * 1e9
 // golang.org/issue/42515 is needed on NetBSD.
 var needSysmonWorkaround bool = false

+// haveSysmon indicates whether there is sysmon thread support.
+//
+// No threads on wasm yet, so no sysmon.
+const haveSysmon = GOARCH != "wasm"
+
 // Always runs without a P, so write barriers are not allowed.
 //
 //go:nowritebarrierrec
@@ -6121,7 +6126,10 @@ func retake(now int64) uint32 {
 		s := pp.status
 		sysretake := false
 		if s == _Prunning || s == _Psyscall {
-			// Preempt G if it's running for too long.
+			// Preempt G if it's running on the same schedtick for
+			// too long. This could be from a single long-running
+			// goroutine or a sequence of goroutines run via
+			// runnext, which share a single schedtick time slice.
 			t := int64(pp.schedtick)
 			if int64(pd.schedtick) != t {
 				pd.schedtick = uint32(t)
@@ -6634,6 +6642,17 @@ const randomizeScheduler = raceenabled
 // If the run queue is full, runnext puts g on the global queue.
 // Executed only by the owner P.
 func runqput(pp *p, gp *g, next bool) {
+	if !haveSysmon && next {
+		// A runnext goroutine shares the same time slice as the
+		// current goroutine (inheritTime from runqget). To prevent a
+		// ping-pong pair of goroutines from starving all others, we
+		// depend on sysmon to preempt "long-running goroutines". That
+		// is, any set of goroutines sharing the same time slice.
+		//
+		// If there is no sysmon, we must avoid runnext entirely or
+		// risk starvation.
+		next = false
+	}
 	if randomizeScheduler && next && randn(2) == 0 {
 		next = false
 	}
diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go
index 1ebc2d3c6a..e985870710 100644
--- a/src/time/sleep_test.go
+++ b/src/time/sleep_test.go
@@ -111,11 +111,6 @@ func TestAfterFuncStarvation(t *testing.T) {
 	// the AfterFunc goroutine instead of the runnable channel goroutine.
 	// However, in https://go.dev/issue/65178 this was observed to live-lock
 	// on wasip1/wasm and js/wasm after <10000 runs.
-
-	if runtime.GOARCH == "wasm" {
-		testenv.SkipFlaky(t, 65178)
-	}
-
 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

 	var (
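
The runqput guard and the removal of the TestAfterFuncStarvation skip address the same scheduling pattern. The standalone program below is an illustrative sketch only, not code from this change: with GOMAXPROCS=1 and no sysmon thread (GOOS=js or GOOS=wasip1 before this change), the channel ping-pong keeps readying its counterpart into runnext, the pair shares a single time slice, and the goroutine running the time.AfterFunc callback that sets stop may never be scheduled.

// starvation_sketch.go: hypothetical illustration of go.dev/issue/65178,
// not part of this patch.
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	runtime.GOMAXPROCS(1)

	var (
		stop atomic.Bool
		wg   sync.WaitGroup
		c    = make(chan bool, 1)
	)

	// Without preemption of the ping-pong pair, the goroutine running this
	// callback can be starved indefinitely on wasm targets.
	time.AfterFunc(time.Millisecond, func() { stop.Store(true) })

	wg.Add(2)
	go func() {
		defer wg.Done()
		for !stop.Load() {
			c <- true // readies the blocked receiver, which is queued via runnext
		}
		close(c)
	}()
	go func() {
		defer wg.Done()
		for range c {
		}
	}()

	wg.Wait()
	fmt.Println("timer fired; no live-lock")
}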