diff --git a/doc/godebug.md b/doc/godebug.md index 515c40cd39..85137573da 100644 --- a/doc/godebug.md +++ b/doc/godebug.md @@ -128,7 +128,12 @@ and the [go command documentation](/cmd/go#hdr-Build_and_test_caching). ### Go 1.23 -TODO: `asynctimerchan` setting. +Go 1.23 changed the channels created by package time to be unbuffered +(synchronous), which makes correct use of the [`Timer.Stop`](/pkg/time/#Timer.Stop) +and [`Timer.Reset`](/pkg/time/#Timer.Reset) method results much easier. +The [`asynctimerchan` setting](/pkg/time/#NewTimer) disables this change. +There are no runtime metrics for this change. +This setting may be removed in a future release, Go 1.27 at the earliest. Go 1.23 changed the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat) for reparse points, which can be controlled with the `winsymlink` setting. diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go index 7ecb88683d..a97c391cd4 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -25,7 +25,7 @@ type Info struct { // Note: After adding entries to this table, update the list in doc/godebug.md as well. // (Otherwise the test in this package will fail.) var All = []Info{ - {Name: "asynctimerchan", Package: "time", Opaque: true}, + {Name: "asynctimerchan", Package: "time", Changed: 23, Old: "1", Opaque: true}, {Name: "execerrdot", Package: "os/exec"}, {Name: "gocachehash", Package: "cmd/go"}, {Name: "gocachetest", Package: "cmd/go"}, diff --git a/src/runtime/chan.go b/src/runtime/chan.go index ae26be3c42..8aca024c4c 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -323,6 +323,35 @@ func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { goready(gp, skip+1) } +// timerchandrain removes all elements in channel c's buffer. +// It reports whether any elements were removed. +// Because it is only intended for timers, it does not +// handle waiting senders at all (all timer channels +// use non-blocking sends to fill the buffer). +func timerchandrain(c *hchan) bool { + // Note: Cannot use empty(c) because we are called + // while holding c.timer.sendLock, and empty(c) will + // call c.timer.maybeRunChan, which will deadlock. + // We are emptying the channel, so we only care about + // the count, not about potentially filling it up. + if atomic.Loaduint(&c.qcount) == 0 { + return false + } + lock(&c.lock) + any := false + for c.qcount > 0 { + any = true + typedmemclr(c.elemtype, chanbuf(c, c.recvx)) + c.recvx++ + if c.recvx == c.dataqsiz { + c.recvx = 0 + } + c.qcount-- + } + unlock(&c.lock) + return any +} + // Sends and receives on unbuffered or empty-buffered channels are the // only operations where one running goroutine writes to the stack of // another running goroutine. The GC assumes that stack writes only @@ -748,9 +777,16 @@ func chanlen(c *hchan) int { if c == nil { return 0 } - if c.timer != nil { + async := debug.asynctimerchan.Load() != 0 + if c.timer != nil && async { c.timer.maybeRunChan() } + if c.timer != nil && !async { + // timer channels have a buffered implementation + // but present to users as unbuffered, so that we can + // undo sends without users noticing. 
+ return 0 + } return int(c.qcount) } @@ -758,6 +794,16 @@ func chancap(c *hchan) int { if c == nil { return 0 } + if c.timer != nil { + async := debug.asynctimerchan.Load() != 0 + if async { + return int(c.dataqsiz) + } + // timer channels have a buffered implementation + // but present to users as unbuffered, so that we can + // undo sends without users noticing. + return 0 + } return int(c.dataqsiz) } diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index a6f61240db..33b0387686 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -20,6 +20,7 @@ const ( lockRankSweep lockRankTestR lockRankTestW + lockRankTimerSend lockRankAllocmW lockRankExecW lockRankCpuprof @@ -91,6 +92,7 @@ var lockNames = []string{ lockRankSweep: "sweep", lockRankTestR: "testR", lockRankTestW: "testW", + lockRankTimerSend: "timerSend", lockRankAllocmW: "allocmW", lockRankExecW: "execW", lockRankCpuprof: "cpuprof", @@ -168,51 +170,52 @@ var lockPartialOrder [][]lockRank = [][]lockRank{ lockRankSweep: {}, lockRankTestR: {}, lockRankTestW: {}, + lockRankTimerSend: {}, lockRankAllocmW: {}, lockRankExecW: {}, lockRankCpuprof: {}, lockRankPollCache: {}, lockRankPollDesc: {}, lockRankWakeableSleep: {}, - lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan}, - lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, - lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, - lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, - lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan}, + lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan}, + lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR}, + lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, 
lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched}, lockRankNotifyList: {}, - lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, - lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, - lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, + lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, + lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers}, + lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer}, lockRankRoot: {}, lockRankItab: {}, lockRankReflectOffs: {lockRankItab}, lockRankUserArenaState: {}, lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, - lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, 
lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, - lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, - lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, - lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - 
lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, - lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, - lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, 
lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, - lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, + lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, 
lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, + lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, + lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, + 
lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, + lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, 
lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, + lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, + lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, lockRankPanic: {}, lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, lockRankRaceFini: {lockRankPanic}, - lockRankAllocmRInternal: {lockRankSysmon, 
lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, - lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, + lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR}, + lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR}, lockRankTestRInternal: {lockRankTestR, lockRankTestW}, } diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 75dc6f62cf..2b4b5e99cd 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -55,9 +55,11 @@ NONE < # Test only NONE < testR, testW; +NONE < timerSend; + # Scheduler, timers, netpoll NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep; -scavenge, sweep, testR, wakeableSleep < hchan; +scavenge, sweep, testR, wakeableSleep, timerSend < hchan; assistQueue, cpuprof, forcegc, @@ -81,7 +83,7 @@ NONE < notifyList; hchan, notifyList < sudog; hchan, pollDesc, wakeableSleep < timers; -timers < timer < netpollInit; +timers, timerSend < timer < netpollInit; # Semaphores NONE < root; diff --git a/src/runtime/time.go b/src/runtime/time.go index 37c55b2b46..31c83ca4e3 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -36,11 +36,18 @@ type timer struct { // a well-behaved function and not block. // // The arg and seq are client-specified opaque arguments passed back to f. - // When used from package time, arg is a channel (for After, NewTicker) - // or the function to call (for AfterFunc) and seq is unused (0). // When used from netpoll, arg and seq have meanings defined by netpoll // and are completely opaque to this code; in that context, seq is a sequence // number to recognize and squech stale function invocations. + // When used from package time, arg is a channel (for After, NewTicker) + // or the function to call (for AfterFunc) and seq is unused (0). + // + // Package time does not know about seq, but if this is a channel timer (t.isChan == true), + // this file uses t.seq as a sequence number to recognize and squelch + // sends that correspond to an earlier (stale) timer configuration, + // similar to its use in netpoll. In this usage (that is, when t.isChan == true), + // writes to seq are protected by both t.mu and t.sendLock, + // so reads are allowed when holding either of the two mutexes. // // The delay argument is nanotime() - t.when, meaning the delay in ns between // when the timer should have gone off and now. Normally that amount is @@ -69,6 +76,10 @@ type timer struct { // Since writes to whenHeap are protected by two locks (t.mu and t.ts.mu), // it is permitted to read whenHeap when holding either one. whenHeap int64 + + // sendLock protects sends on the timer's channel. + // Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0. 
+ sendLock mutex } // init initializes a newly allocated timer t. @@ -167,7 +178,7 @@ func (t *timer) trace1(op string) { return } bits := [4]string{"h", "m", "z", "c"} - for i := range bits { + for i := range 3 { if t.state&(1<<i) == 0 { bits[i] = "-" } } + if !t.isChan { + bits[3] = "-" + } @@ ... @@ if t.state&timerHeaped == 0 && t.isChan && t.when > 0 { // If timer should have triggered already (but nothing looked at it yet), // trigger now, so that a receive after the stop sees the "old" value // that should be there. + // (It is possible to have t.blocked > 0 if there is a racing receive + // in blockTimerChan, but timerHeaped not being set means + // it hasn't run t.maybeAdd yet; in that case, running the + // timer ourselves now is fine.) if now := nanotime(); t.when <= now { systemstack(func() { t.unlockAndRun(now) // resets t.when @@ -390,6 +418,23 @@ func (t *timer) stop() bool { t.lock() } } +} + +// stop stops the timer t. It may be on some other P, so we can't +// actually remove it from the timers heap. We can only mark it as stopped. +// It will be removed in due course by the P whose heap it is on. +// Reports whether the timer was stopped before it was run. +func (t *timer) stop() bool { + async := debug.asynctimerchan.Load() != 0 + if !async && t.isChan { + lock(&t.sendLock) + } + + t.lock() + t.trace("stop") + if async { + t.maybeRunAsync() + } if t.state&timerHeaped != 0 { t.state |= timerModified if t.state&timerZombie == 0 { @@ -399,7 +444,20 @@ } pending := t.when > 0 t.when = 0 + + if !async && t.isChan { + // Stop any future sends with stale values. + // See timer.unlockAndRun. + t.seq++ + } t.unlock() + if !async && t.isChan { + unlock(&t.sendLock) + if timerchandrain(t.hchan()) { + pending = true + } + } + return pending } @@ -439,8 +497,16 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in if period < 0 { throw("timer period must be non-negative") } + async := debug.asynctimerchan.Load() != 0 + + if !async && t.isChan { + lock(&t.sendLock) + } t.lock() + if async { + t.maybeRunAsync() + } t.trace("modify") t.period = period if f != nil { @@ -449,20 +515,6 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in t.seq = seq } - if t.state&timerHeaped == 0 && t.isChan && t.when > 0 { - // This is a timer for an unblocked channel. - // Perhaps it should have expired already. - if now := nanotime(); t.when <= now { - // The timer should have run already, - // but nothing has checked it yet. - // Run it now. - systemstack(func() { - t.unlockAndRun(now) // resets t.when - }) - t.lock() - } - } - wake := false pending := t.when > 0 t.when = when @@ -483,7 +535,20 @@ } add := t.needsAdd() + + if !async && t.isChan { + // Stop any future sends with stale values. + // See timer.unlockAndRun. + t.seq++ + } t.unlock() + if !async && t.isChan { + if timerchandrain(t.hchan()) { + pending = true + } + unlock(&t.sendLock) + } + if add { t.maybeAdd() } @@ -936,7 +1001,35 @@ func (t *timer) unlockAndRun(now int64) { if ts != nil { ts.unlock() } + + async := debug.asynctimerchan.Load() != 0 + if !async && t.isChan { + // For a timer channel, we want to make sure that no stale sends + // happen after a t.stop or t.modify, but we cannot hold t.mu + // during the actual send (which f does) due to lock ordering. + // It can happen that we are holding t's lock above, we decide + // it's time to send a time value (by calling f), grab the parameters, + // unlock above, and then a t.stop or t.modify changes the timer + // and returns. 
At that point, the send need not happen after all. + // The way we arrange for it not to happen is that t.stop and t.modify + // both increment t.seq while holding both t.mu and t.sendLock. + // We copied the seq value above while holding t.mu. + // Now we can acquire t.sendLock (which will be held across the send) + // and double-check that t.seq is still the seq value we saw above. + // If not, the timer has been updated and we should skip the send. + // We skip the send by reassigning f to a no-op function. + lock(&t.sendLock) + if t.seq != seq { + f = func(any, uintptr, int64) {} + } + } + f(arg, seq, delay) + + if !async && t.isChan { + unlock(&t.sendLock) + } + if ts != nil { ts.lock() } diff --git a/src/time/sleep.go b/src/time/sleep.go index e6225dfb35..669660f90e 100644 --- a/src/time/sleep.go +++ b/src/time/sleep.go @@ -27,6 +27,20 @@ func syncTimer(c chan Time) unsafe.Pointer { } // Otherwise pass to runtime. + // This handles asynctimerchan=0, which is the default Go 1.23 behavior, + // as well as asynctimerchan=2, which is like asynctimerchan=1 + // but implemented entirely by the runtime. + // The only reason to use asynctimerchan=2 is for debugging + // a problem fixed by asynctimerchan=1: it enables the new + // GC-able timer channels (#61542) but not the sync channels (#37196). + // + // If we decide to roll back the sync channels, we will still have + // a fully tested async runtime implementation (asynctimerchan=2) + // and can make this function always return c. + // + // If we decide to keep the sync channels, we can delete all the + // handling of asynctimerchan in the runtime and keep just this + // function to handle asynctimerchan=1. return *(*unsafe.Pointer)(unsafe.Pointer(&c)) } @@ -79,25 +93,22 @@ type Timer struct { // Stop prevents the Timer from firing. // It returns true if the call stops the timer, false if the timer has already // expired or been stopped. -// Stop does not close the channel, to prevent a read from the channel succeeding -// incorrectly. // -// To ensure the channel is empty after a call to Stop, check the -// return value and drain the channel. -// For example, assuming the program has not received from t.C already: -// -// if !t.Stop() { -// <-t.C -// } -// -// This cannot be done concurrent to other receives from the Timer's -// channel or other calls to the Timer's Stop method. -// -// For a timer created with AfterFunc(d, f), if t.Stop returns false, then the timer -// has already expired and the function f has been started in its own goroutine; +// For a func-based timer created with AfterFunc(d, f), +// if t.Stop returns false, then the timer has already expired +// and the function f has been started in its own goroutine; // Stop does not wait for f to complete before returning. -// If the caller needs to know whether f is completed, it must coordinate -// with f explicitly. +// If the caller needs to know whether f is completed, +// it must coordinate with f explicitly. +// +// For a chan-based timer created with NewTimer(d), as of Go 1.23, +// any receive from t.C after Stop has returned is guaranteed to block +// rather than receive a stale time value from before the Stop; +// if the program has not received from t.C already and the timer is +// running, Stop is guaranteed to return true. +// Before Go 1.23, the only safe way to use Stop was to insert an extra +// <-t.C if Stop returned false to drain a potential stale value. +// See the [NewTimer] documentation for more details. 
func (t *Timer) Stop() bool { if !t.initTimer { panic("time: Stop called on uninitialized Timer") } @@ -116,6 +127,18 @@ func (t *Timer) Stop() bool { // timers, even if they haven't expired or been stopped. // The Stop method is no longer necessary to help the garbage collector. // (Code may of course still want to call Stop to stop the timer for other reasons.) +// +// Before Go 1.23, the channel associated with a Timer was +// asynchronous (buffered, capacity 1), which meant that +// stale time values could be received even after [Timer.Stop] +// or [Timer.Reset] returned. +// As of Go 1.23, the channel is synchronous (unbuffered, capacity 0), +// eliminating the possibility of those stale values. +// +// The GODEBUG setting asynctimerchan=1 restores both pre-Go 1.23 +// behaviors: when set, unexpired timers won't be garbage collected, and +// channels will have buffered capacity. This setting may be removed +// in Go 1.27 or later. func NewTimer(d Duration) *Timer { c := make(chan Time, 1) t := (*Timer)(newTimer(when(d), 0, sendTime, c, syncTimer(c))) @@ -127,29 +150,7 @@ // It returns true if the timer had been active, false if the timer had // expired or been stopped. // -// For a Timer created with NewTimer, Reset should be invoked only on -// stopped or expired timers with drained channels. -// -// If a program has already received a value from t.C, the timer is known -// to have expired and the channel drained, so t.Reset can be used directly. -// If a program has not yet received a value from t.C, however, -// the timer must be stopped and—if Stop reports that the timer expired -// before being stopped—the channel explicitly drained: -// -// if !t.Stop() { -// <-t.C -// } -// t.Reset(d) -// -// This should not be done concurrent to other receives from the Timer's -// channel. -// -// Note that it is not possible to use Reset's return value correctly, as there -// is a race condition between draining the channel and the new timer expiring. -// Reset should always be invoked on stopped or expired channels, as described above. -// The return value exists to preserve compatibility with existing programs. -// -// For a Timer created with AfterFunc(d, f), Reset either reschedules +// For a func-based timer created with AfterFunc(d, f), Reset either reschedules // when f will run, in which case Reset returns true, or schedules f // to run again, in which case it returns false. // When Reset returns false, Reset neither waits for the prior f to @@ -157,6 +158,15 @@ func NewTimer(d Duration) *Timer { // goroutine running f does not run concurrently with the prior // one. If the caller needs to know whether the prior execution of // f is completed, it must coordinate with f explicitly. +// +// For a chan-based timer created with NewTimer, as of Go 1.23, +// any receive from t.C after Reset has returned is guaranteed not +// to receive a time value corresponding to the previous timer settings; +// if the program has not received from t.C already and the timer is +// running, Reset is guaranteed to return true. +// Before Go 1.23, the only safe way to use Reset was to Stop and +// explicitly drain the timer first. +// See the [NewTimer] documentation for more details. 
func (t *Timer) Reset(d Duration) bool { if !t.initTimer { panic("time: Reset called on uninitialized Timer") } diff --git a/src/time/tick_test.go b/src/time/tick_test.go index 46268acfe3..90c13fbe82 100644 --- a/src/time/tick_test.go +++ b/src/time/tick_test.go @@ -306,38 +306,52 @@ func TestTimerGC(t *testing.T) { run(t, "NewTickerStop", func() { NewTicker(Hour).Stop() }) } -func TestTimerChan(t *testing.T) { - t.Parallel() - tick := &timer2{NewTimer(10000 * Second)} - testTimerChan(t, tick, tick.C) +func TestChan(t *testing.T) { + for _, name := range []string{"0", "1", "2"} { + t.Run("asynctimerchan="+name, func(t *testing.T) { + t.Setenv("GODEBUG", "asynctimerchan="+name) + t.Run("Timer", func(t *testing.T) { + tim := NewTimer(10000 * Second) + testTimerChan(t, tim, tim.C, name == "0") + }) + t.Run("Ticker", func(t *testing.T) { + tim := &tickerTimer{Ticker: NewTicker(10000 * Second)} + testTimerChan(t, tim, tim.C, name == "0") + }) + }) + } } -func TestTickerChan(t *testing.T) { - t.Parallel() - tick := NewTicker(10000 * Second) - testTimerChan(t, tick, tick.C) +type timer interface { + Stop() bool + Reset(Duration) bool } -// timer2 is a Timer with Reset and Stop methods with no result, -// to have the same signatures as Ticker. -type timer2 struct { - *Timer +// tickerTimer is a Ticker with Reset and Stop methods that return bools, +// to have the same signatures as Timer. +type tickerTimer struct { + *Ticker + stopped bool } -func (t *timer2) Stop() { - t.Timer.Stop() +func (t *tickerTimer) Stop() bool { + pending := !t.stopped + t.stopped = true + t.Ticker.Stop() + return pending } -func (t *timer2) Reset(d Duration) { - t.Timer.Reset(d) +func (t *tickerTimer) Reset(d Duration) bool { + pending := !t.stopped + t.stopped = false + t.Ticker.Reset(d) + return pending } -type ticker interface { - Stop() - Reset(Duration) -} +func testTimerChan(t *testing.T, tim timer, C <-chan Time, synctimerchan bool) { + _, isTimer := tim.(*Timer) + isTicker := !isTimer -func testTimerChan(t *testing.T, tick ticker, C <-chan Time) { // Retry parameters. Enough to deflake even on slow machines. // Windows in particular has very coarse timers so we have to // wait 10ms just to make a timer go off. @@ -357,7 +371,7 @@ func testTimerChan(t *testing.T, tick ticker, C <-chan Time) { select { default: case <-C: - t.Fatalf("extra tick") + t.Errorf("extra tick") } } assertTick := func() { @@ -375,10 +389,16 @@ func testTimerChan(t *testing.T, tick ticker, C <-chan Time) { return } } - t.Fatalf("missing tick") + t.Errorf("missing tick") } assertLen := func() { t.Helper() + if synctimerchan { + if n := len(C); n != 0 { + t.Errorf("synctimer has len(C) = %d, want 0 (always)", n) + } + return + } var n int if n = len(C); n == 1 { return } @@ -389,50 +409,58 @@ func testTimerChan(t *testing.T, tick ticker, C <-chan Time) { return } } - t.Fatalf("len(C) = %d, want 1", n) + t.Errorf("len(C) = %d, want 1", n) } // Test simple stop; timer never in heap. - tick.Stop() + tim.Stop() noTick() // Test modify of timer not in heap. - tick.Reset(10000 * Second) + tim.Reset(10000 * Second) noTick() - // Test modify of timer in heap. - tick.Reset(1) - assertTick() + if synctimerchan { + // Test modify of timer in heap. + tim.Reset(1) + Sleep(sched) + if l, c := len(C), cap(C); l != 0 || c != 0 { + //t.Fatalf("len(C), cap(C) = %d, %d, want 0, 0", l, c) + } + assertTick() + } else { + // Test modify of timer in heap. 
+ tim.Reset(1) + assertTick() + Sleep(sched) + tim.Reset(10000 * Second) + if isTicker { + assertTick() + } + noTick() - // Sleep long enough that a second tick must happen if this is a ticker. - // Test that Reset does not lose the tick that should have happened. - Sleep(sched) - tick.Reset(10000 * Second) - _, isTicker := tick.(*Ticker) - if isTicker { + // Test that len sees an immediate tick arrive + // for Reset of timer in heap. + tim.Reset(1) + assertLen() + assertTick() + + // Test that len sees an immediate tick arrive + // for Reset of timer NOT in heap. + tim.Stop() + if !synctimerchan { + drain() + } + tim.Reset(1) + assertLen() assertTick() } - noTick() - - // Test that len sees an immediate tick arrive - // for Reset of timer in heap. - tick.Reset(1) - assertLen() - assertTick() - - // Test that len sees an immediate tick arrive - // for Reset of timer NOT in heap. - tick.Stop() - drain() - tick.Reset(1) - assertLen() - assertTick() // Sleep long enough that a second tick must happen if this is a ticker. // Test that Reset does not lose the tick that should have happened. Sleep(sched) - tick.Reset(10000 * Second) - if isTicker { + tim.Reset(10000 * Second) + if !synctimerchan && isTicker { assertLen() assertTick() } @@ -461,8 +489,10 @@ func testTimerChan(t *testing.T, tick ticker, C <-chan Time) { } // Reset timer in heap (already reset above, but just in case). - tick.Reset(10000 * Second) - drain() + tim.Reset(10000 * Second) + if !synctimerchan { + drain() + } // Test stop while timer in heap (because goroutine is blocked on <-C). done := make(chan bool) go func() { <-C done <- true }() Sleep(sched) notDone(done) // Test reset far away while timer in heap. - tick.Reset(20000 * Second) + tim.Reset(20000 * Second) Sleep(sched) notDone(done) // Test imminent reset while in heap. - tick.Reset(1) + tim.Reset(1) waitDone(done) // If this is a ticker, another tick should have come in already // (they are 1ns apart). If a timer, it should have stopped. if isTicker { assertTick() } else { noTick() } - tick.Stop() - if isTicker { + tim.Stop() + if isTicker || !synctimerchan { + t.Logf("drain") drain() } noTick() // Again using select and with two goroutines waiting. - tick.Reset(10000 * Second) + tim.Reset(10000 * Second) + if !synctimerchan { + drain() + } done = make(chan bool, 2) done1 := make(chan bool) done2 := make(chan bool) stop := make(chan bool) go func() { select { case <-C: done <- true case <-stop: } done1 <- true }() go func() { select { case <-C: done <- true case <-stop: } done2 <- true }() Sleep(sched) notDone(done) - tick.Reset(sched / 2) + tim.Reset(sched / 2) Sleep(sched) waitDone(done) - tick.Stop() + tim.Stop() close(stop) waitDone(done1) waitDone(done2) @@ -560,6 +594,35 @@ func testTimerChan(t *testing.T, tick ticker, C <-chan Time) { close(stop) waitDone(done) waitDone(done) + + // Test that Stop and Reset block old values from being received. + // (Proposal go.dev/issue/37196.) + if synctimerchan { + tim.Reset(1) + Sleep(10 * Millisecond) + if pending := tim.Stop(); pending != true { + t.Errorf("tim.Stop() = %v, want true", pending) + } + noTick() + + tim.Reset(Hour) + noTick() + if pending := tim.Reset(1); pending != true { + t.Errorf("tim.Reset(1) = %v, want true", pending) + } + assertTick() + Sleep(10 * Millisecond) + if isTicker { + assertTick() + Sleep(10 * Millisecond) + } else { + noTick() + } + if pending, want := tim.Reset(Hour), isTicker; pending != want { + t.Errorf("tim.Reset(Hour) = %v, want %v", pending, want) + } + noTick() + } } func TestManualTicker(t *testing.T) {
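To make the semantics concrete, here is a small illustrative program (a sketch written for this review, not part of the patch; it assumes only the standard library). Under the Go 1.23 default (asynctimerchan=0) the old stop-and-drain idiom is unnecessary, and len and cap of a timer channel report 0, per the chanlen/chancap changes above:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.NewTimer(10 * time.Millisecond)
        time.Sleep(20 * time.Millisecond) // timer has fired by now

        // Pre-Go 1.23 (or GODEBUG=asynctimerchan=1): the fired value sits in
        // the buffered channel, so a receive after Reset could observe the
        // stale time; the documented workaround was
        //	if !t.Stop() { <-t.C }
        // before every Reset.
        //
        // Go 1.23 (asynctimerchan=0): Reset increments the timer's seq and
        // drains the buffer (see timer.modify and timerchandrain above),
        // so Reset alone is safe:
        t.Reset(50 * time.Millisecond)

        fmt.Println(len(t.C), cap(t.C)) // 0 0 under the Go 1.23 behavior

        start := time.Now()
        <-t.C // guaranteed to correspond to the Reset, not the earlier firing
        fmt.Println("waited", time.Since(start).Round(time.Millisecond))
    }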