runtime: move per-P timers state into its own struct
Continuing conversion from C to Go, introduce type timers encapsulating
all timer heap state, with methods for operations. This should at least
be easier to think about, instead of having these fields strewn through
the P struct. It should also be easier to test.

I am skeptical about the pair of atomic int64 deadlines: I think there
are missed wakeups lurking. Having the code in an abstracted API should
make it easier to reason through and fix if needed.

[This is one CL in a refactoring stack making very small changes in each
step, so that any subtle bugs that we miss can be more easily pinpointed
to a small change.]

Change-Id: If5ea3e0b946ca14076f44c85cbb4feb9eddb4f95
Reviewed-on: https://go-review.googlesource.com/c/go/+/564132
Reviewed-by: Austin Clements <austin@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Russ Cox <rsc@golang.org>
commit adc575e64c
parent 8570aaaf1a
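The "pair of atomic int64 deadlines" the message is skeptical about are timer0When and timerModifiedEarliest, which this CL moves from the P struct into the new timers struct. The sketch below condenses how the two combine: the body of wakeTime is copied from the diff that follows, while the surrounding package, struct, and main are scaffolding added here purely for illustration.

package main

import (
	"fmt"
	"sync/atomic"
)

// Sketch: the two atomic deadlines kept per timer heap (field names from the CL).
type timers struct {
	timer0When            atomic.Int64 // heap[0].when; 0 if the heap is empty
	timerModifiedEarliest atomic.Int64 // earliest pending nextWhen; 0 if none
}

// wakeTime mirrors (*timers).wakeTime in the diff below: the next wakeup
// is the earlier nonzero of the two deadlines.
func (ts *timers) wakeTime() int64 {
	next := ts.timer0When.Load()
	nextAdj := ts.timerModifiedEarliest.Load()
	if next == 0 || (nextAdj != 0 && nextAdj < next) {
		next = nextAdj
	}
	return next
}

func main() {
	var ts timers
	ts.timer0When.Store(100)
	ts.timerModifiedEarliest.Store(50)
	fmt.Println(ts.wakeTime()) // 50: a modified timer is due before heap[0]
}

Note that wakeTime issues two separate Loads: there is no atomic snapshot of both deadlines, which is presumably the kind of window where the missed wakeups the message worries about could hide.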
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -2961,7 +2961,7 @@ func handoffp(pp *p) {
 
 	// The scheduler lock cannot be held when calling wakeNetPoller below
 	// because wakeNetPoller may call wakep which may call startm.
-	when := nobarrierWakeTime(pp)
+	when := pp.timers.wakeTime()
 	pidleput(pp, 0)
 	unlock(&sched.lock)
 
@@ -3158,7 +3158,7 @@ top:
 	// which may steal timers. It's important that between now
 	// and then, nothing blocks, so these numbers remain mostly
 	// relevant.
-	now, pollUntil, _ := checkTimers(pp, 0)
+	now, pollUntil, _ := pp.timers.check(0)
 
 	// Try to schedule the trace reader.
 	if traceEnabled() || traceShuttingDown() {
@@ -3575,7 +3575,7 @@ func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWo
 			// timerpMask tells us whether the P may have timers at all. If it
 			// can't, no need to check at all.
 			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
-				tnow, w, ran := checkTimers(p2, now)
+				tnow, w, ran := p2.timers.check(now)
 				now = tnow
 				if w != 0 && (pollUntil == 0 || w < pollUntil) {
 					pollUntil = w
@@ -3641,7 +3641,7 @@ func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
 	for id, p2 := range allpSnapshot {
 		if timerpMaskSnapshot.read(uint32(id)) {
-			w := nobarrierWakeTime(p2)
+			w := p2.timers.wakeTime()
 			if w != 0 && (pollUntil == 0 || w < pollUntil) {
 				pollUntil = w
 			}
@@ -5435,7 +5435,7 @@ func (pp *p) init(id int32) {
 			pp.raceprocctx = raceproccreate()
 		}
 	}
-	lockInit(&pp.timersLock, lockRankTimers)
+	lockInit(&pp.timers.lock, lockRankTimers)
 
 	// This P may get timers when it starts running. Set the mask here
 	// since the P may not go through pidleget (notably P 0 on startup).
@@ -5467,7 +5467,7 @@ func (pp *p) destroy() {
 	}
 
 	// Move all timers to the local P.
-	adoptTimers(pp)
+	getg().m.p.ptr().timers.take(&pp.timers)
 
 	// Flush p's write barrier buffer.
 	if gcphase != _GCoff {
@@ -5498,7 +5498,7 @@ func (pp *p) destroy() {
 	gfpurge(pp)
 	traceProcFree(pp)
 	if raceenabled {
-		if pp.timerRaceCtx != 0 {
+		if pp.timers.raceCtx != 0 {
 			// The race detector code uses a callback to fetch
 			// the proc context, so arrange for that callback
 			// to see the right thing.
@@ -5508,8 +5508,8 @@ func (pp *p) destroy() {
 			phold := mp.p.ptr()
 			mp.p.set(pp)
 
-			racectxend(pp.timerRaceCtx)
-			pp.timerRaceCtx = 0
+			racectxend(pp.timers.raceCtx)
+			pp.timers.raceCtx = 0
 
 			mp.p.set(phold)
 		}
@@ -5860,7 +5860,7 @@ func checkdead() {
 
 	// There are no goroutines running, so we can look at the P's.
 	for _, pp := range allp {
-		if len(pp.timers) > 0 {
+		if len(pp.timers.heap) > 0 {
 			return
 		}
 	}
@@ -6204,7 +6204,7 @@ func schedtrace(detailed bool) {
 			} else {
 				print("nil")
 			}
-			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers), "\n")
+			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
 		} else {
 			// In non-detailed mode format lengths of per-P run queues as:
 			// [len1 len2 len3 len4]
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -708,16 +708,6 @@ type p struct {
 
 	palloc persistentAlloc // per-P to avoid mutex
 
-	// The when field of the first entry on the timer heap.
-	// This is 0 if the timer heap is empty.
-	timer0When atomic.Int64
-
-	// The earliest known nextwhen field of a timer with
-	// timerModifiedEarlier status. Because the timer may have been
-	// modified again, there need not be any timer with this value.
-	// This is 0 if there are no timerModifiedEarlier timers.
-	timerModifiedEarliest atomic.Int64
-
 	// Per-P GC state
 	gcAssistTime         int64 // Nanoseconds in assistAlloc
 	gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic)
@@ -751,23 +741,8 @@ type p struct {
 	// writing any stats. Its value is even when not, odd when it is.
 	statsSeq atomic.Uint32
 
-	// Lock for timers. We normally access the timers while running
-	// on this P, but the scheduler can also do it from a different P.
-	timersLock mutex
-
-	// Actions to take at some time. This is used to implement the
-	// standard library's time package.
-	// Must hold timersLock to access.
-	timers []*timer
-
-	// Number of timers in P's heap.
-	numTimers atomic.Uint32
-
-	// Number of timerDeleted timers in P's heap.
-	deletedTimers atomic.Uint32
-
-	// Race context used while executing timer functions.
-	timerRaceCtx uintptr
+	// Timer heap.
+	timers timers
 
 	// maxStackScanDelta accumulates the amount of stack space held by
 	// live goroutines (i.e. those eligible for stack scanning).
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -28,10 +28,7 @@ import (
 // Package time knows the layout of this structure.
 // If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
 type timer struct {
-	// If this timer is on a heap, which P's heap it is on.
-	// puintptr rather than *p to match uintptr in the versions
-	// of this struct defined in other packages.
-	pp puintptr
+	ts *timers
 
 	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
 	// each time calling f(arg, now) in the timer goroutine, so f must be
@@ -55,6 +52,39 @@ type timer struct {
 	state atomic.Uint32
 }
 
+// A timers is a per-P set of timers.
+type timers struct {
+	// lock protects timers; timers are per-P, but the scheduler can
+	// access the timers of another P, so we have to lock.
+	lock mutex
+
+	// heap is the set of timers, ordered by t.when.
+	// Must hold lock to access.
+	heap []*timer
+
+	// len is an atomic copy of len(heap).
+	len atomic.Uint32
+
+	// zombies is the number of deleted timers left in heap.
+	zombies atomic.Uint32
+
+	// raceCtx is the race context used while executing timer functions.
+	raceCtx uintptr
+
+	// timer0When is an atomic copy of heap[0].when.
+	// If len(heap) == 0, timer0When is 0.
+	timer0When atomic.Int64
+
+	// timerModifiedEarliest holds the earliest known heap[i].nextWhen field
+	// for the heap entries with a new nextWhen pending
+	// (that is, with the timerNextWhen bit set in t.state).
+	// Because timers can be modified multiple times,
+	// timerModifiedEarliest can be set to a nextWhen that has since
+	// been replaced with a later time.
+	// If this is 0, it means there are no timerNextWhen timers in the heap.
+	timerModifiedEarliest atomic.Int64
+}
+
 // Timer state field.
 // Note that state 0 must be "unlocked, not in heap" and usable,
 // at least for time.Timer.Stop. See go.dev/issue/21874.
@@ -117,33 +147,33 @@ func (t *timer) unlock(state uint32, mp *m) {
 
 // updateWhen updates t.when as directed by state, returning the new state
 // and a bool indicating whether the state (and t.when) changed.
-// If pp != nil, then the caller must have locked pp.timers,
-// t must be pp.timers[0], and updateWhen takes care of
-// moving t within the pp.timers heap when t.when is changed.
-func (t *timer) updateWhen(state uint32, pp *p) (newState uint32, updated bool) {
+// If ts != nil, then the caller must have locked ts,
+// t must be ts.heap[0], and updateWhen takes care of
+// moving t within the timers heap when t.when is changed.
+func (t *timer) updateWhen(state uint32, ts *timers) (newState uint32, updated bool) {
 	if state&timerNextWhen == 0 {
 		return state, false
 	}
 	state &^= timerNextWhen
 	if t.nextWhen == 0 {
-		if pp != nil {
-			if t != pp.timers[0] {
+		if ts != nil {
+			if t != ts.heap[0] {
 				badTimer()
 			}
-			pp.deletedTimers.Add(-1)
-			dodeltimer0(pp)
+			ts.zombies.Add(-1)
+			ts.deleteMin()
 		}
 		state &^= timerHeaped
 	} else {
 		// Now we can change the when field.
 		t.when = t.nextWhen
 		// Move t to the right position.
-		if pp != nil {
-			if t != pp.timers[0] {
+		if ts != nil {
+			if t != ts.heap[0] {
 				badTimer()
 			}
-			siftdownTimer(pp.timers, 0)
-			updateTimer0When(pp)
+			ts.siftDown(0)
+			ts.updateTimer0When()
 		}
 	}
 	return state, true
@@ -241,26 +271,26 @@ func goroutineReady(arg any, seq uintptr) {
 	goready(arg.(*g), 0)
 }
 
-// doaddtimer adds t to the current P's heap.
-// The caller must have set t.pp = pp, unlocked t,
-// and then locked the timers for pp.
-func doaddtimer(pp *p, t *timer) {
+// add adds t to the timers.
+// The caller must have set t.ts = ts, unlocked t,
+// and then locked ts.lock.
+func (ts *timers) add(t *timer) {
+	assertLockHeld(&ts.lock)
 	// Timers rely on the network poller, so make sure the poller
 	// has started.
 	if netpollInited.Load() == 0 {
 		netpollGenericInit()
 	}
 
-	if t.pp.ptr() != pp {
-		throw("doaddtimer: P not set in timer")
+	if t.ts != ts {
+		throw("ts not set in timer")
 	}
-	i := len(pp.timers)
-	pp.timers = append(pp.timers, t)
-	siftupTimer(pp.timers, i)
-	if t == pp.timers[0] {
-		pp.timer0When.Store(t.when)
+	ts.heap = append(ts.heap, t)
+	ts.siftUp(len(ts.heap) - 1)
+	if t == ts.heap[0] {
+		ts.timer0When.Store(t.when)
 	}
-	pp.numTimers.Add(1)
+	ts.len.Add(1)
 }
 
 // stop deletes the timer t. It may be on some other P, so we can't
@@ -271,7 +301,7 @@ func (t *timer) stop() bool {
 	state, mp := t.lock()
 	if state&timerHeaped != 0 && (state&timerNextWhen == 0 || t.nextWhen != 0) {
 		// Timer pending: stop it.
-		t.pp.ptr().deletedTimers.Add(1)
+		t.ts.zombies.Add(1)
 		t.nextWhen = 0
 		state |= timerNextWhen
 		t.unlock(state, mp)
@@ -283,30 +313,29 @@ func (t *timer) stop() bool {
 	return false
 }
 
-// dodeltimer0 removes timer 0 from the current P's heap.
-// We are locked on the P when this is called.
-// It reports whether it saw no problems due to races.
-// The caller must have locked the timers for pp.
-func dodeltimer0(pp *p) {
-	if t := pp.timers[0]; t.pp.ptr() != pp {
-		throw("dodeltimer0: wrong P")
-	} else {
-		t.pp = 0
+// deleteMin removes timer 0 from ts.
+// ts must be locked.
+func (ts *timers) deleteMin() {
+	assertLockHeld(&ts.lock)
+	t := ts.heap[0]
+	if t.ts != ts {
+		throw("wrong timers")
 	}
-	last := len(pp.timers) - 1
+	t.ts = nil
+	last := len(ts.heap) - 1
 	if last > 0 {
-		pp.timers[0] = pp.timers[last]
+		ts.heap[0] = ts.heap[last]
 	}
-	pp.timers[last] = nil
-	pp.timers = pp.timers[:last]
+	ts.heap[last] = nil
+	ts.heap = ts.heap[:last]
 	if last > 0 {
-		siftdownTimer(pp.timers, 0)
+		ts.siftDown(0)
 	}
-	updateTimer0When(pp)
-	n := pp.numTimers.Add(-1)
+	ts.updateTimer0When()
+	n := ts.len.Add(-1)
 	if n == 0 {
 		// If there are no timers, then clearly none are modified.
-		pp.timerModifiedEarliest.Store(0)
+		ts.timerModifiedEarliest.Store(0)
 	}
 }
@@ -331,28 +360,29 @@ func (t *timer) modify(when, period int64, f func(any, uintptr), arg any, seq ui
 		// Set up t for insertion but unlock first,
 		// to avoid lock inversion with timers lock.
 		// Since t is not in a heap yet, nothing will
-		// find and modify it until after the doaddtimer.
+		// find and modify it until after the ts.add.
 		state |= timerHeaped
 		t.when = when
 
-		pp := getg().m.p.ptr()
-		t.pp.set(pp)
+		ts := &getg().m.p.ptr().timers
+		t.ts = ts
 		// pass mp=nil to t.unlock to avoid preemption
 		// between t.unlock and lock of timersLock.
 		// releasem done manually below
 		t.unlock(state, nil)
 
-		lock(&pp.timersLock)
-		doaddtimer(pp, t)
-		unlock(&pp.timersLock)
+		lock(&ts.lock)
+		ts.add(t)
+		unlock(&ts.lock)
 		releasem(mp)
 
 		wakeNetPoller(when)
 		return false
 	}
 
 	pending := state&timerNextWhen == 0 || t.nextWhen != 0 // timerHeaped is set (checked above)
 	if !pending {
-		t.pp.ptr().deletedTimers.Add(-1)
+		t.ts.zombies.Add(-1)
 	}
 
 	// The timer is in some other P's heap, so we can't change
@@ -364,7 +394,7 @@ func (t *timer) modify(when, period int64, f func(any, uintptr), arg any, seq ui
 	state |= timerNextWhen
 	earlier := when < t.when
 	if earlier {
-		updateTimerModifiedEarliest(t.pp.ptr(), when)
+		t.ts.updateTimerModifiedEarliest(when)
 	}
 
 	t.unlock(state, mp)
@@ -384,14 +414,15 @@ func (t *timer) reset(when int64) bool {
 	return t.modify(when, t.period, t.f, t.arg, t.seq)
 }
 
-// cleantimers cleans up the head of the timer queue. This speeds up
+// cleanHead cleans up the head of the timer queue. This speeds up
 // programs that create and delete timers; leaving them in the heap
 // slows down heap operations.
-// The caller must have locked the timers for pp.
-func cleantimers(pp *p) {
+// The caller must have locked ts.
+func (ts *timers) cleanHead() {
+	assertLockHeld(&ts.lock)
 	gp := getg()
 	for {
-		if len(pp.timers) == 0 {
+		if len(ts.heap) == 0 {
 			return
 		}
@@ -403,9 +434,9 @@ func cleantimers(pp *p) {
 			return
 		}
 
-		t := pp.timers[0]
-		if t.pp.ptr() != pp {
-			throw("cleantimers: bad p")
+		t := ts.heap[0]
+		if t.ts != ts {
+			throw("bad ts")
 		}
 
 		if t.state.Load()&timerNextWhen == 0 {
@@ -414,7 +445,7 @@ func cleantimers(pp *p) {
 		}
 
 		state, mp := t.lock()
-		state, updated := t.updateWhen(state, pp)
+		state, updated := t.updateWhen(state, ts)
 		t.unlock(state, mp)
 		if !updated {
 			// Head of timers does not need adjustment.
@@ -424,78 +455,83 @@ func cleantimers(pp *p) {
 	}
 }
 
-// adoptTimers adopts any timers from pp into the local P,
-// because pp is being destroyed.
-func adoptTimers(pp *p) {
-	if len(pp.timers) > 0 {
-		plocal := getg().m.p.ptr()
+// take moves any timers from src into ts
+// and then clears the timer state from src,
+// because src is being destroyed.
+// The caller must not have locked either timers.
+// For now this is only called when the world is stopped.
+func (ts *timers) take(src *timers) {
+	assertWorldStopped()
+	if len(src.heap) > 0 {
 		// The world is stopped, but we acquire timersLock to
 		// protect against sysmon calling timeSleepUntil.
-		// This is the only case where we hold the timersLock of
-		// more than one P, so there are no deadlock concerns.
-		lock(&plocal.timersLock)
-		lock(&pp.timersLock)
-		moveTimers(plocal, pp.timers)
-		pp.timers = nil
-		pp.numTimers.Store(0)
-		pp.deletedTimers.Store(0)
-		pp.timer0When.Store(0)
-		unlock(&pp.timersLock)
-		unlock(&plocal.timersLock)
+		// This is the only case where we hold more than one ts.lock,
+		// so there are no deadlock concerns.
+		lock(&src.lock)
+		lock(&ts.lock)
+		ts.move(src.heap)
+		src.heap = nil
+		src.len.Store(0)
+		src.zombies.Store(0)
+		src.timer0When.Store(0)
+		unlock(&ts.lock)
+		unlock(&src.lock)
 	}
 }
 
 // moveTimers moves a slice of timers to pp. The slice has been taken
 // from a different P.
 // This is currently called when the world is stopped, but the caller
-// is expected to have locked the timers for pp.
-func moveTimers(pp *p, timers []*timer) {
+// is expected to have locked ts.
+func (ts *timers) move(timers []*timer) {
+	assertLockHeld(&ts.lock)
 	for _, t := range timers {
 		state, mp := t.lock()
-		t.pp = 0
+		t.ts = nil
 		state, _ = t.updateWhen(state, nil)
 		// Unlock before add, to avoid append (allocation)
 		// while holding lock. This would be correct even if the world wasn't
 		// stopped (but it is), and it makes staticlockranking happy.
 		if state&timerHeaped != 0 {
-			t.pp.set(pp)
+			t.ts = ts
 		}
 		t.unlock(state, mp)
 		if state&timerHeaped != 0 {
-			doaddtimer(pp, t)
+			ts.add(t)
 		}
 	}
 }
 
-// adjusttimers looks through the timers in the current P's heap for
+// adjust looks through the timers in ts.heap for
 // any timers that have been modified to run earlier, and puts them in
 // the correct place in the heap. While looking for those timers,
 // it also moves timers that have been modified to run later,
-// and removes deleted timers. The caller must have locked the timers for pp.
-func adjusttimers(pp *p, now int64, force bool) {
+// and removes deleted timers. The caller must have locked ts.
+func (ts *timers) adjust(now int64, force bool) {
+	assertLockHeld(&ts.lock)
 	// If we haven't yet reached the time of the earliest timerModified
 	// timer, don't do anything. This speeds up programs that adjust
 	// a lot of timers back and forth if the timers rarely expire.
 	// We'll postpone looking through all the adjusted timers until
 	// one would actually expire.
 	if !force {
-		first := pp.timerModifiedEarliest.Load()
+		first := ts.timerModifiedEarliest.Load()
 		if first == 0 || first > now {
 			if verifyTimers {
-				verifyTimerHeap(pp)
+				ts.verify()
 			}
 			return
 		}
 	}
 
 	// We are going to clear all timerModified timers.
-	pp.timerModifiedEarliest.Store(0)
+	ts.timerModifiedEarliest.Store(0)
 
 	changed := false
-	for i := 0; i < len(pp.timers); i++ {
-		t := pp.timers[i]
-		if t.pp.ptr() != pp {
-			throw("adjusttimers: bad p")
+	for i := 0; i < len(ts.heap); i++ {
+		t := ts.heap[i]
+		if t.ts != ts {
+			throw("bad ts")
 		}
 
 		state, mp := t.lock()
@@ -506,12 +542,12 @@ func adjusttimers(pp *p, now int64, force bool) {
 		if updated {
 			changed = true
 			if state&timerHeaped == 0 {
-				n := len(pp.timers)
-				pp.timers[i] = pp.timers[n-1]
-				pp.timers[n-1] = nil
-				pp.timers = pp.timers[:n-1]
-				t.pp = 0
-				pp.deletedTimers.Add(-1)
+				n := len(ts.heap)
+				ts.heap[i] = ts.heap[n-1]
+				ts.heap[n-1] = nil
+				ts.heap = ts.heap[:n-1]
+				t.ts = nil
+				ts.zombies.Add(-1)
 				i--
 			}
 		}
@@ -519,31 +555,31 @@ func adjusttimers(pp *p, now int64, force bool) {
 	}
 
 	if changed {
-		initTimerHeap(pp.timers)
-		updateTimer0When(pp)
+		ts.initHeap()
+		ts.updateTimer0When()
 	}
 
 	if verifyTimers {
-		verifyTimerHeap(pp)
+		ts.verify()
 	}
 }
 
-// nobarrierWakeTime looks at P's timers and returns the time when we
+// wakeTime looks at ts's timers and returns the time when we
 // should wake up the netpoller. It returns 0 if there are no timers.
-// This function is invoked when dropping a P, and must run without
+// This function is invoked when dropping a P, so it must run without
 // any write barriers.
 //
 //go:nowritebarrierrec
-func nobarrierWakeTime(pp *p) int64 {
-	next := pp.timer0When.Load()
-	nextAdj := pp.timerModifiedEarliest.Load()
+func (ts *timers) wakeTime() int64 {
+	next := ts.timer0When.Load()
+	nextAdj := ts.timerModifiedEarliest.Load()
 	if next == 0 || (nextAdj != 0 && nextAdj < next) {
 		next = nextAdj
 	}
 	return next
 }
 
-// checkTimers runs any timers for the P that are ready.
+// check runs any timers in ts that are ready.
 // If now is not 0 it is the current time.
 // It returns the passed time or the current time if now was passed as 0.
 // and the time when the next timer should run or 0 if there is no next timer,
@@ -553,11 +589,11 @@ func nobarrierWakeTime(pp *p) int64 {
 // We pass now in and out to avoid extra calls of nanotime.
 //
 //go:yeswritebarrierrec
-func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
+func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) {
 	// If it's not yet time for the first timer, or the first adjusted
 	// timer, then there is nothing to do.
-	next := pp.timer0When.Load()
-	nextAdj := pp.timerModifiedEarliest.Load()
+	next := ts.timer0When.Load()
+	nextAdj := ts.timerModifiedEarliest.Load()
 	if next == 0 || (nextAdj != 0 && nextAdj < next) {
 		next = nextAdj
 	}
@@ -570,28 +606,23 @@ func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
 	if now == 0 {
 		now = nanotime()
 	}
-	if now < next {
-		// Next timer is not ready to run, but keep going
-		// if we would clear deleted timers.
-		// This corresponds to the condition below where
-		// we decide whether to call clearDeletedTimers.
-		if pp != getg().m.p.ptr() || int(pp.deletedTimers.Load()) <= int(pp.numTimers.Load()/4) {
-			return now, next, false
-		}
-	}
 
-	lock(&pp.timersLock)
+	// If this is the local P, and there are a lot of deleted timers,
+	// clear them out. We only do this for the local P to reduce
+	// lock contention on timersLock.
+	force := ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
 
-	if len(pp.timers) > 0 {
-		// If this is the local P, and there are a lot of deleted timers,
-		// clear them out. We only do this for the local P to reduce
-		// lock contention on timersLock.
-		force := pp == getg().m.p.ptr() && int(pp.deletedTimers.Load()) > len(pp.timers)/4
-		adjusttimers(pp, now, force)
-		for len(pp.timers) > 0 {
-			// Note that runtimer may temporarily unlock
-			// pp.timersLock.
-			if tw := runtimer(pp, now); tw != 0 {
+	if now < next && !force {
+		// Next timer is not ready to run, and we don't need to clear deleted timers.
+		return now, next, false
+	}
+
+	lock(&ts.lock)
+	if len(ts.heap) > 0 {
+		ts.adjust(now, force)
+		for len(ts.heap) > 0 {
+			// Note that runtimer may temporarily unlock ts.
+			if tw := ts.run(now); tw != 0 {
 				if tw > 0 {
 					pollUntil = tw
 				}
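The early return above is gated on the same condition before and after this change; the rewrite just names the condition force and computes it once, up front, so the same value can feed ts.adjust. A standalone restatement of that predicate (plain values stand in for the runtime's atomics, and shouldForce is a hypothetical name used only here):

package main

import "fmt"

// shouldForce (hypothetical name) restates the force predicate from
// (*timers).check above: on the local P, force a cleanup pass once more
// than a quarter of the heap entries are deleted ("zombie") timers.
func shouldForce(isLocalP bool, zombies, heapLen int) bool {
	return isLocalP && zombies > heapLen/4
}

func main() {
	fmt.Println(shouldForce(true, 2, 10))  // false: 2 <= 10/4
	fmt.Println(shouldForce(true, 3, 10))  // true: 3 > 10/4
	fmt.Println(shouldForce(false, 9, 10)) // false: only the local P forces cleanup
}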
@@ -601,38 +632,39 @@ func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
 		}
 	}
 
-	unlock(&pp.timersLock)
+	unlock(&ts.lock)
 
 	return now, pollUntil, ran
 }
 
-// runtimer examines the first timer in timers. If it is ready based on now,
+// run examines the first timer in ts. If it is ready based on now,
 // it runs the timer and removes or updates it.
 // Returns 0 if it ran a timer, -1 if there are no more timers, or the time
 // when the first timer should run.
-// The caller must have locked the timers for pp.
-// If a timer is run, this will temporarily unlock the timers.
+// The caller must have locked ts.
+// If a timer is run, this will temporarily unlock ts.
 //
 //go:systemstack
-func runtimer(pp *p, now int64) int64 {
+func (ts *timers) run(now int64) int64 {
+	assertLockHeld(&ts.lock)
 Redo:
-	if len(pp.timers) == 0 {
+	if len(ts.heap) == 0 {
 		return -1
 	}
-	t := pp.timers[0]
-	if t.pp.ptr() != pp {
-		throw("runtimer: bad p")
+	t := ts.heap[0]
+	if t.ts != ts {
+		throw("bad ts")
 	}
 
 	if t.state.Load()&timerNextWhen == 0 && t.when > now {
 		// Fast path: not ready to run.
 		// The access of t.when is protected by the caller holding
-		// pp.timersLock, even though t itself is unlocked.
+		// ts.lock, even though t itself is unlocked.
 		return t.when
 	}
 
 	state, mp := t.lock()
-	state, updated := t.updateWhen(state, pp)
+	state, updated := t.updateWhen(state, ts)
 	if updated {
 		t.unlock(state, mp)
 		goto Redo
@@ -648,22 +680,24 @@ Redo:
 		return t.when
 	}
 
-	unlockAndRunTimer(pp, t, now, state, mp)
+	ts.unlockAndRun(t, now, state, mp)
+	assertLockHeld(&ts.lock) // t is unlocked now, but not ts
 	return 0
 }
 
-// unlockAndRunTimer unlocks and runs a single timer.
-// The caller must have locked the timers for pp.
+// unlockAndRun unlocks and runs a single timer.
+// The caller must have locked ts.
 // This will temporarily unlock the timers while running the timer function.
 //
 //go:systemstack
-func unlockAndRunTimer(pp *p, t *timer, now int64, state uint32, mp *m) {
+func (ts *timers) unlockAndRun(t *timer, now int64, state uint32, mp *m) {
+	assertLockHeld(&ts.lock)
 	if raceenabled {
-		ppcur := getg().m.p.ptr()
-		if ppcur.timerRaceCtx == 0 {
-			ppcur.timerRaceCtx = racegostart(abi.FuncPCABIInternal(runtimer) + sys.PCQuantum)
+		tsLocal := &getg().m.p.ptr().timers
+		if tsLocal.raceCtx == 0 {
+			tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum)
 		}
-		raceacquirectx(ppcur.timerRaceCtx, unsafe.Pointer(t))
+		raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t))
 	}
 
 	f := t.f
@@ -680,23 +714,21 @@ func unlockAndRunTimer(pp *p, t *timer, now int64, state uint32, mp *m) {
 	} else {
 		t.nextWhen = 0
 	}
-	state, _ = t.updateWhen(state|timerNextWhen, pp)
+	state, _ = t.updateWhen(state|timerNextWhen, ts)
 	t.unlock(state, mp)
 
 	if raceenabled {
 		// Temporarily use the current P's racectx for g0.
 		gp := getg()
 		if gp.racectx != 0 {
-			throw("runOneTimer: unexpected racectx")
+			throw("unexpected racectx")
 		}
-		gp.racectx = gp.m.p.ptr().timerRaceCtx
+		gp.racectx = gp.m.p.ptr().timers.raceCtx
 	}
 
-	unlock(&pp.timersLock)
+	unlock(&ts.lock)
 
 	f(arg, seq)
-
-	lock(&pp.timersLock)
+	lock(&ts.lock)
 
 	if raceenabled {
 		gp := getg()
@@ -730,25 +762,26 @@ func unlockAndRunTimer(pp *p, t *timer, now int64, state uint32, mp *m) {
 // TODO(prattmic): Additional targeted updates may improve the above cases.
 // e.g., updating the mask when stealing a timer.
 func updateTimerPMask(pp *p) {
-	if pp.numTimers.Load() > 0 {
+	if pp.timers.len.Load() > 0 {
 		return
 	}
 
 	// Looks like there are no timers, however another P may transiently
 	// decrement numTimers when handling a timerModified timer in
 	// checkTimers. We must take timersLock to serialize with these changes.
-	lock(&pp.timersLock)
-	if pp.numTimers.Load() == 0 {
+	lock(&pp.timers.lock)
+	if pp.timers.len.Load() == 0 {
 		timerpMask.clear(pp.id)
 	}
-	unlock(&pp.timersLock)
+	unlock(&pp.timers.lock)
 }
 
-// verifyTimerHeap verifies that the timer heap is in a valid state.
+// verifyTimerHeap verifies that the timers is in a valid state.
 // This is only for debugging, and is only called if verifyTimers is true.
-// The caller must have locked the timers.
-func verifyTimerHeap(pp *p) {
-	for i, t := range pp.timers {
+// The caller must have locked ts.
+func (ts *timers) verify() {
+	assertLockHeld(&ts.lock)
+	for i, t := range ts.heap {
 		if i == 0 {
 			// First timer has no parent.
 			continue
@@ -756,38 +789,38 @@ func verifyTimerHeap(pp *p) {
 
 		// The heap is 4-ary. See siftupTimer and siftdownTimer.
 		p := (i - 1) / 4
-		if t.when < pp.timers[p].when {
-			print("bad timer heap at ", i, ": ", p, ": ", pp.timers[p].when, ", ", i, ": ", t.when, "\n")
+		if t.when < ts.heap[p].when {
+			print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", t.when, "\n")
 			throw("bad timer heap")
 		}
 	}
-	if numTimers := int(pp.numTimers.Load()); len(pp.timers) != numTimers {
-		println("timer heap len", len(pp.timers), "!= numTimers", numTimers)
+	if n := int(ts.len.Load()); len(ts.heap) != n {
+		println("timer heap len", len(ts.heap), "!= atomic len", n)
 		throw("bad timer heap len")
 	}
 }
 
-// updateTimer0When sets the P's timer0When field.
-// The caller must have locked the timers for pp.
-func updateTimer0When(pp *p) {
-	if len(pp.timers) == 0 {
-		pp.timer0When.Store(0)
+// updateTimer0When sets ts.timer0When to ts.heap[0].when.
+// The caller must have locked ts.
+func (ts *timers) updateTimer0When() {
+	assertLockHeld(&ts.lock)
+	if len(ts.heap) == 0 {
+		ts.timer0When.Store(0)
 	} else {
-		pp.timer0When.Store(pp.timers[0].when)
+		ts.timer0When.Store(ts.heap[0].when)
 	}
 }
 
-// updateTimerModifiedEarliest updates the recorded nextwhen field of the
-// earlier timerModifiedEarier value.
-// The timers for pp will not be locked.
-func updateTimerModifiedEarliest(pp *p, nextwhen int64) {
+// updateTimerModifiedEarliest updates ts.timerModifiedEarliest to be <= nextwhen.
+// The timers for ts need not be locked.
+func (ts *timers) updateTimerModifiedEarliest(nextwhen int64) {
 	for {
-		old := pp.timerModifiedEarliest.Load()
+		old := ts.timerModifiedEarliest.Load()
 		if old != 0 && old < nextwhen {
 			return
 		}
 
-		if pp.timerModifiedEarliest.CompareAndSwap(old, nextwhen) {
+		if ts.timerModifiedEarliest.CompareAndSwap(old, nextwhen) {
 			return
 		}
 	}
@@ -808,12 +841,12 @@ func timeSleepUntil() int64 {
 			continue
 		}
 
-		w := pp.timer0When.Load()
+		w := pp.timers.timer0When.Load()
 		if w != 0 && w < next {
 			next = w
 		}
 
-		w = pp.timerModifiedEarliest.Load()
+		w = pp.timers.timerModifiedEarliest.Load()
 		if w != 0 && w < next {
 			next = w
 		}
@@ -831,10 +864,10 @@ func timeSleepUntil() int64 {
 // "panic holding locks" message. Instead, we panic while not
 // holding a lock.
 
-// siftupTimer puts the timer at position i in the right place
+// siftUp puts the timer at position i in the right place
 // in the heap by moving it up toward the top of the heap.
-// It returns the smallest changed index.
-func siftupTimer(t []*timer, i int) int {
+func (ts *timers) siftUp(i int) {
+	t := ts.heap
 	if i >= len(t) {
 		badTimer()
 	}
@@ -854,12 +887,12 @@ func siftupTimer(t []*timer, i int) int {
 	if tmp != t[i] {
 		t[i] = tmp
 	}
-	return i
 }
 
-// siftdownTimer puts the timer at position i in the right place
+// siftDown puts the timer at position i in the right place
 // in the heap by moving it down toward the bottom of the heap.
-func siftdownTimer(t []*timer, i int) {
+func (ts *timers) siftDown(i int) {
+	t := ts.heap
 	n := len(t)
 	if i >= n {
 		badTimer()
@@ -902,16 +935,16 @@ func siftdownTimer(t []*timer, i int) {
 		}
 	}
 
-// initTimerHeap reestablishes the heap order in the slice t.
-// It takes O(n) time for n=len(t), not the O(n log n) of n repeated add operations.
-func initTimerHeap(t []*timer) {
+// initHeap reestablishes the heap order in the slice ts.heap.
+// It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.
+func (ts *timers) initHeap() {
 	// Last possible element that needs sifting down is parent of last element;
 	// last element is len(t)-1; parent of last element is (len(t)-1-1)/4.
-	if len(t) <= 1 {
+	if len(ts.heap) <= 1 {
 		return
 	}
-	for i := (len(t) - 1 - 1) / 4; i >= 0; i-- {
-		siftdownTimer(t, i)
+	for i := (len(ts.heap) - 1 - 1) / 4; i >= 0; i-- {
+		ts.siftDown(i)
 	}
 }
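Before the diff moves on to package time: the heap that siftUp, siftDown, initHeap, and verify operate on is 4-ary, so the parent of element i is (i-1)/4 (the check in verify) and the children of element i are 4i+1 through 4i+4. A standalone sketch of that index arithmetic (parent and firstChild are helper names used only here):

package main

import "fmt"

// Index arithmetic for a 4-ary heap stored in a slice, matching the
// parent computation p := (i - 1) / 4 used by (*timers).verify above.
func parent(i int) int     { return (i - 1) / 4 }
func firstChild(i int) int { return 4*i + 1 }

func main() {
	fmt.Println("children of 0:", firstChild(0), "through", firstChild(0)+3) // 1 through 4
	fmt.Println("children of 1:", firstChild(1), "through", firstChild(1)+3) // 5 through 8
	for _, i := range []int{1, 4, 5, 8} {
		fmt.Printf("parent(%d) = %d\n", i, parent(i)) // 0, 0, 1, 1
	}
}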
--- a/src/time/sleep.go
+++ b/src/time/sleep.go
@@ -4,6 +4,8 @@
 
 package time
 
+import "unsafe"
+
 // Sleep pauses the current goroutine for at least the duration d.
 // A negative or zero duration causes Sleep to return immediately.
 func Sleep(d Duration)
@@ -11,7 +13,7 @@ func Sleep(d Duration)
 // Interface to timers implemented in package runtime.
 // Must be in sync with ../runtime/time.go:/^type timer
 type runtimeTimer struct {
-	pp     uintptr
+	ts     unsafe.Pointer
 	when   int64
 	period int64
 	f      func(any, uintptr) // NOTE: must not be closure