mirror of https://github.com/golang/go.git
runtime: report finalizer and cleanup queue length with checkfinalizer>0
This change adds tracking for approximate finalizer and cleanup queue
lengths. These lengths are reported once every GC cycle as a single
line printed to stderr when GODEBUG=checkfinalizer>0.

This change lays the groundwork for runtime/metrics metrics to produce
the same values.

For #72948.
For #72950.

Change-Id: I081721238a0fc4c7e5bee2dbaba6cfb4120d1a33
Reviewed-on: https://go-review.googlesource.com/c/go/+/671437
Reviewed-by: Michael Pratt <mpratt@google.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
This commit is contained in:
parent 2aac5a5cba
commit 0d42cebacd
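To see the new report end to end, the small program below is an illustrative sketch (not part of this CL): it attaches finalizers and cleanups to short-lived objects and forces GC cycles so the per-cycle line has something to count. It assumes the GODEBUG knob is spelled checkfinalizers, matching the debug.checkfinalizers field tested in the mgc.go hunk below; the expected stderr line format comes from the println call in that same hunk.

// Illustrative sketch: exercise the report added by this CL.
// Run as, for example:
//
//	GODEBUG=checkfinalizers=1 go run main.go
//
// and watch stderr for lines like
// "checkfinalizers: queue: N finalizers + M cleanups" once per GC cycle.
package main

import (
	"runtime"
	"time"
)

func main() {
	for i := 0; i < 1000; i++ {
		p := new([64]byte)
		runtime.SetFinalizer(p, func(*[64]byte) {})

		// AddCleanup (Go 1.24+): the argument passed to the cleanup must not
		// be the pointer itself, so use a dummy value.
		q := new(int)
		runtime.AddCleanup(q, func(struct{}) {}, struct{}{})
	}

	// The first cycle finds the dead objects and queues their finalizers and
	// cleanups; every mark termination prints the report.
	runtime.GC()
	runtime.GC()

	// Give the finalizer and cleanup goroutines a moment to drain the queues.
	time.Sleep(100 * time.Millisecond)
}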
--- a/src/runtime/mcleanup.go
+++ b/src/runtime/mcleanup.go
@@ -336,6 +336,20 @@ type cleanupQueue struct {
 	//
 	// Read without lock, written only with lock held.
 	needg atomic.Uint32
+
+	// Cleanup queue stats.
+
+	// queued represents a monotonic count of queued cleanups. This is sharded across
+	// Ps via the field cleanupsQueued in each p, so reading just this value is insufficient.
+	// In practice, this value only includes the queued count of dead Ps.
+	//
+	// Writes are protected by STW.
+	queued uint64
+
+	// executed is a monotonic count of executed cleanups.
+	//
+	// Read and updated atomically.
+	executed atomic.Uint64
 }
 
 // addWork indicates that n units of parallelizable work have been added to the queue.
@@ -387,6 +401,7 @@ func (q *cleanupQueue) enqueue(fn *funcval) {
 		pp.cleanups = nil
 		q.addWork(1)
 	}
+	pp.cleanupsQueued++
 	releasem(mp)
 }
 
@@ -586,6 +601,19 @@ func (q *cleanupQueue) endRunningCleanups() {
 	releasem(mp)
 }
 
+func (q *cleanupQueue) readQueueStats() (queued, executed uint64) {
+	executed = q.executed.Load()
+	queued = q.queued
+
+	// N.B. This is inconsistent, but that's intentional. It's just an estimate.
+	// Read this _after_ reading executed to decrease the chance that we observe
+	// an inconsistency in the statistics (executed > queued).
+	for _, pp := range allp {
+		queued += pp.cleanupsQueued
+	}
+	return
+}
+
 func maxCleanupGs() uint32 {
 	// N.B. Left as a function to make changing the policy easier.
 	return uint32(max(gomaxprocs/4, 1))
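To make the counting scheme concrete, here is a standalone sketch (illustrative names, not runtime code) of the same pattern readQueueStats uses: queued work is counted on per-worker shards the way cleanupsQueued lives on each p, completions go to one shared monotonic counter like executed, and the queue length is the clamped difference of two deliberately inconsistent reads. Reading executed first, as the comment above notes, biases any race toward over-counting the queue rather than observing executed > queued.

// Standalone sketch of the sharded queued/executed counters (illustrative
// names; the runtime's shards are plain uint64s owned by each P and folded
// into cleanupQueue.queued only when a P is destroyed).
package main

import (
	"fmt"
	"sync/atomic"
)

type shardedQueueStats struct {
	queuedByWorker []atomic.Uint64 // like p.cleanupsQueued, one shard per worker
	executed       atomic.Uint64   // like cleanupQueue.executed
}

func (s *shardedQueueStats) enqueue(worker int) { s.queuedByWorker[worker].Add(1) }
func (s *shardedQueueStats) complete(n uint64)  { s.executed.Add(n) }

// approxLen mirrors readQueueStats plus the caller's clamp: read executed
// first so that, if the counters move while the shards are summed, the error
// shows up as extra queued work rather than executed > queued.
func (s *shardedQueueStats) approxLen() uint64 {
	executed := s.executed.Load()
	var queued uint64
	for i := range s.queuedByWorker {
		queued += s.queuedByWorker[i].Load()
	}
	if queued < executed {
		return 0
	}
	return queued - executed
}

func main() {
	s := &shardedQueueStats{queuedByWorker: make([]atomic.Uint64, 4)}
	s.enqueue(0)
	s.enqueue(1)
	s.enqueue(3)
	s.complete(1)
	fmt.Println("approximate queue length:", s.approxLen()) // 2
}

The clamp at zero plays the same role as the max(int64(cq)-int64(ce), 0) in the gcMarkTermination hunk further down.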
@@ -636,6 +664,7 @@ func runCleanups() {
 			}
 		}
 		gcCleanups.endRunningCleanups()
+		gcCleanups.executed.Add(int64(b.n))
 
 		atomic.Store(&b.n, 0) // Synchronize with markroot. See comment in cleanupBlockHeader.
 		gcCleanups.free.push(&b.lfnode)
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -49,6 +49,8 @@ var (
 	finq *finBlock // list of finalizers that are to be executed
 	finc *finBlock // cache of free blocks
 	finptrmask [finBlockSize / goarch.PtrSize / 8]byte
+	finqueued uint64 // monotonic count of queued finalizers
+	finexecuted uint64 // monotonic count of executed finalizers
 )
 
 var allfin *finBlock // list of all blocks
@@ -108,6 +110,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
 	}
 
 	lock(&finlock)
+
 	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
 		if finc == nil {
 			finc = (*finBlock)(persistentalloc(finBlockSize, 0, &memstats.gcMiscSys))
@@ -141,6 +144,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
 	f.fint = fint
 	f.ot = ot
 	f.arg = p
+	finqueued++
 	unlock(&finlock)
 	fingStatus.Or(fingWake)
 }
@@ -177,6 +181,14 @@ func finalizercommit(gp *g, lock unsafe.Pointer) bool {
 	return true
 }
 
+func finReadQueueStats() (queued, executed uint64) {
+	lock(&finlock)
+	queued = finqueued
+	executed = finexecuted
+	unlock(&finlock)
+	return
+}
+
 // This is the goroutine that runs all of the finalizers.
 func runFinalizers() {
 	var (
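The finalizer counters added above take the simpler route: there is no per-P sharding, so both counts sit behind the existing finlock and finReadQueueStats reads them as a consistent pair. Below is a minimal standalone sketch of that variant, with a sync.Mutex standing in for the runtime-internal finlock (names are illustrative).

// Minimal sketch of the lock-protected variant used for finalizers
// (illustrative names; the runtime uses its internal finlock, not sync.Mutex).
package main

import (
	"fmt"
	"sync"
)

type lockedQueueStats struct {
	mu       sync.Mutex
	queued   uint64 // like finqueued
	executed uint64 // like finexecuted
}

func (s *lockedQueueStats) addQueued() {
	s.mu.Lock()
	s.queued++
	s.mu.Unlock()
}

func (s *lockedQueueStats) addExecuted(n uint64) {
	s.mu.Lock()
	s.executed += n
	s.mu.Unlock()
}

// readStats mirrors finReadQueueStats: both counters are read under one lock,
// so the pair is always internally consistent.
func (s *lockedQueueStats) readStats() (queued, executed uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.queued, s.executed
}

func main() {
	var s lockedQueueStats
	s.addQueued()
	s.addQueued()
	s.addExecuted(1)
	fmt.Println(s.readStats()) // 2 1
}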
@@ -204,7 +216,8 @@ func runFinalizers() {
 			racefingo()
 		}
 		for fb != nil {
-			for i := fb.cnt; i > 0; i-- {
+			n := fb.cnt
+			for i := n; i > 0; i-- {
 				f := &fb.fin[i-1]
 
 				var regs abi.RegArgs
@@ -270,6 +283,7 @@ func runFinalizers() {
 			}
 			next := fb.next
 			lock(&finlock)
+			finexecuted += uint64(n)
 			fb.next = finc
 			finc = fb
 			unlock(&finlock)
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -1337,6 +1337,19 @@ func gcMarkTermination(stw worldStop) {
 		printunlock()
 	}
 
+	// Print finalizer/cleanup queue length. Like gctrace, do this before the next GC starts.
+	// The fact that the next GC might start is not that problematic here, but acts as a convenient
+	// lock on printing this information (so it cannot overlap with itself from the next GC cycle).
+	if debug.checkfinalizers > 0 {
+		fq, fe := finReadQueueStats()
+		fn := max(int64(fq)-int64(fe), 0)
+
+		cq, ce := gcCleanups.readQueueStats()
+		cn := max(int64(cq)-int64(ce), 0)
+
+		println("checkfinalizers: queue:", fn, "finalizers +", cn, "cleanups")
+	}
+
 	// Set any arena chunks that were deferred to fault.
 	lock(&userArenaState.lock)
 	faultList := userArenaState.fault
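For anyone watching stderr for these reports, println renders its operands space-separated, so the hunk above emits a single line such as checkfinalizers: queue: 3 finalizers + 12 cleanups. The helper below is an illustrative sketch that parses exactly that shape; the format is inferred from the println call and is debug output, not a stable interface.

// Illustrative helper: parse the report line printed by the hunk above.
// The format is inferred from the println call ("checkfinalizers: queue:",
// fn, "finalizers +", cn, "cleanups") and may change without notice.
package main

import "fmt"

func parseCheckfinalizersLine(line string) (finalizers, cleanups int64, err error) {
	_, err = fmt.Sscanf(line, "checkfinalizers: queue: %d finalizers + %d cleanups", &finalizers, &cleanups)
	return
}

func main() {
	f, c, err := parseCheckfinalizersLine("checkfinalizers: queue: 3 finalizers + 12 cleanups")
	fmt.Println(f, c, err) // 3 12 <nil>
}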
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -5743,6 +5743,8 @@ func (pp *p) destroy() {
 		pp.raceprocctx = 0
 	}
 	pp.gcAssistTime = 0
+	gcCleanups.queued += pp.cleanupsQueued
+	pp.cleanupsQueued = 0
 	pp.status = _Pdead
 }
 
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -733,6 +733,7 @@ type p struct {
 
 	// Cleanups.
 	cleanups *cleanupBlock
+	cleanupsQueued uint64 // monotonic count of cleanups queued by this P
 
 	// maxStackScanDelta accumulates the amount of stack space held by
 	// live goroutines (i.e. those eligible for stack scanning).