mirror of https://github.com/golang/go.git
runtime: break down memstats.gc_sys
This change breaks apart gc_sys into three distinct pieces. Two of those pieces come out of heap_sys, since they are allocated from the page heap. The third comes from memory mapped by e.g. persistentalloc, which better fits the purpose of a sysMemStat. Also, rename gc_sys to gcMiscSys.

Change-Id: I098789170052511e7b31edbcdc9a53e5c24573f7
Reviewed-on: https://go-review.googlesource.com/c/go/+/246973
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
parent 39e335ac06
commit ad863ba32a
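The split described in the commit message can be illustrated with a short sketch. This is not runtime code: it is a minimal, standalone Go program that models how the exported MemStats.GCSys figure is reassembled from the three new counters (gcMiscSys, gcWorkBufInUse, gcProgPtrScalarBitsInUse), using plain uint64 fields and sync/atomic in place of the runtime's sysMemStat and internal atomics.

package main

import (
	"fmt"
	"sync/atomic"
)

// gcStats stands in for the relevant slice of runtime.mstats.
// Field names mirror the diff below; the types are simplified.
type gcStats struct {
	gcMiscSys                uint64 // was gc_sys: persistentalloc/sysAlloc'd GC metadata
	gcWorkBufInUse           uint64 // work-buffer spans, carved out of heap_sys
	gcProgPtrScalarBitsInUse uint64 // pointer/scalar-bits spans, carved out of heap_sys
}

// gcSys returns the aggregate that MemStats.GCSys continues to report.
func (s *gcStats) gcSys() uint64 {
	return atomic.LoadUint64(&s.gcMiscSys) +
		atomic.LoadUint64(&s.gcWorkBufInUse) +
		atomic.LoadUint64(&s.gcProgPtrScalarBitsInUse)
}

func main() {
	var s gcStats
	atomic.AddUint64(&s.gcMiscSys, 1<<20)                // e.g. finalizer blocks, heap arena metadata
	atomic.AddUint64(&s.gcWorkBufInUse, 2<<20)           // spanAllocWorkBuf spans
	atomic.AddUint64(&s.gcProgPtrScalarBitsInUse, 1<<19) // spanAllocPtrScalarBits spans
	fmt.Printf("GCSys = %d bytes\n", s.gcSys())
}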
@@ -540,6 +540,9 @@ func dumpms() {
 }

 func dumpmemstats() {
+	// These ints should be identical to the exported
+	// MemStats structure and should be ordered the same
+	// way too.
 	dumpint(tagMemStats)
 	dumpint(memstats.alloc)
 	dumpint(memstats.total_alloc)
@@ -560,7 +563,7 @@ func dumpmemstats() {
 	dumpint(memstats.mcache_inuse)
 	dumpint(memstats.mcache_sys.load())
 	dumpint(memstats.buckhash_sys.load())
-	dumpint(memstats.gc_sys.load())
+	dumpint(memstats.gcMiscSys.load() + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse)
 	dumpint(memstats.other_sys.load())
 	dumpint(memstats.next_gc)
 	dumpint(memstats.last_gc_unix)
@@ -743,9 +743,9 @@ mapped:
 			throw("arena already initialized")
 		}
 		var r *heapArena
-		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
+		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
 		if r == nil {
-			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
+			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
 			if r == nil {
 				throw("out of memory allocating heap arena metadata")
 			}
@@ -757,7 +757,7 @@ mapped:
 			if size == 0 {
 				size = physPageSize
 			}
-			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
+			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
 			if newArray == nil {
 				throw("out of memory allocating allArenas")
 			}
@@ -41,7 +41,7 @@ func startCheckmarks() {

 		if bitmap == nil {
 			// Allocate bitmap on first use.
-			bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gc_sys))
+			bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gcMiscSys))
 			if bitmap == nil {
 				throw("out of memory allocating checkmarks bitmap")
 			}
@@ -88,7 +88,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
 	lock(&finlock)
 	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
 		if finc == nil {
-			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
+			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
 			finc.alllink = allfin
 			allfin = finc
 			if finptrmask[0] == 0 {
@@ -713,7 +713,7 @@ func (h *mheap) init() {
 		h.central[i].mcentral.init(spanClass(i))
 	}

-	h.pages.init(&h.lock, &memstats.gc_sys)
+	h.pages.init(&h.lock, &memstats.gcMiscSys)
 }

 // reclaim sweeps and reclaims at least npage pages into the heap.
@@ -1230,8 +1230,10 @@ HaveSpan:
 		atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
 	case spanAllocStack:
 		atomic.Xadd64(&memstats.stacks_inuse, int64(nbytes))
-	case spanAllocPtrScalarBits, spanAllocWorkBuf:
-		memstats.gc_sys.add(int64(nbytes))
+	case spanAllocWorkBuf:
+		atomic.Xadd64(&memstats.gcWorkBufInUse, int64(nbytes))
+	case spanAllocPtrScalarBits:
+		atomic.Xadd64(&memstats.gcProgPtrScalarBitsInUse, int64(nbytes))
 	}
 	if typ.manual() {
 		// Manually managed memory doesn't count toward heap_sys.
@@ -1406,8 +1408,10 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
 		atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
 	case spanAllocStack:
 		atomic.Xadd64(&memstats.stacks_inuse, -int64(nbytes))
-	case spanAllocPtrScalarBits, spanAllocWorkBuf:
-		memstats.gc_sys.add(-int64(nbytes))
+	case spanAllocWorkBuf:
+		atomic.Xadd64(&memstats.gcWorkBufInUse, -int64(nbytes))
+	case spanAllocPtrScalarBits:
+		atomic.Xadd64(&memstats.gcProgPtrScalarBitsInUse, -int64(nbytes))
 	}
 	if typ.manual() {
 		// Manually managed memory doesn't count toward heap_sys, so add it back.
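The two hunks above (from mheap's span allocation and span free paths) are symmetric: each span type adds nbytes to its in-use counter on allocation and subtracts the same amount on free. Below is a minimal, standalone sketch of that paired-accounting pattern, not the runtime's implementation: the span type constants and counter names echo the diff, and the standard library's sync/atomic stands in for runtime/internal/atomic's Xadd64.

package main

import (
	"fmt"
	"sync/atomic"
)

type spanAllocType int

const (
	spanAllocHeap spanAllocType = iota
	spanAllocStack
	spanAllocWorkBuf
	spanAllocPtrScalarBits
)

// stats holds the in-use counters adjusted by the switch in the hunks above.
type stats struct {
	heapInUse                int64
	stacksInUse              int64
	gcWorkBufInUse           int64
	gcProgPtrScalarBitsInUse int64
}

// account adjusts the counter matching typ by delta bytes; the allocation
// path would pass +nbytes and the free path -nbytes, so each counter nets
// out to the bytes currently in use.
func (s *stats) account(typ spanAllocType, delta int64) {
	switch typ {
	case spanAllocHeap:
		atomic.AddInt64(&s.heapInUse, delta)
	case spanAllocStack:
		atomic.AddInt64(&s.stacksInUse, delta)
	case spanAllocWorkBuf:
		atomic.AddInt64(&s.gcWorkBufInUse, delta)
	case spanAllocPtrScalarBits:
		atomic.AddInt64(&s.gcProgPtrScalarBitsInUse, delta)
	}
}

func main() {
	var s stats
	s.account(spanAllocWorkBuf, 64<<10)      // allocate a 64 KiB work-buffer span
	s.account(spanAllocWorkBuf, -(64 << 10)) // free the same span
	fmt.Println("gcWorkBufInUse after alloc+free:", s.gcWorkBufInUse) // prints 0
}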
@@ -1956,7 +1960,7 @@ func newArenaMayUnlock() *gcBitsArena {
 	var result *gcBitsArena
 	if gcBitsArenas.free == nil {
 		unlock(&gcBitsArenas.lock)
-		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
+		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
 		if result == nil {
 			throw("runtime: cannot allocate memory")
 		}
@@ -102,7 +102,7 @@ retry:
 		if newCap == 0 {
 			newCap = spanSetInitSpineCap
 		}
-		newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys)
+		newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
 		if b.spineCap != 0 {
 			// Blocks are allocated off-heap, so
 			// no write barriers.
@@ -283,7 +283,7 @@ func (p *spanSetBlockAlloc) alloc() *spanSetBlock {
 	if s := (*spanSetBlock)(p.stack.pop()); s != nil {
 		return s
 	}
-	return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gc_sys))
+	return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
 }

 // free returns a spanSetBlock back to the pool.
@@ -44,15 +44,17 @@ type mstats struct {

 	// Statistics about allocation of low-level fixed-size structures.
 	// Protected by FixAlloc locks.
 	stacks_inuse uint64     // bytes in manually-managed stack spans; updated atomically or during STW
 	stacks_sys   sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
 	mspan_inuse  uint64     // mspan structures
 	mspan_sys    sysMemStat
 	mcache_inuse uint64     // mcache structures
 	mcache_sys   sysMemStat
 	buckhash_sys sysMemStat // profiling bucket hash table
-	gc_sys       sysMemStat // updated atomically or during STW
-	other_sys    sysMemStat // updated atomically or during STW
+	gcWorkBufInUse           uint64     // updated atomically or during STW
+	gcProgPtrScalarBitsInUse uint64     // updated atomically or during STW
+	gcMiscSys                sysMemStat // updated atomically or during STW
+	other_sys                sysMemStat // updated atomically or during STW

 	// Statistics about the garbage collector.

@@ -472,7 +474,10 @@ func readmemstats_m(stats *MemStats) {
 	stats.MCacheInuse = memstats.mcache_inuse
 	stats.MCacheSys = memstats.mcache_sys.load()
 	stats.BuckHashSys = memstats.buckhash_sys.load()
-	stats.GCSys = memstats.gc_sys.load()
+	// MemStats defines GCSys as an aggregate of all memory related
+	// to the memory management system, but we track this memory
+	// at a more granular level in the runtime.
+	stats.GCSys = memstats.gcMiscSys.load() + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
 	stats.OtherSys = memstats.other_sys.load()
 	stats.NextGC = memstats.next_gc
 	stats.LastGC = memstats.last_gc_unix
@@ -557,11 +562,11 @@ func updatememstats() {
 	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
 	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
 	memstats.sys = memstats.heap_sys.load() + memstats.stacks_sys.load() + memstats.mspan_sys.load() +
-		memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gc_sys.load() +
+		memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gcMiscSys.load() +
 		memstats.other_sys.load()

-	// We also count stacks_inuse as sys memory.
-	memstats.sys += memstats.stacks_inuse
+	// We also count stacks_inuse, gcWorkBufInUse, and gcProgPtrScalarBitsInUse as sys memory.
+	memstats.sys += memstats.stacks_inuse + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse

 	// Calculate memory allocator stats.
 	// During program execution we only count number of frees and amount of freed memory.