diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index 8f9c529539..5302dd8e3d 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -126,9 +126,6 @@ havespan:
 	// mcache. If it gets uncached, we'll adjust this.
 	atomic.Xadd64(&c.nmalloc, int64(n))
 	usedBytes := uintptr(s.allocCount) * s.elemsize
-	if usedBytes > 0 {
-		reimburseSweepCredit(usedBytes)
-	}
 	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
 	if trace.enabled {
 		// heap_live changed.
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 88722699a6..8cba9f72bb 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -839,7 +839,8 @@ func gcSetTriggerRatio(triggerRatio float64) {
 		// pages by the time the allocated heap reaches the GC
 		// trigger. Compute the ratio of in-use pages to sweep
 		// per byte allocated.
-		heapDistance := int64(trigger) - int64(atomic.Load64(&memstats.heap_live))
+		heapLiveBasis := atomic.Load64(&memstats.heap_live)
+		heapDistance := int64(trigger) - int64(heapLiveBasis)
 		// Add a little margin so rounding errors and
 		// concurrent sweep are less likely to leave pages
 		// unswept when GC starts.
@@ -850,7 +851,7 @@ func gcSetTriggerRatio(triggerRatio float64) {
 		}
 		mheap_.sweepPagesPerByte = float64(mheap_.pagesInUse) / float64(heapDistance)
 		mheap_.pagesSwept = 0
-		mheap_.spanBytesAlloc = 0
+		mheap_.sweepHeapLiveBasis = heapLiveBasis
 	}
 }
 
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index dd0682594a..8915b398cd 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -123,7 +123,7 @@ func sweepone() uintptr {
 	// last one print trace information.
 	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
 		if debug.gcpacertrace > 0 {
-			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", mheap_.spanBytesAlloc>>20, "MB of spans; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
+			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
 		}
 	}
 	_g_.m.locks--
@@ -379,8 +379,7 @@ func (s *mspan) sweep(preserve bool) bool {
 //
 // deductSweepCredit makes a worst-case assumption that all spanBytes
 // bytes of the ultimately allocated span will be available for object
-// allocation. The caller should call reimburseSweepCredit if that
-// turns out not to be the case once the span is allocated.
+// allocation.
 //
 // deductSweepCredit is the core of the "proportional sweep" system.
 // It uses statistics gathered by the garbage collector to perform
@@ -398,12 +397,10 @@ func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
 		traceGCSweepStart()
 	}
 
-	// Account for this span allocation.
-	spanBytesAlloc := atomic.Xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))
-
 	// Fix debt if necessary.
-	pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
-	for pagesOwed-int64(atomic.Load64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
+	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
+	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
+	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)) {
 		if gosweepone() == ^uintptr(0) {
 			mheap_.sweepPagesPerByte = 0
 			break
@@ -414,19 +411,3 @@ func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
 		traceGCSweepDone()
 	}
 }
-
-// reimburseSweepCredit records that unusableBytes bytes of a
-// just-allocated span are not available for object allocation. This
-// offsets the worst-case charge performed by deductSweepCredit.
-func reimburseSweepCredit(unusableBytes uintptr) {
-	if mheap_.sweepPagesPerByte == 0 {
-		// Nobody cares about the credit. Avoid the atomic.
-		return
-	}
-	nval := atomic.Xadd64(&mheap_.spanBytesAlloc, -int64(unusableBytes))
-	if int64(nval) < 0 {
-		// Debugging for #18043.
-		print("runtime: bad spanBytesAlloc=", nval, " (was ", nval+uint64(unusableBytes), ") unusableBytes=", unusableBytes, " sweepPagesPerByte=", mheap_.sweepPagesPerByte, "\n")
-		throw("spanBytesAlloc underflow")
-	}
-}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index e1b3b184e8..643fc7c502 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -75,10 +75,10 @@ type mheap struct {
 	_ uint32 // align uint64 fields on 32-bit for atomics
 
 	// Proportional sweep
-	pagesInUse        uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
-	spanBytesAlloc    uint64  // bytes of spans allocated this cycle; updated atomically
-	pagesSwept        uint64  // pages swept this cycle; updated atomically
-	sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
+	pagesInUse         uint64  // pages of spans in stats _MSpanInUse; R/W with mheap.lock
+	pagesSwept         uint64  // pages swept this cycle; updated atomically
+	sweepHeapLiveBasis uint64  // value of heap_live to use as the origin of sweep ratio; written with lock, read without
+	sweepPagesPerByte  float64 // proportional sweep ratio; written with lock, read without
 
 	// TODO(austin): pagesInUse should be a uintptr, but the 386
 	// compiler can't 8-byte align fields.
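Note for reviewers: below is a minimal, self-contained sketch of the pacing arithmetic this change moves to. It is not runtime code; the concrete sizes, the local variable names, and the simplified margin handling are made-up assumptions for illustration. The point it demonstrates is that sweep debt is now derived from how far heap_live has advanced past the basis recorded when the ratio was set, rather than from the separately maintained spanBytesAlloc counter that the removed reimburseSweepCredit had to decrement (and could underflow, per #18043).

package main

import "fmt"

// Simplified model of the new proportional-sweep pacing. All values
// are invented for the example.
func main() {
	const pageSize = 8192

	// gcSetTriggerRatio: fix the sweep ratio for this cycle.
	pagesInUse := uint64((96 << 20) / pageSize) // 96 MB of in-use spans to sweep
	heapLiveBasis := uint64(64 << 20)           // heap_live when the ratio was set
	trigger := uint64(112 << 20)                // heap_live that starts the next GC

	// Subtract a small margin, as the real code does, so rounding errors
	// and concurrent sweep are less likely to leave pages unswept.
	heapDistance := int64(trigger) - int64(heapLiveBasis) - 1024*1024
	sweepPagesPerByte := float64(pagesInUse) / float64(heapDistance)

	// deductSweepCredit: a goroutine is about to allocate a span.
	heapLive := uint64(80 << 20)  // current heap_live
	spanBytes := uint64(32 << 10) // size of the span being allocated
	pagesSwept := int64(3000)     // pages swept so far this cycle
	callerSweepPages := int64(0)  // pages the caller will sweep itself

	// Debt is measured from the basis, not from a running allocation
	// counter, so nothing needs to be reimbursed later and the counter
	// cannot underflow.
	newHeapLive := heapLive - heapLiveBasis + spanBytes
	pagesTarget := int64(sweepPagesPerByte*float64(newHeapLive)) - callerSweepPages

	fmt.Printf("ratio=%.6f pages/byte target=%d swept=%d owed=%d\n",
		sweepPagesPerByte, pagesTarget, pagesSwept, pagesTarget-pagesSwept)
}

With these numbers the loop in deductSweepCredit would keep calling gosweepone until pagesSwept reaches pagesTarget (about 1191 more pages here). Since heap_live is already maintained by the allocator, mcentral no longer needs the reimburseSweepCredit call when a span turns out to be partially used; the heap_live adjustment it already performs feeds straight into the pacing.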