diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index d428144db0..069f267130 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -450,6 +450,25 @@ func (s *pageAlloc) scavengeStartGen() {
 		printScavTrace(s.scav.gen, s.scav.released, false)
 	}
 	s.inUse.cloneInto(&s.scav.inUse)
+
+	// Pick the new starting address for the scavenger cycle.
+	var startAddr uintptr
+	if s.scav.scavLWM < s.scav.freeHWM {
+		// The "free" high watermark exceeds the "scavenged" low watermark,
+		// so there are free scavengable pages in parts of the address space
+		// that the scavenger already searched, the high watermark being the
+		// highest one. Pick that as our new starting point to ensure we
+		// see those pages.
+		startAddr = s.scav.freeHWM
+	} else {
+		// The "free" high watermark does not exceed the "scavenged" low
+		// watermark. This means the allocator didn't free any memory in
+		// the range we scavenged last cycle, so we might as well continue
+		// scavenging from where we were.
+		startAddr = s.scav.scavLWM
+	}
+	s.scav.inUse.removeGreaterEqual(startAddr)
+
 	// reservationBytes may be zero if s.inUse.totalBytes is small, or if
 	// scavengeReservationShards is large. This case is fine as the scavenger
 	// will simply be turned off, but it does mean that scavengeReservationShards,
@@ -459,6 +478,8 @@ func (s *pageAlloc) scavengeStartGen() {
 	s.scav.reservationBytes = alignUp(s.inUse.totalBytes, pallocChunkBytes) / scavengeReservationShards
 	s.scav.gen++
 	s.scav.released = 0
+	s.scav.freeHWM = 0
+	s.scav.scavLWM = maxSearchAddr
 }
 
 // scavengeReserve reserves a contiguous range of the address space
@@ -676,6 +697,11 @@ func (s *pageAlloc) scavengeRangeLocked(ci chunkIdx, base, npages uint) uintptr
 	// Compute the full address for the start of the range.
 	addr := chunkBase(ci) + uintptr(base)*pageSize
 
+	// Update the scavenge low watermark.
+	if addr < s.scav.scavLWM {
+		s.scav.scavLWM = addr
+	}
+
 	// Only perform the actual scavenging if we're not in a test.
 	// It's dangerous to do so otherwise.
 	if s.test {
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 771cb3a3ba..905d49d751 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -270,6 +270,14 @@ type pageAlloc struct {
 
 		// released is the amount of memory released this generation.
 		released uintptr
+
+		// scavLWM is the lowest address that the scavenger reached this
+		// scavenge generation.
+		scavLWM uintptr
+
+		// freeHWM is the highest address of a page that was freed to
+		// the page allocator this scavenge generation.
+		freeHWM uintptr
 	}
 
 	// mheap_.lock. This level of indirection makes it possible
@@ -306,6 +314,9 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 
 	// Set the mheapLock.
 	s.mheapLock = mheapLock
+
+	// Initialize scavenge tracking state.
+	s.scav.scavLWM = maxSearchAddr
 }
 
 // compareSearchAddrTo compares an address against s.searchAddr in a linearized
@@ -813,6 +824,11 @@ func (s *pageAlloc) free(base, npages uintptr) {
 	if s.compareSearchAddrTo(base) < 0 {
 		s.searchAddr = base
 	}
+	// Update the free high watermark for the scavenger.
+	limit := base + npages*pageSize - 1
+	if s.scav.freeHWM < limit {
+		s.scav.freeHWM = limit
+	}
 	if npages == 1 {
 		// Fast path: we're clearing a single bit, and we know exactly
 		// where it is, so mark it directly.
@@ -820,7 +836,6 @@ func (s *pageAlloc) free(base, npages uintptr) {
 		s.chunkOf(i).free1(chunkPageIndex(base))
 	} else {
 		// Slow path: we're clearing more bits so we may need to iterate.
-		limit := base + npages*pageSize - 1
 		sc, ec := chunkIndex(base), chunkIndex(limit)
 		si, ei := chunkPageIndex(base), chunkPageIndex(limit)
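
The heart of this change is the interplay between the two watermarks, so a standalone illustration may help review. Below is a minimal, runnable Go sketch; it is not part of the patch, and the watermarks type, its free/scavenge/startGen methods, maxAddr, and the example addresses are all invented stand-ins for the pageAlloc scavenge state and maxSearchAddr. It models how scavLWM and freeHWM combine at generation start to pick the next cycle's starting address.

package main

import "fmt"

// watermarks models the per-generation scavenge tracking state this patch
// adds to pageAlloc: the lowest address scavenged and the highest address
// freed. The type and method names are invented for illustration only.
type watermarks struct {
	scavLWM uintptr // lowest address the scavenger reached this generation
	freeHWM uintptr // highest address freed back to the allocator this generation
}

// maxAddr is a stand-in for the runtime's maxSearchAddr reset value.
const maxAddr = ^uintptr(0)

// free records that [base, base+size) was returned to the allocator,
// mirroring the freeHWM update added to pageAlloc.free.
func (w *watermarks) free(base, size uintptr) {
	if limit := base + size - 1; w.freeHWM < limit {
		w.freeHWM = limit
	}
}

// scavenge records that the scavenger released memory at addr, mirroring
// the scavLWM update added to scavengeRangeLocked.
func (w *watermarks) scavenge(addr uintptr) {
	if addr < w.scavLWM {
		w.scavLWM = addr
	}
}

// startGen picks where the next scavenger cycle should begin and resets
// both watermarks, mirroring the logic added to scavengeStartGen.
func (w *watermarks) startGen() uintptr {
	var startAddr uintptr
	if w.scavLWM < w.freeHWM {
		// Pages were freed at or above the lowest address the scavenger
		// already searched, so back up to the highest freed address.
		startAddr = w.freeHWM
	} else {
		// Nothing was freed in the range scavenged last cycle; resume
		// from where the scavenger left off.
		startAddr = w.scavLWM
	}
	w.freeHWM = 0
	w.scavLWM = maxAddr
	return startAddr
}

func main() {
	w := watermarks{scavLWM: maxAddr}

	// Generation 1: the scavenger works down to 0x9000, then the
	// allocator frees a page above that point.
	w.scavenge(0xc000)
	w.scavenge(0x9000)
	w.free(0xa000, 0x1000)
	fmt.Printf("gen 2 starts at %#x\n", w.startGen()) // 0xafff: revisit the freed page

	// Generation 2: nothing freed above the low watermark, so the next
	// cycle resumes from the lowest scavenged address.
	w.scavenge(0x8000)
	fmt.Printf("gen 3 starts at %#x\n", w.startGen()) // 0x8000
}

Note the asymmetry the patch's comments describe: a free at or above the scavenge low watermark forces the next cycle to back up to the freed page (0xafff in the sketch), while in a quiet generation the scavenger simply resumes downward from where it left off, and anything below will be visited in due course anyway.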