diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 6721f3a8bb..3807050cbe 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1227,16 +1227,6 @@ HaveSpan:
 		// heap_released since we already did so earlier.
 		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
 		s.scavenged = false
-
-		// Since we allocated out of a scavenged span, we just
-		// grew the RSS. Mitigate this by scavenging enough free
-		// space to make up for it but only if we need to.
-		//
-		// scavengeLocked may cause coalescing, so prevent
-		// coalescing with s by temporarily changing its state.
-		s.state = mSpanManual
-		h.scavengeIfNeededLocked(s.npages * pageSize)
-		s.state = mSpanFree
 	}
 
 	h.setSpans(s.base(), npage, s)
@@ -1312,6 +1302,10 @@ func (h *mheap) grow(npage uintptr) bool {
 //
 // h must be locked.
 func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) {
+	// Scavenge some pages to make up for the virtual memory space
+	// we just allocated, but only if we need to.
+	h.scavengeIfNeededLocked(size)
+
 	s := (*mspan)(h.spanalloc.alloc())
 	s.init(uintptr(v), size/pageSize)
 	h.setSpans(s.base(), s.npages, s)
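
The pattern this change lands on, scavenging in response to heap growth rather than when a scavenged span is reused, can be illustrated with a toy model. The sketch below is not the runtime's implementation: the heap struct, scavengeIfNeeded, goalPages, and all the numbers are invented here purely to show the shape of "grow, then release free pages back to the OS, but only if we need to".

// Toy model of the pattern in this change: scavenge (return free memory
// to the OS) when the heap grows, instead of when a scavenged span is
// reused. All names and constants are illustrative, not the runtime's.
package main

import "fmt"

const pageSize = 8192

// heap is a drastically simplified stand-in for runtime.mheap.
type heap struct {
	retainedPages uint64 // pages of address space backed by physical memory
	freePages     uint64 // free pages that could be returned to the OS
	goalPages     uint64 // retained-page target; above this we scavenge
}

// scavengeIfNeeded releases up to nPages of free memory back to the OS,
// but only while retained memory exceeds the goal (the "only if we need
// to" condition from the diff's comment).
func (h *heap) scavengeIfNeeded(nPages uint64) {
	for nPages > 0 && h.retainedPages > h.goalPages && h.freePages > 0 {
		h.freePages--
		h.retainedPages--
		nPages--
	}
}

// grow maps nPages of new address space, then scavenges to offset that
// growth, mirroring growAddSpan calling scavengeIfNeededLocked(size).
func (h *heap) grow(nPages uint64) {
	h.retainedPages += nPages // the new mapping grows our footprint...
	h.freePages += nPages     // ...and starts out as free space
	h.scavengeIfNeeded(nPages)
}

func main() {
	h := &heap{goalPages: 4}
	h.grow(8) // grow past the goal; scavenging trims retained back to it
	fmt.Printf("retained=%dKiB free=%dKiB\n",
		h.retainedPages*pageSize/1024, h.freePages*pageSize/1024)
}

Under this model, moving the call into grow also removes the need for the old mSpanManual state dance: there is no span s being allocated at scavenge time, so nothing to protect from coalescing.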