mirror of https://github.com/golang/go.git
runtime: stop unnecessary span scavenges on free
This change fixes a bug wherein freeing a scavenged span that didn't coalesce with any neighboring spans would result in that span getting scavenged again. This case may actually be a common occurrence because "freeing" span trimmings and newly-grown spans end up using the same codepath. On systems where madvise is relatively expensive, this can have a large performance impact.

This change also cleans up some of this logic in freeSpanLocked, since a number of factors made the coalescing code somewhat difficult to reason about with respect to scavenging. Notably, the way the needsScavenge boolean is handled could be better expressed, and the inverted conditions (e.g. !after.released) can make things even more confusing.

Fixes #28595.

Change-Id: I75228dba70b6596b90853020b7c24fbe7ab937cf
Reviewed-on: https://go-review.googlesource.com/c/147559
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
This commit is contained in:
parent 78c0e1f81d
commit 06be7cbf3c
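To make the behavior change concrete, here is a minimal, self-contained sketch of the needsScavenge decision, not the runtime code: span, freeOld, and freeNew are simplified stand-ins, and "madvises" just counts would-be syscalls. It contrasts the pre-fix rule, which seeds needsScavenge from s.scavenged, with the post-fix rule, which only re-scavenges when a scavenged neighbor is actually merged in.

// Toy model of the needsScavenge decision in freeSpanLocked. All names here
// (span, freeOld, freeNew) are simplified stand-ins, not the runtime's types.
package main

import "fmt"

type span struct {
	scavenged bool // pages already returned to the OS
}

// freeOld follows the pre-fix rule: needsScavenge starts from s.scavenged,
// so an already-scavenged span that coalesces with nothing is scavenged again.
func freeOld(s, before, after *span) (madvises int) {
	needsScavenge := s.scavenged
	if before != nil {
		needsScavenge = needsScavenge || before.scavenged || s.scavenged
	}
	if after != nil {
		needsScavenge = needsScavenge || after.scavenged || s.scavenged
	}
	if needsScavenge {
		madvises++ // stands in for the madvise cost of scavenging
	}
	return
}

// freeNew follows the post-fix rule: needsScavenge starts false and only
// becomes true when a scavenged neighbor is actually merged in.
func freeNew(s, before, after *span) (madvises int) {
	needsScavenge := false
	if before != nil && before.scavenged {
		needsScavenge = true
	}
	if after != nil && after.scavenged {
		needsScavenge = true
	}
	if needsScavenge {
		madvises++
	}
	return
}

func main() {
	s := &span{scavenged: true} // e.g. a trimming of a freshly grown, scavenged region
	fmt.Println("old rule, no coalescing:", freeOld(s, nil, nil), "madvise call(s)") // 1, redundant
	fmt.Println("new rule, no coalescing:", freeNew(s, nil, nil), "madvise call(s)") // 0
}

Under the old rule the lone scavenged span pays one extra madvise on every free; under the new rule it pays none, which is exactly the common case the commit message calls out for span trimmings and newly-grown spans.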
@@ -1054,7 +1054,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 	// We scavenge s at the end after coalescing if s or anything
 	// it merged with is marked scavenged.
-	needsScavenge := s.scavenged
+	needsScavenge := false
 	prescavenged := s.released() // number of bytes already scavenged.
 
 	// Coalesce with earlier, later spans.
@@ -1064,14 +1064,15 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		s.npages += before.npages
 		s.needzero |= before.needzero
 		h.setSpan(before.base(), s)
-		// If before or s are scavenged, then we need to scavenge the final coalesced span.
-		needsScavenge = needsScavenge || before.scavenged || s.scavenged
-		prescavenged += before.released()
 		// The size is potentially changing so the treap needs to delete adjacent nodes and
 		// insert back as a combined node.
-		if !before.scavenged {
-			h.free.removeSpan(before)
-		} else {
+		if before.scavenged {
 			h.scav.removeSpan(before)
+			needsScavenge = true
+			prescavenged += before.released()
+		} else {
+			h.free.removeSpan(before)
 		}
 		before.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(before))
@@ -1082,12 +1083,12 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
 		s.npages += after.npages
 		s.needzero |= after.needzero
 		h.setSpan(s.base()+s.npages*pageSize-1, s)
-		if !after.scavenged {
-			h.free.removeSpan(after)
-		} else {
-			needsScavenge = needsScavenge || after.scavenged || s.scavenged
-			prescavenged += after.released()
+		if after.scavenged {
 			h.scav.removeSpan(after)
+			needsScavenge = true
+			prescavenged += after.released()
+		} else {
+			h.free.removeSpan(after)
 		}
 		after.state = mSpanDead
 		h.spanalloc.free(unsafe.Pointer(after))
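For context on the performance claim in the commit message, below is a Linux-specific sketch of the operation that scavenging ultimately performs: releasing a region's backing pages with madvise. The mapping size and the MADV_DONTNEED advice are illustrative assumptions for this sketch, not necessarily what the runtime uses on every platform.

// Linux-specific sketch of the syscall cost behind span scavenging.
package main

import (
	"fmt"
	"syscall"
)

func main() {
	const length = 1 << 20 // 1 MiB, a stand-in for a span's pages

	// Anonymous private mapping, a rough stand-in for heap memory.
	mem, err := syscall.Mmap(-1, 0, length,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(mem)

	// Touch the pages so they are actually backed by physical memory.
	for i := 0; i < length; i += 4096 {
		mem[i] = 1
	}

	// MADV_DONTNEED lets the kernel reclaim the backing pages. This is the
	// per-span work the commit avoids repeating for already-scavenged spans.
	if err := syscall.Madvise(mem, syscall.MADV_DONTNEED); err != nil {
		panic(err)
	}
	fmt.Println("released", length, "bytes back to the OS")
}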