diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index 627c7cfdce..e807995810 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -1052,10 +1052,18 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
 	s.isUserArenaChunk = true
 	s.elemsize -= userArenaChunkReserveBytes()
-	s.limit = s.base() + s.elemsize
 	s.freeindex = 1
 	s.allocCount = 1
 
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, temporarily observes an inflated
+	// limit. It will simply mark the whole chunk or just skip it
+	// since we're in the mark phase anyway.
+	s.limit = s.base() + s.elemsize
+
 	// Adjust size to include redzone.
 	if asanenabled {
 		s.elemsize -= redZoneSize(s.elemsize)
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 440120cdfe..a1d04d2f8a 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -253,6 +253,14 @@ func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
 	// Put the large span in the mcentral swept list so that it's
 	// visible to the background sweeper.
 	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
+
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, temporarily observes an inflated
+	// limit. It will simply mark the whole object or just skip it
+	// since we're in the mark phase anyway.
 	s.limit = s.base() + size
 	s.initHeapBits()
 	return s
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index c71ecbbcd5..ec27ce25a8 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -250,13 +250,10 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 // grow allocates a new empty span from the heap and initializes it for c's size class.
 func (c *mcentral) grow() *mspan {
 	npages := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()])
-	size := uintptr(gc.SizeClassToSize[c.spanclass.sizeclass()])
-
 	s := mheap_.alloc(npages, c.spanclass)
 	if s == nil {
 		return nil
 	}
-	s.limit = s.base() + size*uintptr(s.nelems)
 	s.initHeapBits()
 	return s
 }
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 9361089b80..3965d6eb52 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1445,7 +1445,6 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 	if typ.manual() {
 		s.manualFreeList = 0
 		s.nelems = 0
-		s.limit = s.base() + s.npages*pageSize
 		s.state.set(mSpanManual)
 	} else {
 		// We must set span properties before the span is published anywhere
@@ -1486,6 +1485,9 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 	s.gcmarkBits = newMarkBits(uintptr(s.nelems))
 	s.allocBits = newAllocBits(uintptr(s.nelems))
 
+	// Adjust s.limit down to the object-containing part of the span.
+	s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+
 	// It's safe to access h.sweepgen without the heap lock because it's
 	// only ever updated with the world stopped and we run on the
 	// systemstack which blocks a STW transition.
@@ -1785,6 +1787,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.list = nil
 	span.startAddr = base
 	span.npages = npages
+	span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans
 	span.allocCount = 0
 	span.spanclass = 0
 	span.elemsize = 0
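The ordering this patch establishes can be sketched in isolation: `span.init` now seeds `limit` with the full page extent (per the comment above, see go.dev/issue/74288), and heap spans later tighten it to the object-containing prefix once `elemsize` and `nelems` are known. Conservative scanning only uses `limit` as an upper bound, so a temporarily inflated value is safe during the mark phase. The sketch below is a hypothetical standalone model, not the runtime's actual `mspan`: the field names mirror it, but `initHeapSpan`, `contains`, and the constants are illustrative assumptions.

```go
package main

import "fmt"

// pageSize matches the runtime's 8 KiB heap page (gc.PageSize).
const pageSize = 8192

// span is a hypothetical, stripped-down model of the runtime's mspan;
// it exists only to illustrate how limit is initialized and tightened.
type span struct {
	startAddr uintptr
	npages    uintptr
	elemsize  uintptr
	nelems    uintptr
	limit     uintptr // one past the last byte bounds checks treat as in-span
}

func (s *span) base() uintptr { return s.startAddr }

// init mirrors mspan.init after the patch: limit starts at the full
// page extent, so it is valid even before a size class is chosen.
func (s *span) init(base, npages uintptr) {
	s.startAddr = base
	s.npages = npages
	s.limit = base + npages*pageSize
}

// initHeapSpan mirrors the tightening step added to mheap.initSpan:
// once elemsize and nelems are known, limit shrinks to the
// object-containing prefix, excluding the unusable tail of the span.
func (s *span) initHeapSpan(elemsize, nelems uintptr) {
	s.elemsize = elemsize
	s.nelems = nelems
	s.limit = s.base() + elemsize*nelems
}

// contains is the kind of bounds check conservative scanning performs.
// An inflated limit only makes it more permissive, which is safe in the
// mark phase: at worst a whole chunk is marked, or the pointer skipped.
func (s *span) contains(p uintptr) bool {
	return s.base() <= p && p < s.limit
}

func main() {
	var s span
	s.init(0x1000000, 1) // one 8 KiB page

	tail := s.base() + 8150      // inside the page, past the last object below
	fmt.Println(s.contains(tail)) // true: limit is still the full page extent

	// The 208-byte size class fits 39 objects in one page (208*39 = 8112),
	// leaving an 80-byte tail that the tightened limit excludes.
	s.initHeapSpan(208, 39)
	fmt.Println(s.contains(tail)) // false: offset 8150 >= 8112
}
```

With these numbers, a pointer at offset 8150 lands in the 80-byte tail: before tightening it passes the bounds check via the inflated limit, afterwards it does not, which is the "slightly tighter bound" the patch comments describe.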