runtime: implement experiment to replace heap bitmap with alloc headers

This change replaces the 1-bit-per-word heap bitmap for most size
classes with allocation headers for objects that contain pointers. The
header consists of a single pointer to a type. All allocations with
headers are treated as implicitly containing one or more instances of
the type in the header.

As the name implies, headers are usually stored as the first word of an
object. There are two exceptions, which change where the header is
stored or whether one is used at all.
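
In the common case the header occupies the allocation's first word and
the pointer handed to the caller starts one word later. A minimal sketch
of that layout, using a stand-in type rather than the runtime's *_type:

    package sketch

    import "unsafe"

    // rtype stands in for the runtime's *_type; this is an illustration
    // of the layout, not runtime code.
    type rtype struct{ Size, PtrBytes uintptr }

    const mallocHeaderSize = unsafe.Sizeof(uintptr(0)) // one pointer-word

    // headerOf recovers the type header stored in the word immediately
    // before the object data:
    //
    //    allocation: [ *rtype | object data ... ]
    //                         ^ pointer returned to the caller
    func headerOf(obj unsafe.Pointer) *rtype {
        return *(**rtype)(unsafe.Add(obj, -int(mallocHeaderSize)))
    }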

Objects smaller than 512 bytes do not have headers. Instead, a heap
bitmap is reserved at the end of spans for objects of this size. A full
word of overhead is too much for these small objects. The bitmap is of
the same format of the old bitmap, minus the noMorePtrs bits which are
unnecessary. All the objects <512 bytes have a bitmap less than a
pointer-word in size, and that was the granularity at which noMorePtrs
could stop scanning early anyway.
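
For scale, a back-of-the-envelope sketch, assuming 8 KiB spans and
8-byte pointer-words (illustrative constants, not definitions from the
runtime):

    package main

    import "fmt"

    func main() {
        const (
            ptrSize   = 8    // bytes per pointer-word
            spanBytes = 8192 // one small-object span, for illustration
        )
        // One bit per pointer-word across the whole span.
        fmt.Println("per-span bitmap:", spanBytes/ptrSize/8, "bytes") // 128
        // An object just under 512 bytes needs at most 512/8 = 64 bits of
        // bitmap, i.e. its pointer/scalar information fits in one word.
        fmt.Println("bitmap bits for a 512-byte object:", 512/ptrSize) // 64
    }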

Objects larger than 32 KiB, which get their own span, have their
headers stored directly in the span, so that power-of-two-sized
allocations do not spill over into an extra page.
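
A sketch of what this looks like on the span, with a stand-in struct
(the real field added by this CL is mspan.largeType, visible in the
mspan hunk later in this diff):

    // Illustrative stand-in for the relevant part of mspan, not its real
    // definition. A >32 KiB object has its span to itself, and the header
    // describing it lives on the span rather than in the object's first
    // word, so a power-of-two-sized allocation still fits exactly in its
    // pages.
    type largeObjectSpan struct {
        startAddr uintptr // first byte of the single object
        elemsize  uintptr // size of that object
        largeType *byte   // stands in for *_type; nil when the object has no pointers
    }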

The full implementation is behind GOEXPERIMENT=allocheaders.

The purpose of this change is performance. First and foremost, with
headers we no longer have to unroll pointer/scalar data at allocation
time for most size classes. Small size classes still need some
unrolling, but their bitmaps are small, so that case can be optimized
fairly well. Larger objects effectively have their pointer/scalar data
unrolled on demand from type data, which is represented much more
compactly and so puts less pressure on the TLB. Furthermore, since the
header usually sits right next to the object, where scanning is about
to begin, we get an additional temporal locality benefit in the data
cache when looking up type metadata. The on-demand unrolling is also
simpler than before: the unrolled data is never written anywhere, and
for arrays we reuse the same type data for each element instead of
looking it up from scratch for every pointer-word of bitmap. Lastly,
because we no longer have a heap bitmap spanning the entire heap, there
is a flat 1.5% reduction in memory use. This is offset slightly by some
objects possibly being bumped up a size class to make room for the
header, but most objects are not tightly fitted to their size class, so
there is usually room to spare, making the header effectively free in
those cases.
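
(The flat 1.5% follows from the old bitmap's density: one bitmap bit per
8-byte pointer-word is 1/64 of heap memory, about 1.6%. A quick check:)

    package main

    import "fmt"

    func main() {
        const bitsPerPointerWord = 64 // one bitmap bit per 64-bit pointer-word
        fmt.Printf("old heap bitmap overhead: %.2f%%\n", 100.0/bitsPerPointerWord) // 1.56%
    }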

For benchmark results, see the follow-up CL (CL 538217), which turns
this experiment on by default.

Change-Id: I4c9034ee200650d06d8bdecd579d5f7c1bbf1fc5
Reviewed-on: https://go-review.googlesource.com/c/go/+/437955
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Author: Michael Anthony Knyszek (2022-09-11 04:07:41 +00:00)
Committer: Michael Knyszek
Parent: 25867485a7
Commit: 38ac7c41aa
23 changed files with 1138 additions and 533 deletions

@ -73,11 +73,13 @@ func TestIntendedInlining(t *testing.T) {
"gclinkptr.ptr",
"guintptr.ptr",
"writeHeapBitsForAddr",
"heapBitsSlice",
"markBits.isMarked",
"muintptr.ptr",
"puintptr.ptr",
"spanOf",
"spanOfUnchecked",
"typePointers.nextFast",
"(*gcWork).putFast",
"(*gcWork).tryGetFast",
"(*guintptr).set",
@ -86,6 +88,7 @@ func TestIntendedInlining(t *testing.T) {
"(*mspan).base",
"(*mspan).markBitsForBase",
"(*mspan).markBitsForIndex",
"(*mspan).writeHeapBits",
"(*muintptr).set",
"(*puintptr).set",
"(*wbBuf).get1",

@ -7030,10 +7030,18 @@ func verifyGCBits(t *testing.T, typ Type, bits []byte) {
// e.g. with rep(2, lit(1, 0)).
bits = trimBitmap(bits)
if !bytes.Equal(heapBits, bits) {
_, _, line, _ := runtime.Caller(1)
t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
if bytes.HasPrefix(heapBits, bits) {
// Just the prefix matching is OK.
//
// The Go runtime's pointer/scalar iterator generates pointers beyond
// the size of the type, up to the size of the size class. This space
// is safe for the GC to scan since it's zero, and GCBits checks to
// make sure that's true. But we need to handle the fact that the bitmap
// may be larger than we expect.
return
}
_, _, line, _ := runtime.Caller(1)
t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
}
func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
@ -7042,15 +7050,20 @@ func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
// repeat a bitmap for a small array or executing a repeat in
// a GC program.
val := MakeSlice(typ, 0, cap)
data := NewAt(ArrayOf(cap, typ.Elem()), val.UnsafePointer())
data := NewAt(typ.Elem(), val.UnsafePointer())
heapBits := GCBits(data.Interface())
// Repeat the bitmap for the slice size, trimming scalars in
// the last element.
bits = trimBitmap(rep(cap, bits))
if !bytes.Equal(heapBits, bits) {
_, _, line, _ := runtime.Caller(1)
t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
if bytes.Equal(heapBits, bits) {
return
}
if len(heapBits) > len(bits) && bytes.Equal(heapBits[:len(bits)], bits) {
// Just the prefix matching is OK.
return
}
_, _, line, _ := runtime.Caller(1)
t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
}
func TestGCBits(t *testing.T) {

@ -83,6 +83,8 @@
package runtime
import (
"internal/goarch"
"internal/goexperiment"
"runtime/internal/atomic"
"runtime/internal/math"
"unsafe"
@ -218,6 +220,19 @@ func init() {
lockInit(&userArenaState.lock, lockRankUserArenaState)
}
// userArenaChunkReserveBytes returns the amount of additional bytes to reserve for
// heap metadata.
func userArenaChunkReserveBytes() uintptr {
if goexperiment.AllocHeaders {
// In the allocation headers experiment, we reserve the end of the chunk for
// a pointer/scalar bitmap. We also reserve space for a dummy _type that
// refers to the bitmap. The PtrBytes field of the dummy _type indicates how
// many of those bits are valid.
return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
}
return 0
}
type userArena struct {
// full is a list of full chunks that have not enough free memory left, and
// that we'll free once this user arena is freed.
@ -491,9 +506,9 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
// Set up heap bitmap and do extra accounting.
if typ.PtrBytes != 0 {
if cap >= 0 {
userArenaHeapBitsSetSliceType(typ, cap, ptr, s.base())
userArenaHeapBitsSetSliceType(typ, cap, ptr, s)
} else {
userArenaHeapBitsSetType(typ, ptr, s.base())
userArenaHeapBitsSetType(typ, ptr, s)
}
c := getMCache(mp)
if c == nil {
@ -523,13 +538,13 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
// userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for
// Go slice backing store values allocated in a user arena chunk. It sets up the
// heap bitmap for n consecutive values with type typ allocated at address ptr.
func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, base uintptr) {
func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan) {
mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
if overflow || n < 0 || mem > maxAlloc {
panic(plainError("runtime: allocation size out of range"))
}
for i := 0; i < n; i++ {
userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), base)
userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), s)
}
}
@ -591,9 +606,12 @@ func newUserArenaChunk() (unsafe.Pointer, *mspan) {
// TODO(mknyszek): Track individual objects.
rzSize := computeRZlog(span.elemsize)
span.elemsize -= rzSize
span.limit -= rzSize
span.userArenaChunkFree = makeAddrRange(span.base(), span.limit)
asanpoison(unsafe.Pointer(span.limit), span.npages*pageSize-span.elemsize)
if goexperiment.AllocHeaders {
span.largeType.Size_ = span.elemsize
}
rzStart := span.base() + span.elemsize
span.userArenaChunkFree = makeAddrRange(span.base(), rzStart)
asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart)
asanunpoison(unsafe.Pointer(span.base()), span.elemsize)
}
@ -694,7 +712,7 @@ func (s *mspan) setUserArenaChunkToFault() {
// the span gets off the quarantine list. The main reason is so that the
// amount of bytes allocated doesn't exceed how much is counted as
// "mapped ready," which could cause a deadlock in the pacer.
gcController.totalFree.Add(int64(s.npages * pageSize))
gcController.totalFree.Add(int64(s.elemsize))
// Update consistent stats to match.
//
@ -704,11 +722,11 @@ func (s *mspan) setUserArenaChunkToFault() {
atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
atomic.Xadd64(&stats.largeFreeCount, 1)
atomic.Xadd64(&stats.largeFree, int64(s.npages*pageSize))
atomic.Xadd64(&stats.largeFree, int64(s.elemsize))
memstats.heapStats.release()
// This counts as a free, so update heapLive.
gcController.update(-int64(s.npages*pageSize), 0)
gcController.update(-int64(s.elemsize), 0)
// Mark it as free for the race detector.
if raceenabled {
@ -856,6 +874,10 @@ func (h *mheap) allocUserArenaChunk() *mspan {
spc := makeSpanClass(0, false)
h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
s.isUserArenaChunk = true
s.elemsize -= userArenaChunkReserveBytes()
s.limit = s.base() + s.elemsize
s.freeindex = 1
s.allocCount = 1
// Account for this new arena chunk memory.
gcController.heapInUse.add(int64(userArenaChunkBytes))
@ -866,22 +888,15 @@ func (h *mheap) allocUserArenaChunk() *mspan {
atomic.Xaddint64(&stats.committed, int64(userArenaChunkBytes))
// Model the arena as a single large malloc.
atomic.Xadd64(&stats.largeAlloc, int64(userArenaChunkBytes))
atomic.Xadd64(&stats.largeAlloc, int64(s.elemsize))
atomic.Xadd64(&stats.largeAllocCount, 1)
memstats.heapStats.release()
// Count the alloc in inconsistent, internal stats.
gcController.totalAlloc.Add(int64(userArenaChunkBytes))
gcController.totalAlloc.Add(int64(s.elemsize))
// Update heapLive.
gcController.update(int64(userArenaChunkBytes), 0)
// Put the large span in the mcentral swept list so that it's
// visible to the background sweeper.
h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
s.limit = s.base() + userArenaChunkBytes
s.freeindex = 1
s.allocCount = 1
gcController.update(int64(s.elemsize), 0)
// This must clear the entire heap bitmap so that it's safe
// to allocate noscan data without writing anything out.
@ -902,6 +917,19 @@ func (h *mheap) allocUserArenaChunk() *mspan {
s.freeIndexForScan = 1
// Set up the range for allocation.
s.userArenaChunkFree = makeAddrRange(base, s.limit)
s.userArenaChunkFree = makeAddrRange(base, base+s.elemsize)
// Put the large span in the mcentral swept list so that it's
// visible to the background sweeper.
h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
if goexperiment.AllocHeaders {
// Set up an allocation header. Avoid write barriers here because this type
// is not a real type, and it exists in an invalid location.
*(*uintptr)(unsafe.Pointer(&s.largeType)) = uintptr(unsafe.Pointer(s.limit))
*(*uintptr)(unsafe.Pointer(&s.largeType.GCData)) = s.limit + unsafe.Sizeof(_type{})
s.largeType.PtrBytes = 0
s.largeType.Size_ = s.elemsize
}
return s
}
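
Worked example of the userArenaChunkReserveBytes computation above, with
illustrative values (an 8 MiB user-arena chunk, 8-byte pointers, and a
_type of about 48 bytes; none of these constants appear in this hunk):

    package main

    import "fmt"

    func main() {
        const (
            userArenaChunkBytes = 8 << 20 // assumed chunk size, for illustration
            ptrSize             = 8
            sizeofType          = 48 // approximate unsafe.Sizeof(_type{}) on 64-bit
        )
        reserve := userArenaChunkBytes/ptrSize/8 + sizeofType
        fmt.Println("reserved at the end of the chunk:", reserve, "bytes") // 131120
    }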

@ -664,19 +664,32 @@ func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
if base == 0 {
return
}
n := span.elemsize
hbits := heapBitsForAddr(base, n)
for {
var addr uintptr
if hbits, addr = hbits.next(); addr == 0 {
break
if goexperiment.AllocHeaders {
tp := span.typePointersOfUnchecked(base)
for {
var addr uintptr
if tp, addr = tp.next(base + span.elemsize); addr == 0 {
break
}
pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
if cgoIsGoPointer(pp) && !isPinned(pp) {
panic(errorString(msg))
}
}
pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
if cgoIsGoPointer(pp) && !isPinned(pp) {
panic(errorString(msg))
} else {
n := span.elemsize
hbits := heapBitsForAddr(base, n)
for {
var addr uintptr
if hbits, addr = hbits.next(); addr == 0 {
break
}
pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
if cgoIsGoPointer(pp) && !isPinned(pp) {
panic(errorString(msg))
}
}
}
return
}

@ -9,6 +9,7 @@ package runtime
import (
"internal/goarch"
"internal/goexperiment"
"unsafe"
)
@ -176,16 +177,29 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
}
// src must be in the regular heap.
hbits := heapBitsForAddr(uintptr(src), size)
for {
var addr uintptr
if hbits, addr = hbits.next(); addr == 0 {
break
if goexperiment.AllocHeaders {
tp := s.typePointersOf(uintptr(src), size)
for {
var addr uintptr
if tp, addr = tp.next(uintptr(src) + size); addr == 0 {
break
}
v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
if cgoIsGoPointer(v) && !isPinned(v) {
throw(cgoWriteBarrierFail)
}
}
v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
if cgoIsGoPointer(v) && !isPinned(v) {
throw(cgoWriteBarrierFail)
} else {
hbits := heapBitsForAddr(uintptr(src), size)
for {
var addr uintptr
if hbits, addr = hbits.next(); addr == 0 {
break
}
v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
if cgoIsGoPointer(v) && !isPinned(v) {
throw(cgoWriteBarrierFail)
}
}
}
}

@ -9,6 +9,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
@ -326,6 +327,14 @@ func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) {
// no valid racectx, but if we're instantiated in the runtime_test package,
// we might accidentally cause runtime code to be incorrectly instrumented.
func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
// This benchmark doesn't work with the allocheaders experiment. It sets up
// an elaborate scenario to be able to benchmark the function safely, but doing
// this work for the allocheaders' version of the function would be complex.
// Just fail instead and rely on the test code making sure we never get here.
if goexperiment.AllocHeaders {
panic("called benchSetType with allocheaders experiment enabled")
}
// Compute the input sizes.
size := t.Size() * uintptr(len)
@ -340,7 +349,7 @@ func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type)
// Round up the size to the size class to make the benchmark a little more
// realistic. However, validate it, to make sure this is safe.
allocSize := roundupsize(size)
allocSize := roundupsize(size, t.PtrBytes == 0)
if s.npages*pageSize < allocSize {
panic("backing span not large enough for benchmark")
}

@ -6,6 +6,7 @@ package runtime_test
import (
"fmt"
"internal/goexperiment"
"math/rand"
"os"
"reflect"
@ -457,11 +458,17 @@ func BenchmarkSetTypeNode1024Slice(b *testing.B) {
}
func benchSetType[T any](b *testing.B) {
if goexperiment.AllocHeaders {
b.Skip("not supported with allocation headers experiment")
}
b.SetBytes(int64(unsafe.Sizeof(*new(T))))
runtime.BenchSetType[T](b.N, b.ResetTimer)
}
func benchSetTypeSlice[T any](b *testing.B, len int) {
if goexperiment.AllocHeaders {
b.Skip("not supported with allocation headers experiment")
}
b.SetBytes(int64(unsafe.Sizeof(*new(T)) * uintptr(len)))
runtime.BenchSetTypeSlice[T](b.N, b.ResetTimer, len)
}

@ -91,10 +91,17 @@ func TestGCInfo(t *testing.T) {
func verifyGCInfo(t *testing.T, name string, p any, mask0 []byte) {
mask := runtime.GCMask(p)
if !bytes.Equal(mask, mask0) {
t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask)
if bytes.HasPrefix(mask, mask0) {
// Just the prefix matching is OK.
//
// The Go runtime's pointer/scalar iterator generates pointers beyond
// the size of the type, up to the size of the size class. This space
// is safe for the GC to scan since it's zero, and GCBits checks to
// make sure that's true. But we need to handle the fact that the bitmap
// may be larger than we expect.
return
}
t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask)
}
func trimDead(mask []byte) []byte {

@ -14,6 +14,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"unsafe"
)
@ -737,16 +738,28 @@ func makeheapobjbv(p uintptr, size uintptr) bitvector {
for i := uintptr(0); i < nptr/8+1; i++ {
tmpbuf[i] = 0
}
hbits := heapBitsForAddr(p, size)
for {
var addr uintptr
hbits, addr = hbits.next()
if addr == 0 {
break
if goexperiment.AllocHeaders {
s := spanOf(p)
tp := s.typePointersOf(p, size)
for {
var addr uintptr
if tp, addr = tp.next(p + size); addr == 0 {
break
}
i := (addr - p) / goarch.PtrSize
tmpbuf[i/8] |= 1 << (i % 8)
}
} else {
hbits := heapBitsForAddr(p, size)
for {
var addr uintptr
hbits, addr = hbits.next()
if addr == 0 {
break
}
i := (addr - p) / goarch.PtrSize
tmpbuf[i/8] |= 1 << (i % 8)
}
i := (addr - p) / goarch.PtrSize
tmpbuf[i/8] |= 1 << (i % 8)
}
return bitvector{int32(nptr), &tmpbuf[0]}
}

@ -102,6 +102,7 @@ package runtime
import (
"internal/goarch"
"internal/goexperiment"
"internal/goos"
"runtime/internal/atomic"
"runtime/internal/math"
@ -424,6 +425,26 @@ func mallocinit() {
print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
throw("bad pagesPerReclaimerChunk")
}
if goexperiment.AllocHeaders {
// Check that the minimum size (exclusive) for a malloc header is also
// a size class boundary. This is important to making sure checks align
// across different parts of the runtime.
minSizeForMallocHeaderIsSizeClass := false
for i := 0; i < len(class_to_size); i++ {
if minSizeForMallocHeader == uintptr(class_to_size[i]) {
minSizeForMallocHeaderIsSizeClass = true
break
}
}
if !minSizeForMallocHeaderIsSizeClass {
throw("min size of malloc header is not a size class boundary")
}
// Check that the pointer bitmap for all small sizes without a malloc header
// fits in a word.
if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
throw("max pointer/scan bitmap size for headerless objects is too large")
}
}
if minTagBits > taggedPointerBits {
throw("taggedPointerbits too small")
@ -1016,12 +1037,22 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
throw("mallocgc called without a P or outside bootstrapping")
}
var span *mspan
var header **_type
var x unsafe.Pointer
noscan := typ == nil || typ.PtrBytes == 0
// In some cases block zeroing can profitably (for latency reduction purposes)
// be delayed till preemption is possible; delayedZeroing tracks that state.
delayedZeroing := false
if size <= maxSmallSize {
// Determine if it's a 'small' object that goes into a size-classed span.
//
// Note: This comparison looks a little strange, but it exists to smooth out
// the crossover between the largest size class and large objects that have
// their own spans. The small window of object sizes between maxSmallSize-mallocHeaderSize
// and maxSmallSize will be considered large, even though they might fit in
// a size class. In practice this is completely fine, since the largest small
// size class has a single object in it already, precisely to make the transition
// to large objects smooth.
if size <= maxSmallSize-mallocHeaderSize {
if noscan && size < maxTinySize {
// Tiny allocator.
//
@ -1096,6 +1127,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
size = maxTinySize
} else {
hasHeader := !noscan && !heapBitsInSpan(size)
if goexperiment.AllocHeaders && hasHeader {
size += mallocHeaderSize
}
var sizeclass uint8
if size <= smallSizeMax-8 {
sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
@ -1113,6 +1148,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, size)
}
if goexperiment.AllocHeaders && hasHeader {
header = (**_type)(x)
x = add(x, mallocHeaderSize)
size -= mallocHeaderSize
}
}
} else {
shouldhelpgc = true
@ -1128,29 +1168,30 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
delayedZeroing = true
} else {
memclrNoHeapPointers(x, size)
// We've in theory cleared almost the whole span here,
// and could take the extra step of actually clearing
// the whole thing. However, don't. Any GC bits for the
// uncleared parts will be zero, and it's just going to
// be needzero = 1 once freed anyway.
}
}
if goexperiment.AllocHeaders && !noscan {
header = &span.largeType
}
}
if !noscan {
var scanSize uintptr
heapBitsSetType(uintptr(x), size, dataSize, typ)
if dataSize > typ.Size_ {
// Array allocation. If there are any
// pointers, GC has to scan to the last
// element.
if typ.PtrBytes != 0 {
scanSize = dataSize - typ.Size_ + typ.PtrBytes
}
if goexperiment.AllocHeaders {
c.scanAlloc += heapSetType(uintptr(x), dataSize, typ, header, span)
} else {
scanSize = typ.PtrBytes
var scanSize uintptr
heapBitsSetType(uintptr(x), size, dataSize, typ)
if dataSize > typ.Size_ {
// Array allocation. If there are any
// pointers, GC has to scan to the last
// element.
if typ.PtrBytes != 0 {
scanSize = dataSize - typ.Size_ + typ.PtrBytes
}
} else {
scanSize = typ.PtrBytes
}
c.scanAlloc += scanSize
}
c.scanAlloc += scanSize
}
// Ensure that the stores above that initialize x to
@ -1176,7 +1217,12 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// This may be racing with GC so do it atomically if there can be
// a race marking the bit.
if gcphase != _GCoff {
gcmarknewobject(span, uintptr(x), size)
// Pass the full size of the allocation to the number of bytes
// marked.
//
// If !goexperiment.AllocHeaders, "size" doesn't include the
// allocation header, so use span.elemsize unconditionally.
gcmarknewobject(span, uintptr(x), span.elemsize)
}
if raceenabled {
@ -1215,6 +1261,9 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if !noscan {
throw("delayed zeroing on data that may contain pointers")
}
if goexperiment.AllocHeaders && header != nil {
throw("unexpected malloc header in delayed zeroing of large object")
}
memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
}
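
Pulling the header handling out of the hunks above into one place, the
small-object path amounts to the following sketch (rawAlloc and rtype
are hypothetical stand-ins; the real work happens inline in mallocgc):

    package sketch

    import "unsafe"

    type rtype struct{ Size, PtrBytes uintptr } // stand-in for the runtime's _type

    const mallocHeaderSize = unsafe.Sizeof(uintptr(0))

    // allocWithHeader grows the request by one word, stores the type
    // pointer in that word, and returns the address just past it,
    // mirroring the hasHeader branch of mallocgc above.
    func allocWithHeader(size uintptr, typ *rtype, rawAlloc func(uintptr) unsafe.Pointer) unsafe.Pointer {
        size += mallocHeaderSize               // ask the size class for one extra word
        x := rawAlloc(size)                    // stand-in for the size-class allocation
        *(**rtype)(x) = typ                    // the first word holds the type pointer
        return unsafe.Add(x, mallocHeaderSize) // object data starts after the header
    }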

@ -354,7 +354,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
// used with this value of b.
nbuckets += bucketShift(b - 4)
sz := t.Bucket.Size_ * nbuckets
up := roundupsize(sz)
up := roundupsize(sz, t.Bucket.PtrBytes == 0)
if up != sz {
nbuckets = up / t.Bucket.Size_
}

File diff suppressed because it is too large.

@ -47,6 +47,19 @@ import (
"unsafe"
)
const (
// For compatibility with the allocheaders GOEXPERIMENT.
mallocHeaderSize = 0
minSizeForMallocHeader = ^uintptr(0)
)
// For compatibility with the allocheaders GOEXPERIMENT.
//
//go:nosplit
func heapBitsInSpan(_ uintptr) bool {
return false
}
// heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
type heapArenaPtrScalar struct {
// bitmap stores the pointer/scalar bitmap for the words in
@ -671,6 +684,11 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
}
}
// For goexperiment.AllocHeaders
func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
return 0
}
// Testing.
// Returns GC type info for the pointer stored in ep for testing.
@ -765,7 +783,8 @@ func getgcmask(ep any) (mask []byte) {
// non-slice-backing-store Go values allocated in a user arena chunk. It
// sets up the heap bitmap for the value with type typ allocated at address ptr.
// base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
base := s.base()
h := writeHeapBitsForAddr(uintptr(ptr))
// Our last allocation might have ended right at a noMorePtrs mark,
@ -855,3 +874,53 @@ func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
}
}
}
// For goexperiment.AllocHeaders.
type typePointers struct {
addr uintptr
}
// For goexperiment.AllocHeaders.
//
//go:nosplit
func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
panic("not implemented")
}
// For goexperiment.AllocHeaders.
//
//go:nosplit
func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
panic("not implemented")
}
// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) nextFast() (typePointers, uintptr) {
panic("not implemented")
}
// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
panic("not implemented")
}
// For goexperiment.AllocHeaders.
//
//go:nosplit
func (tp typePointers) fastForward(n, limit uintptr) typePointers {
panic("not implemented")
}
// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
func (s *mspan) writeHeapBits() {
panic("not implemented")
}
// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
func heapBitsSlice() {
panic("not implemented")
}

@ -9,6 +9,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@ -410,7 +411,7 @@ func SetFinalizer(obj any, finalizer any) {
}
// find the containing object
base, _, _ := findObject(uintptr(e.data), 0, 0)
base, span, _ := findObject(uintptr(e.data), 0, 0)
if base == 0 {
if isGoPointerWithoutSpan(e.data) {
@ -419,6 +420,11 @@ func SetFinalizer(obj any, finalizer any) {
throw("runtime.SetFinalizer: pointer not in allocated block")
}
// Move base forward if we've got an allocation header.
if goexperiment.AllocHeaders && !span.spanclass.noscan() && !heapBitsInSpan(span.elemsize) && span.spanclass.sizeclass() != 0 {
base += mallocHeaderSize
}
if uintptr(e.data) != base {
// As an implementation detail we allow to set finalizers for an inner byte
// of an object if it could come from tiny alloc (see mallocgc for details).

@ -9,6 +9,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
"internal/goexperiment"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@ -1306,6 +1307,7 @@ func scanobject(b uintptr, gcw *gcWork) {
throw("scanobject of a noscan object")
}
var tp typePointers
if n > maxObletBytes {
// Large object. Break into oblets for better
// parallelism and lower latency.
@ -1327,15 +1329,34 @@ func scanobject(b uintptr, gcw *gcWork) {
// of the object.
n = s.base() + s.elemsize - b
n = min(n, maxObletBytes)
if goexperiment.AllocHeaders {
tp = s.typePointersOfUnchecked(s.base())
tp = tp.fastForward(b-tp.addr, b+n)
}
} else {
if goexperiment.AllocHeaders {
tp = s.typePointersOfUnchecked(b)
}
}
hbits := heapBitsForAddr(b, n)
var hbits heapBits
if !goexperiment.AllocHeaders {
hbits = heapBitsForAddr(b, n)
}
var scanSize uintptr
for {
var addr uintptr
if hbits, addr = hbits.nextFast(); addr == 0 {
if hbits, addr = hbits.next(); addr == 0 {
break
if goexperiment.AllocHeaders {
if tp, addr = tp.nextFast(); addr == 0 {
if tp, addr = tp.next(b + n); addr == 0 {
break
}
}
} else {
if hbits, addr = hbits.nextFast(); addr == 0 {
if hbits, addr = hbits.next(); addr == 0 {
break
}
}
}
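
The scanning idiom, extracted from the hunk above (shape only; tp, b, n,
and the per-pointer work all belong to scanobject, so this is not
standalone code):

    //    tp := s.typePointersOfUnchecked(b)
    //    for {
    //        var addr uintptr
    //        if tp, addr = tp.nextFast(); addr == 0 {
    //            if tp, addr = tp.next(b + n); addr == 0 {
    //                break
    //            }
    //        }
    //        // addr is a word of the object that may hold a pointer.
    //    }
    //
    // nextFast is the inlined fast path (hence its presence in the
    // inlining test at the top of this diff); next takes an explicit
    // limit, here the end of the object or oblet being scanned.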

@ -25,6 +25,7 @@
package runtime
import (
"internal/goexperiment"
"runtime/internal/atomic"
"unsafe"
)
@ -786,6 +787,15 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
} else {
mheap_.freeSpan(s)
}
if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.Kind_&kindGCProg != 0 {
// In the allocheaders experiment, the unrolled GCProg bitmap is allocated separately.
// Free the space for the unrolled bitmap.
systemstack(func() {
s := spanOf(uintptr(unsafe.Pointer(s.largeType)))
mheap_.freeManual(s, spanAllocPtrScalarBits)
})
s.largeType = nil
}
// Count the free in the consistent, external stats.
stats := memstats.heapStats.acquire()

@ -11,6 +11,7 @@ package runtime
import (
"internal/cpu"
"internal/goarch"
"internal/goexperiment"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@ -487,6 +488,7 @@ type mspan struct {
speciallock mutex // guards specials list and changes to pinnerBits
specials *special // linked list of special records sorted by offset.
userArenaChunkFree addrRange // interval for managing chunk allocation
largeType *_type // malloc header for large objects.
}
func (s *mspan) base() uintptr {
@ -564,10 +566,12 @@ func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
}
//go:nosplit
func (sc spanClass) sizeclass() int8 {
return int8(sc >> 1)
}
//go:nosplit
func (sc spanClass) noscan() bool {
return sc&1 != 0
}
@ -1387,7 +1391,12 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
s.divMul = 0
} else {
s.elemsize = uintptr(class_to_size[sizeclass])
s.nelems = uint16(nbytes / s.elemsize)
if goexperiment.AllocHeaders && !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) {
// In the allocheaders experiment, reserve space for the pointer/scan bitmap at the end.
s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
} else {
s.nelems = uint16(nbytes / s.elemsize)
}
s.divMul = class_to_divmagic[sizeclass]
}
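
Worked example of the nelems computation above, with illustrative
numbers (an 8 KiB span of 32-byte scan objects on a 64-bit platform):

    package sketch

    const (
        nbytes   = 8192 // span size, for illustration
        elemsize = 32   // object size, for illustration
        ptrSize  = 8    // pointer-word size on 64-bit

        bitmapBytes = nbytes / ptrSize / 8              // 128 bytes reserved at the span's end
        nelems      = (nbytes - bitmapBytes) / elemsize // 252 objects, down from 256
    )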

@ -11,17 +11,26 @@
package runtime
// Returns size of the memory block that mallocgc will allocate if you ask for the size.
func roundupsize(size uintptr) uintptr {
if size < _MaxSmallSize {
if size <= smallSizeMax-8 {
return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
} else {
return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
// Returns size of the memory block that mallocgc will allocate if you ask for the size,
// minus any inline space for metadata.
func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
reqSize = size
if reqSize <= maxSmallSize-mallocHeaderSize {
// Small object.
if !noscan && reqSize > minSizeForMallocHeader { // !noscan && !heapBitsInSpan(reqSize)
reqSize += mallocHeaderSize
}
// (reqSize - size) is either mallocHeaderSize or 0. We need to subtract mallocHeaderSize
// from the result if we have one, since mallocgc will add it back in.
if reqSize <= smallSizeMax-8 {
return uintptr(class_to_size[size_to_class8[divRoundUp(reqSize, smallSizeDiv)]]) - (reqSize - size)
}
return uintptr(class_to_size[size_to_class128[divRoundUp(reqSize-smallSizeMax, largeSizeDiv)]]) - (reqSize - size)
}
if size+_PageSize < size {
// Large object. Align reqSize up to the next page. Check for overflow.
reqSize += pageSize - 1
if reqSize < size {
return size
}
return alignUp(size, _PageSize)
return reqSize &^ (pageSize - 1)
}
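
A few worked calls against the new signature, assuming the values
implied by the commit message (mallocHeaderSize = 8, a 512-byte header
threshold) and the standard size-class table:

    //    roundupsize(1000, true)  == 1024 // noscan: no header, plain size-class rounding
    //    roundupsize(1000, false) == 1016 // 1000+8 rounds to the 1024 class, minus the header
    //    roundupsize(100, false)  == 112  // under 512 bytes: no header, bitmap lives in the span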

@ -12,7 +12,9 @@
package runtime
// Returns size of the memory block that mallocgc will allocate if you ask for the size.
func roundupsize(size uintptr) uintptr {
//
// The noscan argument is purely for compatibility with goexperiment.AllocHeaders.
func roundupsize(size uintptr, noscan bool) uintptr {
if size < _MaxSmallSize {
if size <= smallSizeMax-8 {
return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])

@ -223,6 +223,7 @@ type symbolizeDataContext struct {
func raceSymbolizeData(ctx *symbolizeDataContext) {
if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
// TODO: Does this need to handle malloc headers?
ctx.heap = 1
ctx.start = base
ctx.size = span.elemsize

@ -179,17 +179,18 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
// For 1 we don't need any division/multiplication.
// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
// For powers of 2, use a variable shift.
noscan := et.PtrBytes == 0
switch {
case et.Size_ == 1:
lenmem = uintptr(oldLen)
newlenmem = uintptr(newLen)
capmem = roundupsize(uintptr(newcap))
capmem = roundupsize(uintptr(newcap), noscan)
overflow = uintptr(newcap) > maxAlloc
newcap = int(capmem)
case et.Size_ == goarch.PtrSize:
lenmem = uintptr(oldLen) * goarch.PtrSize
newlenmem = uintptr(newLen) * goarch.PtrSize
capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
newcap = int(capmem / goarch.PtrSize)
case isPowerOfTwo(et.Size_):
@ -202,7 +203,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
}
lenmem = uintptr(oldLen) << shift
newlenmem = uintptr(newLen) << shift
capmem = roundupsize(uintptr(newcap) << shift)
capmem = roundupsize(uintptr(newcap)<<shift, noscan)
overflow = uintptr(newcap) > (maxAlloc >> shift)
newcap = int(capmem >> shift)
capmem = uintptr(newcap) << shift
@ -210,7 +211,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
lenmem = uintptr(oldLen) * et.Size_
newlenmem = uintptr(newLen) * et.Size_
capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
capmem = roundupsize(capmem)
capmem = roundupsize(capmem, noscan)
newcap = int(capmem / et.Size_)
capmem = uintptr(newcap) * et.Size_
}

@ -270,7 +270,7 @@ func rawstring(size int) (s string, b []byte) {
// rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
func rawbyteslice(size int) (b []byte) {
cap := roundupsize(uintptr(size))
cap := roundupsize(uintptr(size), true)
p := mallocgc(cap, nil, false)
if cap != uintptr(size) {
memclrNoHeapPointers(add(p, uintptr(size)), cap-uintptr(size))
@ -285,7 +285,7 @@ func rawruneslice(size int) (b []rune) {
if uintptr(size) > maxAlloc/4 {
throw("out of memory")
}
mem := roundupsize(uintptr(size) * 4)
mem := roundupsize(uintptr(size)*4, true)
p := mallocgc(mem, nil, false)
if mem != uintptr(size)*4 {
memclrNoHeapPointers(add(p, uintptr(size)*4), mem-uintptr(size)*4)

@ -424,11 +424,15 @@ func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOf
func systemstack_switch()
// alignUp rounds n up to a multiple of a. a must be a power of 2.
//
//go:nosplit
func alignUp(n, a uintptr) uintptr {
return (n + a - 1) &^ (a - 1)
}
// alignDown rounds n down to a multiple of a. a must be a power of 2.
//
//go:nosplit
func alignDown(n, a uintptr) uintptr {
return n &^ (a - 1)
}