runtime: implement experiment to replace heap bitmap with alloc headers

This change replaces the 1-bit-per-word heap bitmap for most size
classes with allocation headers for objects that contain pointers. The
header consists of a single pointer to a type. All allocations with
headers are treated as implicitly containing one or more instances of
the type in the header.
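
To make that concrete, here is a minimal sketch of how a scanner can recover
the type of a header-carrying object; the rtype stand-in, the typeOf helper,
and the 8-byte header size are illustrative assumptions, not the runtime's
actual code:

    package sketch

    import "unsafe"

    // rtype stands in for the runtime's internal _type.
    type rtype struct {
        Size_    uintptr // size of one element
        PtrBytes uintptr // prefix of the element that can contain pointers
        GCData   *byte   // 1-bit-per-word pointer bitmap for one element
    }

    // typeOf sketches the lookup: the header is the pointer-sized word
    // immediately before the address mallocgc hands back to the caller.
    func typeOf(obj unsafe.Pointer) *rtype {
        const mallocHeaderSize = 8 // one pointer word on 64-bit (assumption)
        return *(**rtype)(unsafe.Pointer(uintptr(obj) - mallocHeaderSize))
    }

An N-byte allocation with such a header is then scanned as if it held
N / Size_ back-to-back values of that type.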

As the name implies, headers are usually stored as the first word of an
object. There are two exceptions to where headers are stored and how
they're used.

Objects smaller than 512 bytes do not have headers. Instead, a heap
bitmap is reserved at the end of spans for objects of this size; a full
word of overhead is too much for these small objects. The bitmap has the
same format as the old bitmap, minus the noMorePtrs bits, which are
unnecessary: every object smaller than 512 bytes has a bitmap less than a
pointer-word in size, and a pointer-word was the granularity at which
noMorePtrs could stop scanning early anyway.
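
The arithmetic behind that claim, assuming a 64-bit platform with 8-byte
pointer-words (a quick sanity check, not code from this CL):

    const (
        ptrSize        = 8                       // bytes per pointer-word (64-bit)
        headerlessMax  = 512                     // objects below this get no header
        maxWords       = headerlessMax / ptrSize // 64 words to describe
        maxBitmapBytes = maxWords / 8            // 64 bits = 8 bytes: one pointer-word
    )

This is also exactly what the new mallocinit check in this CL verifies: the
headerless bitmap must fit in a single word.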

Objects larger than 32 KiB (which get their own span) have their headers
stored directly in the span, so that power-of-two-sized allocations do
not spill over into an extra page.
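
Putting the three cases together, a hedged sketch of the placement rule (the
helper and its strings are illustrative; the thresholds are the ones quoted
above, and mallocgc actually compares against maxSmallSize-mallocHeaderSize
so the header can never push an allocation past the largest size class):

    // Where does the pointer/scalar metadata for an allocation live?
    func metadataPlacement(size uintptr, hasPointers bool) string {
        switch {
        case !hasPointers:
            return "nowhere: noscan objects need no header and no bitmap"
        case size < 512:
            return "bitmap reserved at the end of the object's span"
        case size <= 32<<10:
            return "one-word header stored as the first word of the object"
        default:
            return "header stored on the span itself (large object with its own span)"
        }
    }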

The full implementation is behind GOEXPERIMENT=allocheaders.

The purpose of this change is performance. First and foremost, with
headers we no longer have to unroll pointer/scalar data at allocation
time for most size classes. Small size classes still need some
unrolling, but their bitmaps are small so we can optimize that case
fairly well. Larger objects effectively have their pointer/scalar data
unrolled on-demand from type data, which is much more compactly
represented and results in less TLB pressure. Furthermore, since the
headers are usually right next to the object and where we're about to
start scanning, we get an additional temporal locality benefit in the
data cache when looking up type metadata. This on-demand unrolling is
also simpler than before: the unrolled data is never written anywhere,
and for arrays we get the benefit of reusing the same type data for each
element, rather than looking it up from scratch for each pointer-word of
bitmap. Lastly,
because we no longer have a heap bitmap that spans the entire heap,
there's a flat 1.5% memory use reduction. This is offset slightly by
some objects possibly being bumped up a size class to make room for the
header, but most objects are not tightly sized to their size class, so
there is usually slack to spare, making the header essentially free in
those cases.
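
The per-element reuse mentioned above can be illustrated with a short,
self-contained sketch (parameter names and the 8-byte word size are
assumptions; the real iterator is the typePointers type this CL adds):

    // pointerOffsets returns the byte offsets of pointer slots in an array of
    // n elements, walking one element's 1-bit-per-word bitmap once per element
    // rather than consulting a heap-wide bitmap word by word.
    func pointerOffsets(elemSize, elemPtrBytes uintptr, elemBitmap []byte, n uintptr) []uintptr {
        const ptrSize = 8 // 64-bit assumption
        var offs []uintptr
        for i := uintptr(0); i < n; i++ {
            base := i * elemSize
            for w := uintptr(0); w < elemPtrBytes/ptrSize; w++ {
                if elemBitmap[w/8]>>(w%8)&1 != 0 {
                    offs = append(offs, base+w*ptrSize)
                }
            }
        }
        return offs
    }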

For benchmark results, see the follow-up CL (CL 538217), which turns this
experiment on by default.

Change-Id: I4c9034ee200650d06d8bdecd579d5f7c1bbf1fc5
Reviewed-on: https://go-review.googlesource.com/c/go/+/437955
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Author:    Michael Anthony Knyszek, 2022-09-11 04:07:41 +00:00
Committer: Michael Knyszek
Commit:    38ac7c41aa
Parent:    25867485a7
23 changed files with 1138 additions and 533 deletions

View File

@@ -73,11 +73,13 @@ func TestIntendedInlining(t *testing.T) {
 		"gclinkptr.ptr",
 		"guintptr.ptr",
 		"writeHeapBitsForAddr",
+		"heapBitsSlice",
 		"markBits.isMarked",
 		"muintptr.ptr",
 		"puintptr.ptr",
 		"spanOf",
 		"spanOfUnchecked",
+		"typePointers.nextFast",
 		"(*gcWork).putFast",
 		"(*gcWork).tryGetFast",
 		"(*guintptr).set",
@@ -86,6 +88,7 @@ func TestIntendedInlining(t *testing.T) {
 		"(*mspan).base",
 		"(*mspan).markBitsForBase",
 		"(*mspan).markBitsForIndex",
+		"(*mspan).writeHeapBits",
 		"(*muintptr).set",
 		"(*puintptr).set",
 		"(*wbBuf).get1",

View File

@@ -7030,10 +7030,18 @@ func verifyGCBits(t *testing.T, typ Type, bits []byte) {
 	// e.g. with rep(2, lit(1, 0)).
 	bits = trimBitmap(bits)
-	if !bytes.Equal(heapBits, bits) {
-		_, _, line, _ := runtime.Caller(1)
-		t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
+	if bytes.HasPrefix(heapBits, bits) {
+		// Just the prefix matching is OK.
+		//
+		// The Go runtime's pointer/scalar iterator generates pointers beyond
+		// the size of the type, up to the size of the size class. This space
+		// is safe for the GC to scan since it's zero, and GCBits checks to
+		// make sure that's true. But we need to handle the fact that the bitmap
+		// may be larger than we expect.
+		return
 	}
+	_, _, line, _ := runtime.Caller(1)
+	t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits)
 }

 func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
@@ -7042,15 +7050,20 @@ func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) {
 	// repeat a bitmap for a small array or executing a repeat in
 	// a GC program.
 	val := MakeSlice(typ, 0, cap)
-	data := NewAt(ArrayOf(cap, typ.Elem()), val.UnsafePointer())
+	data := NewAt(typ.Elem(), val.UnsafePointer())
 	heapBits := GCBits(data.Interface())
 	// Repeat the bitmap for the slice size, trimming scalars in
 	// the last element.
 	bits = trimBitmap(rep(cap, bits))
-	if !bytes.Equal(heapBits, bits) {
-		_, _, line, _ := runtime.Caller(1)
-		t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
+	if bytes.Equal(heapBits, bits) {
+		return
 	}
+	if len(heapBits) > len(bits) && bytes.Equal(heapBits[:len(bits)], bits) {
+		// Just the prefix matching is OK.
+		return
+	}
+	_, _, line, _ := runtime.Caller(1)
+	t.Errorf("line %d: heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", line, typ, cap, heapBits, bits)
 }

 func TestGCBits(t *testing.T) {

View File

@@ -83,6 +83,8 @@
 package runtime

 import (
+	"internal/goarch"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"runtime/internal/math"
 	"unsafe"
@@ -218,6 +220,19 @@ func init() {
 	lockInit(&userArenaState.lock, lockRankUserArenaState)
 }

+// userArenaChunkReserveBytes returns the amount of additional bytes to reserve for
+// heap metadata.
+func userArenaChunkReserveBytes() uintptr {
+	if goexperiment.AllocHeaders {
+		// In the allocation headers experiment, we reserve the end of the chunk for
+		// a pointer/scalar bitmap. We also reserve space for a dummy _type that
+		// refers to the bitmap. The PtrBytes field of the dummy _type indicates how
+		// many of those bits are valid.
+		return userArenaChunkBytes/goarch.PtrSize/8 + unsafe.Sizeof(_type{})
+	}
+	return 0
+}
+
 type userArena struct {
 	// full is a list of full chunks that have not enough free memory left, and
 	// that we'll free once this user arena is freed.
@@ -491,9 +506,9 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
 	// Set up heap bitmap and do extra accounting.
 	if typ.PtrBytes != 0 {
 		if cap >= 0 {
-			userArenaHeapBitsSetSliceType(typ, cap, ptr, s.base())
+			userArenaHeapBitsSetSliceType(typ, cap, ptr, s)
 		} else {
-			userArenaHeapBitsSetType(typ, ptr, s.base())
+			userArenaHeapBitsSetType(typ, ptr, s)
 		}
 		c := getMCache(mp)
 		if c == nil {
@@ -523,13 +538,13 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
 // userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for
 // Go slice backing store values allocated in a user arena chunk. It sets up the
 // heap bitmap for n consecutive values with type typ allocated at address ptr.
-func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, base uintptr) {
+func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan) {
 	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
 	if overflow || n < 0 || mem > maxAlloc {
 		panic(plainError("runtime: allocation size out of range"))
 	}
 	for i := 0; i < n; i++ {
-		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), base)
+		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), s)
 	}
 }
@@ -591,9 +606,12 @@ func newUserArenaChunk() (unsafe.Pointer, *mspan) {
 		// TODO(mknyszek): Track individual objects.
 		rzSize := computeRZlog(span.elemsize)
 		span.elemsize -= rzSize
-		span.limit -= rzSize
-		span.userArenaChunkFree = makeAddrRange(span.base(), span.limit)
-		asanpoison(unsafe.Pointer(span.limit), span.npages*pageSize-span.elemsize)
+		if goexperiment.AllocHeaders {
+			span.largeType.Size_ = span.elemsize
+		}
+		rzStart := span.base() + span.elemsize
+		span.userArenaChunkFree = makeAddrRange(span.base(), rzStart)
+		asanpoison(unsafe.Pointer(rzStart), span.limit-rzStart)
 		asanunpoison(unsafe.Pointer(span.base()), span.elemsize)
 	}
@@ -694,7 +712,7 @@ func (s *mspan) setUserArenaChunkToFault() {
 	// the span gets off the quarantine list. The main reason is so that the
 	// amount of bytes allocated doesn't exceed how much is counted as
 	// "mapped ready," which could cause a deadlock in the pacer.
-	gcController.totalFree.Add(int64(s.npages * pageSize))
+	gcController.totalFree.Add(int64(s.elemsize))

 	// Update consistent stats to match.
 	//
@@ -704,11 +722,11 @@ func (s *mspan) setUserArenaChunkToFault() {
 	atomic.Xaddint64(&stats.committed, -int64(s.npages*pageSize))
 	atomic.Xaddint64(&stats.inHeap, -int64(s.npages*pageSize))
 	atomic.Xadd64(&stats.largeFreeCount, 1)
-	atomic.Xadd64(&stats.largeFree, int64(s.npages*pageSize))
+	atomic.Xadd64(&stats.largeFree, int64(s.elemsize))
 	memstats.heapStats.release()

 	// This counts as a free, so update heapLive.
-	gcController.update(-int64(s.npages*pageSize), 0)
+	gcController.update(-int64(s.elemsize), 0)

 	// Mark it as free for the race detector.
 	if raceenabled {
@@ -856,6 +874,10 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 	spc := makeSpanClass(0, false)
 	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
 	s.isUserArenaChunk = true
+	s.elemsize -= userArenaChunkReserveBytes()
+	s.limit = s.base() + s.elemsize
+	s.freeindex = 1
+	s.allocCount = 1

 	// Account for this new arena chunk memory.
 	gcController.heapInUse.add(int64(userArenaChunkBytes))
@@ -866,22 +888,15 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 	atomic.Xaddint64(&stats.committed, int64(userArenaChunkBytes))

 	// Model the arena as a single large malloc.
-	atomic.Xadd64(&stats.largeAlloc, int64(userArenaChunkBytes))
+	atomic.Xadd64(&stats.largeAlloc, int64(s.elemsize))
 	atomic.Xadd64(&stats.largeAllocCount, 1)
 	memstats.heapStats.release()

 	// Count the alloc in inconsistent, internal stats.
-	gcController.totalAlloc.Add(int64(userArenaChunkBytes))
+	gcController.totalAlloc.Add(int64(s.elemsize))

 	// Update heapLive.
-	gcController.update(int64(userArenaChunkBytes), 0)
-
-	// Put the large span in the mcentral swept list so that it's
-	// visible to the background sweeper.
-	h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
-	s.limit = s.base() + userArenaChunkBytes
-	s.freeindex = 1
-	s.allocCount = 1
+	gcController.update(int64(s.elemsize), 0)

 	// This must clear the entire heap bitmap so that it's safe
 	// to allocate noscan data without writing anything out.
@@ -902,6 +917,19 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 	s.freeIndexForScan = 1

 	// Set up the range for allocation.
-	s.userArenaChunkFree = makeAddrRange(base, s.limit)
+	s.userArenaChunkFree = makeAddrRange(base, base+s.elemsize)
+
+	// Put the large span in the mcentral swept list so that it's
+	// visible to the background sweeper.
+	h.central[spc].mcentral.fullSwept(h.sweepgen).push(s)
+
+	if goexperiment.AllocHeaders {
+		// Set up an allocation header. Avoid write barriers here because this type
+		// is not a real type, and it exists in an invalid location.
+		*(*uintptr)(unsafe.Pointer(&s.largeType)) = uintptr(unsafe.Pointer(s.limit))
+		*(*uintptr)(unsafe.Pointer(&s.largeType.GCData)) = s.limit + unsafe.Sizeof(_type{})
+		s.largeType.PtrBytes = 0
+		s.largeType.Size_ = s.elemsize
+	}
 	return s
 }
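
For a sense of scale, the reservation computed by userArenaChunkReserveBytes
above works out roughly as follows, assuming the usual 8 MiB user arena chunk
on 64-bit platforms (the chunk size and the _type size are assumptions; only
the formula comes from the diff):

    const (
        chunkBytes  = 8 << 20                  // assumed userArenaChunkBytes
        ptrSize     = 8                        // 64-bit pointer-word
        bitmapBytes = chunkBytes / ptrSize / 8 // 128 KiB: one bit per pointer-word
        typeBytes   = 48                       // ballpark for unsafe.Sizeof(_type{})
        reserve     = bitmapBytes + typeBytes  // carved off the end of the chunk
    )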

View File

@@ -664,19 +664,32 @@ func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
 		if base == 0 {
 			return
 		}
-		n := span.elemsize
-		hbits := heapBitsForAddr(base, n)
-		for {
-			var addr uintptr
-			if hbits, addr = hbits.next(); addr == 0 {
-				break
-			}
-			pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
-			if cgoIsGoPointer(pp) && !isPinned(pp) {
-				panic(errorString(msg))
-			}
-		}
+		if goexperiment.AllocHeaders {
+			tp := span.typePointersOfUnchecked(base)
+			for {
+				var addr uintptr
+				if tp, addr = tp.next(base + span.elemsize); addr == 0 {
+					break
+				}
+				pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
+				if cgoIsGoPointer(pp) && !isPinned(pp) {
+					panic(errorString(msg))
+				}
+			}
+		} else {
+			n := span.elemsize
+			hbits := heapBitsForAddr(base, n)
+			for {
+				var addr uintptr
+				if hbits, addr = hbits.next(); addr == 0 {
+					break
+				}
+				pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
+				if cgoIsGoPointer(pp) && !isPinned(pp) {
+					panic(errorString(msg))
+				}
+			}
+		}
 		return
 	}
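
The same iteration idiom recurs in every file this CL touches where
heapBitsForAddr used to be called. A condensed sketch of its shape (the
wrapper function and visit callback are illustrative; typePointersOf and
next are the real methods used above and stubbed later in this diff):

    // Visit every pointer slot that the type metadata reports in [addr, addr+size).
    func forEachPointer(span *mspan, addr, size uintptr, visit func(unsafe.Pointer)) {
        tp := span.typePointersOf(addr, size)
        for {
            var p uintptr
            if tp, p = tp.next(addr + size); p == 0 {
                break // no pointer slots left before the limit
            }
            visit(*(*unsafe.Pointer)(unsafe.Pointer(p))) // p is the address of a pointer slot
        }
    }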

View File

@@ -9,6 +9,7 @@ package runtime
 import (
 	"internal/goarch"
+	"internal/goexperiment"
 	"unsafe"
 )
@@ -176,16 +177,29 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
 	}

 	// src must be in the regular heap.
-	hbits := heapBitsForAddr(uintptr(src), size)
-	for {
-		var addr uintptr
-		if hbits, addr = hbits.next(); addr == 0 {
-			break
-		}
-		v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
-		if cgoIsGoPointer(v) && !isPinned(v) {
-			throw(cgoWriteBarrierFail)
-		}
+	if goexperiment.AllocHeaders {
+		tp := s.typePointersOf(uintptr(src), size)
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(uintptr(src) + size); addr == 0 {
+				break
+			}
+			v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
+			if cgoIsGoPointer(v) && !isPinned(v) {
+				throw(cgoWriteBarrierFail)
+			}
+		}
+	} else {
+		hbits := heapBitsForAddr(uintptr(src), size)
+		for {
+			var addr uintptr
+			if hbits, addr = hbits.next(); addr == 0 {
+				break
+			}
+			v := *(*unsafe.Pointer)(unsafe.Pointer(addr))
+			if cgoIsGoPointer(v) && !isPinned(v) {
+				throw(cgoWriteBarrierFail)
+			}
+		}
 	}
 }

View File

@@ -9,6 +9,7 @@ package runtime
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"internal/goos"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
@@ -326,6 +327,14 @@ func BenchSetTypeSlice[T any](n int, resetTimer func(), len int) {
 // no valid racectx, but if we're instantiated in the runtime_test package,
 // we might accidentally cause runtime code to be incorrectly instrumented.
 func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
+	// This benchmark doesn't work with the allocheaders experiment. It sets up
+	// an elaborate scenario to be able to benchmark the function safely, but doing
+	// this work for the allocheaders' version of the function would be complex.
+	// Just fail instead and rely on the test code making sure we never get here.
+	if goexperiment.AllocHeaders {
+		panic("called benchSetType with allocheaders experiment enabled")
+	}
+
 	// Compute the input sizes.
 	size := t.Size() * uintptr(len)
@@ -340,7 +349,7 @@ func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) {
 	// Round up the size to the size class to make the benchmark a little more
 	// realistic. However, validate it, to make sure this is safe.
-	allocSize := roundupsize(size)
+	allocSize := roundupsize(size, t.PtrBytes == 0)
 	if s.npages*pageSize < allocSize {
 		panic("backing span not large enough for benchmark")
 	}

View File

@@ -6,6 +6,7 @@ package runtime_test
 import (
 	"fmt"
+	"internal/goexperiment"
 	"math/rand"
 	"os"
 	"reflect"
@@ -457,11 +458,17 @@ func BenchmarkSetTypeNode1024Slice(b *testing.B) {
 }

 func benchSetType[T any](b *testing.B) {
+	if goexperiment.AllocHeaders {
+		b.Skip("not supported with allocation headers experiment")
+	}
 	b.SetBytes(int64(unsafe.Sizeof(*new(T))))
 	runtime.BenchSetType[T](b.N, b.ResetTimer)
 }

 func benchSetTypeSlice[T any](b *testing.B, len int) {
+	if goexperiment.AllocHeaders {
+		b.Skip("not supported with allocation headers experiment")
+	}
 	b.SetBytes(int64(unsafe.Sizeof(*new(T)) * uintptr(len)))
 	runtime.BenchSetTypeSlice[T](b.N, b.ResetTimer, len)
 }

View File

@@ -91,10 +91,17 @@ func TestGCInfo(t *testing.T) {
 func verifyGCInfo(t *testing.T, name string, p any, mask0 []byte) {
 	mask := runtime.GCMask(p)
-	if !bytes.Equal(mask, mask0) {
-		t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask)
+	if bytes.HasPrefix(mask, mask0) {
+		// Just the prefix matching is OK.
+		//
+		// The Go runtime's pointer/scalar iterator generates pointers beyond
+		// the size of the type, up to the size of the size class. This space
+		// is safe for the GC to scan since it's zero, and GCBits checks to
+		// make sure that's true. But we need to handle the fact that the bitmap
+		// may be larger than we expect.
 		return
 	}
+	t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask)
 }

 func trimDead(mask []byte) []byte {

View File

@@ -14,6 +14,7 @@ package runtime
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"unsafe"
 )
@@ -737,16 +738,28 @@ func makeheapobjbv(p uintptr, size uintptr) bitvector {
 	for i := uintptr(0); i < nptr/8+1; i++ {
 		tmpbuf[i] = 0
 	}
-	hbits := heapBitsForAddr(p, size)
-	for {
-		var addr uintptr
-		hbits, addr = hbits.next()
-		if addr == 0 {
-			break
-		}
-		i := (addr - p) / goarch.PtrSize
-		tmpbuf[i/8] |= 1 << (i % 8)
+	if goexperiment.AllocHeaders {
+		s := spanOf(p)
+		tp := s.typePointersOf(p, size)
+		for {
+			var addr uintptr
+			if tp, addr = tp.next(p + size); addr == 0 {
+				break
+			}
+			i := (addr - p) / goarch.PtrSize
+			tmpbuf[i/8] |= 1 << (i % 8)
+		}
+	} else {
+		hbits := heapBitsForAddr(p, size)
+		for {
+			var addr uintptr
+			hbits, addr = hbits.next()
+			if addr == 0 {
+				break
+			}
+			i := (addr - p) / goarch.PtrSize
+			tmpbuf[i/8] |= 1 << (i % 8)
+		}
 	}
 	return bitvector{int32(nptr), &tmpbuf[0]}
 }

View File

@@ -102,6 +102,7 @@ package runtime
 import (
 	"internal/goarch"
+	"internal/goexperiment"
 	"internal/goos"
 	"runtime/internal/atomic"
 	"runtime/internal/math"
@@ -424,6 +425,26 @@ func mallocinit() {
 		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
 		throw("bad pagesPerReclaimerChunk")
 	}
+	if goexperiment.AllocHeaders {
+		// Check that the minimum size (exclusive) for a malloc header is also
+		// a size class boundary. This is important to making sure checks align
+		// across different parts of the runtime.
+		minSizeForMallocHeaderIsSizeClass := false
+		for i := 0; i < len(class_to_size); i++ {
+			if minSizeForMallocHeader == uintptr(class_to_size[i]) {
+				minSizeForMallocHeaderIsSizeClass = true
+				break
+			}
+		}
+		if !minSizeForMallocHeaderIsSizeClass {
+			throw("min size of malloc header is not a size class boundary")
+		}
+		// Check that the pointer bitmap for all small sizes without a malloc header
+		// fits in a word.
+		if minSizeForMallocHeader/goarch.PtrSize > 8*goarch.PtrSize {
+			throw("max pointer/scan bitmap size for headerless objects is too large")
+		}
+	}

 	if minTagBits > taggedPointerBits {
 		throw("taggedPointerbits too small")
@@ -1016,12 +1037,22 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 		throw("mallocgc called without a P or outside bootstrapping")
 	}
 	var span *mspan
+	var header **_type
 	var x unsafe.Pointer
 	noscan := typ == nil || typ.PtrBytes == 0
 	// In some cases block zeroing can profitably (for latency reduction purposes)
 	// be delayed till preemption is possible; delayedZeroing tracks that state.
 	delayedZeroing := false
-	if size <= maxSmallSize {
+	// Determine if it's a 'small' object that goes into a size-classed span.
+	//
+	// Note: This comparison looks a little strange, but it exists to smooth out
+	// the crossover between the largest size class and large objects that have
+	// their own spans. The small window of object sizes between maxSmallSize-mallocHeaderSize
+	// and maxSmallSize will be considered large, even though they might fit in
+	// a size class. In practice this is completely fine, since the largest small
+	// size class has a single object in it already, precisely to make the transition
+	// to large objects smooth.
+	if size <= maxSmallSize-mallocHeaderSize {
 		if noscan && size < maxTinySize {
 			// Tiny allocator.
 			//
@@ -1096,6 +1127,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 			}
 			size = maxTinySize
 		} else {
+			hasHeader := !noscan && !heapBitsInSpan(size)
+			if goexperiment.AllocHeaders && hasHeader {
+				size += mallocHeaderSize
+			}
 			var sizeclass uint8
 			if size <= smallSizeMax-8 {
 				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
@@ -1113,6 +1148,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 			if needzero && span.needzero != 0 {
 				memclrNoHeapPointers(x, size)
 			}
+			if goexperiment.AllocHeaders && hasHeader {
+				header = (**_type)(x)
+				x = add(x, mallocHeaderSize)
+				size -= mallocHeaderSize
+			}
 		}
 	} else {
 		shouldhelpgc = true
@@ -1128,29 +1168,30 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 				delayedZeroing = true
 			} else {
 				memclrNoHeapPointers(x, size)
+				// We've in theory cleared almost the whole span here,
+				// and could take the extra step of actually clearing
+				// the whole thing. However, don't. Any GC bits for the
+				// uncleared parts will be zero, and it's just going to
+				// be needzero = 1 once freed anyway.
 			}
 		}
+		if goexperiment.AllocHeaders && !noscan {
+			header = &span.largeType
+		}
 	}

 	if !noscan {
-		var scanSize uintptr
-		heapBitsSetType(uintptr(x), size, dataSize, typ)
-		if dataSize > typ.Size_ {
-			// Array allocation. If there are any
-			// pointers, GC has to scan to the last
-			// element.
-			if typ.PtrBytes != 0 {
-				scanSize = dataSize - typ.Size_ + typ.PtrBytes
-			}
-		} else {
-			scanSize = typ.PtrBytes
-		}
-		c.scanAlloc += scanSize
+		if goexperiment.AllocHeaders {
+			c.scanAlloc += heapSetType(uintptr(x), dataSize, typ, header, span)
+		} else {
+			var scanSize uintptr
+			heapBitsSetType(uintptr(x), size, dataSize, typ)
+			if dataSize > typ.Size_ {
+				// Array allocation. If there are any
+				// pointers, GC has to scan to the last
+				// element.
+				if typ.PtrBytes != 0 {
+					scanSize = dataSize - typ.Size_ + typ.PtrBytes
+				}
+			} else {
+				scanSize = typ.PtrBytes
+			}
+			c.scanAlloc += scanSize
+		}
 	}

 	// Ensure that the stores above that initialize x to
@@ -1176,7 +1217,12 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	// This may be racing with GC so do it atomically if there can be
 	// a race marking the bit.
 	if gcphase != _GCoff {
-		gcmarknewobject(span, uintptr(x), size)
+		// Pass the full size of the allocation to the number of bytes
+		// marked.
+		//
+		// If !goexperiment.AllocHeaders, "size" doesn't include the
+		// allocation header, so use span.elemsize unconditionally.
+		gcmarknewobject(span, uintptr(x), span.elemsize)
 	}

 	if raceenabled {
@@ -1215,6 +1261,9 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 		if !noscan {
 			throw("delayed zeroing on data that may contain pointers")
 		}
+		if goexperiment.AllocHeaders && header != nil {
+			throw("unexpected malloc header in delayed zeroing of large object")
+		}
 		memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
 	}
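
A worked example of the small-object path above, assuming a 64-bit platform
(where mallocHeaderSize is one pointer word) and the current size-class
table; the excerpted classes are illustrative of that table, not values
defined in this diff:

    package main

    import "fmt"

    // Sketch of mallocgc's size accounting for a pointer-bearing 1024-byte
    // allocation under GOEXPERIMENT=allocheaders.
    func main() {
        const mallocHeaderSize = 8
        classes := []uintptr{768, 896, 1024, 1152, 1280} // excerpt of class_to_size
        req := uintptr(1024)           // has pointers and is above the 512-byte threshold
        need := req + mallocHeaderSize // 1032: the header travels with the object
        for _, c := range classes {
            if need <= c {
                fmt.Printf("request %d -> %d with header -> size class %d\n", req, need, c)
                break
            }
        }
        // mallocgc stores the *_type in the first word of the slot and returns
        // base+mallocHeaderSize, so callers never see the header.
    }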

View File

@@ -354,7 +354,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
 		// used with this value of b.
 		nbuckets += bucketShift(b - 4)
 		sz := t.Bucket.Size_ * nbuckets
-		up := roundupsize(sz)
+		up := roundupsize(sz, t.Bucket.PtrBytes == 0)
 		if up != sz {
 			nbuckets = up / t.Bucket.Size_
 		}

(File diff suppressed because it is too large.)

View File

@@ -47,6 +47,19 @@ import (
 	"unsafe"
 )

+const (
+	// For compatibility with the allocheaders GOEXPERIMENT.
+	mallocHeaderSize       = 0
+	minSizeForMallocHeader = ^uintptr(0)
+)
+
+// For compatibility with the allocheaders GOEXPERIMENT.
+//
+//go:nosplit
+func heapBitsInSpan(_ uintptr) bool {
+	return false
+}
+
 // heapArenaPtrScalar contains the per-heapArena pointer/scalar metadata for the GC.
 type heapArenaPtrScalar struct {
 	// bitmap stores the pointer/scalar bitmap for the words in
@@ -671,6 +684,11 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	}
 }

+// For goexperiment.AllocHeaders
+func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) (scanSize uintptr) {
+	return 0
+}
+
 // Testing.

 // Returns GC type info for the pointer stored in ep for testing.
@@ -765,7 +783,8 @@ func getgcmask(ep any) (mask []byte) {
 // non-slice-backing-store Go values allocated in a user arena chunk. It
 // sets up the heap bitmap for the value with type typ allocated at address ptr.
 // base is the base address of the arena chunk.
-func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
+func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan) {
+	base := s.base()
 	h := writeHeapBitsForAddr(uintptr(ptr))

 	// Our last allocation might have ended right at a noMorePtrs mark,
@@ -855,3 +874,53 @@
 		}
 	}
 }
+
+// For goexperiment.AllocHeaders.
+type typePointers struct {
+	addr uintptr
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (tp typePointers) nextFast() (typePointers, uintptr) {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders.
+//
+//go:nosplit
+func (tp typePointers) fastForward(n, limit uintptr) typePointers {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
+func (s *mspan) writeHeapBits() {
+	panic("not implemented")
+}
+
+// For goexperiment.AllocHeaders, to pass TestIntendedInlining.
+func heapBitsSlice() {
+	panic("not implemented")
+}

View File

@@ -9,6 +9,7 @@ package runtime
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -410,7 +411,7 @@ func SetFinalizer(obj any, finalizer any) {
 	}

 	// find the containing object
-	base, _, _ := findObject(uintptr(e.data), 0, 0)
+	base, span, _ := findObject(uintptr(e.data), 0, 0)
 	if base == 0 {
 		if isGoPointerWithoutSpan(e.data) {
@@ -419,6 +420,11 @@ func SetFinalizer(obj any, finalizer any) {
 		throw("runtime.SetFinalizer: pointer not in allocated block")
 	}

+	// Move base forward if we've got an allocation header.
+	if goexperiment.AllocHeaders && !span.spanclass.noscan() && !heapBitsInSpan(span.elemsize) && span.spanclass.sizeclass() != 0 {
+		base += mallocHeaderSize
+	}
+
 	if uintptr(e.data) != base {
 		// As an implementation detail we allow to set finalizers for an inner byte
 		// of an object if it could come from tiny alloc (see mallocgc for details).

View File

@@ -9,6 +9,7 @@ package runtime
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -1306,6 +1307,7 @@ func scanobject(b uintptr, gcw *gcWork) {
 		throw("scanobject of a noscan object")
 	}

+	var tp typePointers
 	if n > maxObletBytes {
 		// Large object. Break into oblets for better
 		// parallelism and lower latency.
@@ -1327,15 +1329,34 @@ func scanobject(b uintptr, gcw *gcWork) {
 		// of the object.
 		n = s.base() + s.elemsize - b
 		n = min(n, maxObletBytes)
+		if goexperiment.AllocHeaders {
+			tp = s.typePointersOfUnchecked(s.base())
+			tp = tp.fastForward(b-tp.addr, b+n)
+		}
+	} else {
+		if goexperiment.AllocHeaders {
+			tp = s.typePointersOfUnchecked(b)
+		}
 	}

-	hbits := heapBitsForAddr(b, n)
+	var hbits heapBits
+	if !goexperiment.AllocHeaders {
+		hbits = heapBitsForAddr(b, n)
+	}
 	var scanSize uintptr
 	for {
 		var addr uintptr
-		if hbits, addr = hbits.nextFast(); addr == 0 {
-			if hbits, addr = hbits.next(); addr == 0 {
-				break
+		if goexperiment.AllocHeaders {
+			if tp, addr = tp.nextFast(); addr == 0 {
+				if tp, addr = tp.next(b + n); addr == 0 {
+					break
+				}
+			}
+		} else {
+			if hbits, addr = hbits.nextFast(); addr == 0 {
+				if hbits, addr = hbits.next(); addr == 0 {
+					break
+				}
 			}
 		}

View File

@@ -25,6 +25,7 @@
 package runtime

 import (
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -786,6 +787,15 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 			} else {
 				mheap_.freeSpan(s)
 			}
+			if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.Kind_&kindGCProg != 0 {
+				// In the allocheaders experiment, the unrolled GCProg bitmap is allocated separately.
+				// Free the space for the unrolled bitmap.
+				systemstack(func() {
+					s := spanOf(uintptr(unsafe.Pointer(s.largeType)))
+					mheap_.freeManual(s, spanAllocPtrScalarBits)
+				})
+				s.largeType = nil
+			}

 			// Count the free in the consistent, external stats.
 			stats := memstats.heapStats.acquire()

View File

@@ -11,6 +11,7 @@ package runtime
 import (
 	"internal/cpu"
 	"internal/goarch"
+	"internal/goexperiment"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -487,6 +488,7 @@ type mspan struct {
 	speciallock        mutex     // guards specials list and changes to pinnerBits
 	specials           *special  // linked list of special records sorted by offset.
 	userArenaChunkFree addrRange // interval for managing chunk allocation
+	largeType          *_type    // malloc header for large objects.
 }

 func (s *mspan) base() uintptr {
@@ -564,10 +566,12 @@ func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
 	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
 }

+//go:nosplit
 func (sc spanClass) sizeclass() int8 {
 	return int8(sc >> 1)
 }

+//go:nosplit
 func (sc spanClass) noscan() bool {
 	return sc&1 != 0
 }
@@ -1387,7 +1391,12 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 		s.divMul = 0
 	} else {
 		s.elemsize = uintptr(class_to_size[sizeclass])
-		s.nelems = uint16(nbytes / s.elemsize)
+		if goexperiment.AllocHeaders && !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) {
+			// In the allocheaders experiment, reserve space for the pointer/scan bitmap at the end.
+			s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
+		} else {
+			s.nelems = uint16(nbytes / s.elemsize)
+		}
 		s.divMul = class_to_divmagic[sizeclass]
 	}
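
To see what the in-span bitmap reservation above costs, a back-of-the-envelope
sketch for one headerless scan size class (the 32-byte class with an 8 KiB
span is the standard layout; treat the numbers as illustrative):

    const (
        ptrSize     = 8                                    // 64-bit
        spanBytes   = 8 << 10                              // one 8 KiB span
        elemSize    = 32                                   // a headerless (<512 B) scan class
        bitmapBytes = spanBytes / ptrSize / 8              // 128 bytes reserved at the end
        nelemsOld   = spanBytes / elemSize                 // 256 objects without the reservation
        nelemsNew   = (spanBytes - bitmapBytes) / elemSize // 252 objects with it
    )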

View File

@@ -11,17 +11,26 @@
 package runtime

-// Returns size of the memory block that mallocgc will allocate if you ask for the size.
-func roundupsize(size uintptr) uintptr {
-	if size < _MaxSmallSize {
-		if size <= smallSizeMax-8 {
-			return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
-		} else {
-			return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
-		}
-	}
-	if size+_PageSize < size {
-		return size
-	}
-	return alignUp(size, _PageSize)
-}
+// Returns size of the memory block that mallocgc will allocate if you ask for the size,
+// minus any inline space for metadata.
+func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
+	reqSize = size
+	if reqSize <= maxSmallSize-mallocHeaderSize {
+		// Small object.
+		if !noscan && reqSize > minSizeForMallocHeader { // !noscan && !heapBitsInSpan(reqSize)
+			reqSize += mallocHeaderSize
+		}
+		// (reqSize - size) is either mallocHeaderSize or 0. We need to subtract mallocHeaderSize
+		// from the result if we have one, since mallocgc will add it back in.
+		if reqSize <= smallSizeMax-8 {
+			return uintptr(class_to_size[size_to_class8[divRoundUp(reqSize, smallSizeDiv)]]) - (reqSize - size)
+		}
+		return uintptr(class_to_size[size_to_class128[divRoundUp(reqSize-smallSizeMax, largeSizeDiv)]]) - (reqSize - size)
+	}
+	// Large object. Align reqSize up to the next page. Check for overflow.
+	reqSize += pageSize - 1
+	if reqSize < size {
+		return size
+	}
+	return reqSize &^ (pageSize - 1)
+}
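
A usage sketch of the new contract: roundupsize now reports usable bytes with
the inline header already carved out, so callers such as growslice and
makeBucketArray size their buffers correctly (64-bit and the current
size-class table assumed):

    // What roundupsize(1024, false) works out to under allocheaders.
    const (
        mallocHeaderSize = 8
        req              = uintptr(1024)            // pointer-bearing, so noscan == false
        withHeader       = req + mallocHeaderSize   // 1032
        class            = uintptr(1152)            // size class that 1032 rounds up to
        usable           = class - mallocHeaderSize // 1144: what the caller can actually use
    )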

View File

@@ -12,7 +12,9 @@
 package runtime

 // Returns size of the memory block that mallocgc will allocate if you ask for the size.
-func roundupsize(size uintptr) uintptr {
+//
+// The noscan argument is purely for compatibility with goexperiment.AllocHeaders.
+func roundupsize(size uintptr, noscan bool) uintptr {
 	if size < _MaxSmallSize {
 		if size <= smallSizeMax-8 {
 			return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])

View File

@@ -223,6 +223,7 @@ type symbolizeDataContext struct {
 func raceSymbolizeData(ctx *symbolizeDataContext) {
 	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
+		// TODO: Does this need to handle malloc headers?
 		ctx.heap = 1
 		ctx.start = base
 		ctx.size = span.elemsize

View File

@@ -179,17 +179,18 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
 	// For 1 we don't need any division/multiplication.
 	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
 	// For powers of 2, use a variable shift.
+	noscan := et.PtrBytes == 0
 	switch {
 	case et.Size_ == 1:
 		lenmem = uintptr(oldLen)
 		newlenmem = uintptr(newLen)
-		capmem = roundupsize(uintptr(newcap))
+		capmem = roundupsize(uintptr(newcap), noscan)
 		overflow = uintptr(newcap) > maxAlloc
 		newcap = int(capmem)
 	case et.Size_ == goarch.PtrSize:
 		lenmem = uintptr(oldLen) * goarch.PtrSize
 		newlenmem = uintptr(newLen) * goarch.PtrSize
-		capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
+		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
 		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
 		newcap = int(capmem / goarch.PtrSize)
 	case isPowerOfTwo(et.Size_):
@@ -202,7 +203,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
 		}
 		lenmem = uintptr(oldLen) << shift
 		newlenmem = uintptr(newLen) << shift
-		capmem = roundupsize(uintptr(newcap) << shift)
+		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
 		overflow = uintptr(newcap) > (maxAlloc >> shift)
 		newcap = int(capmem >> shift)
 		capmem = uintptr(newcap) << shift
@@ -210,7 +211,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
 		lenmem = uintptr(oldLen) * et.Size_
 		newlenmem = uintptr(newLen) * et.Size_
 		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
-		capmem = roundupsize(capmem)
+		capmem = roundupsize(capmem, noscan)
 		newcap = int(capmem / et.Size_)
 		capmem = uintptr(newcap) * et.Size_
 	}

View File

@@ -270,7 +270,7 @@ func rawstring(size int) (s string, b []byte) {
 // rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
 func rawbyteslice(size int) (b []byte) {
-	cap := roundupsize(uintptr(size))
+	cap := roundupsize(uintptr(size), true)
 	p := mallocgc(cap, nil, false)
 	if cap != uintptr(size) {
 		memclrNoHeapPointers(add(p, uintptr(size)), cap-uintptr(size))
@@ -285,7 +285,7 @@ func rawruneslice(size int) (b []rune) {
 	if uintptr(size) > maxAlloc/4 {
 		throw("out of memory")
 	}
-	mem := roundupsize(uintptr(size) * 4)
+	mem := roundupsize(uintptr(size)*4, true)
 	p := mallocgc(mem, nil, false)
 	if mem != uintptr(size)*4 {
 		memclrNoHeapPointers(add(p, uintptr(size)*4), mem-uintptr(size)*4)

View File

@@ -424,11 +424,15 @@ func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOf
 func systemstack_switch()

 // alignUp rounds n up to a multiple of a. a must be a power of 2.
+//
+//go:nosplit
 func alignUp(n, a uintptr) uintptr {
 	return (n + a - 1) &^ (a - 1)
 }

 // alignDown rounds n down to a multiple of a. a must be a power of 2.
+//
+//go:nosplit
 func alignDown(n, a uintptr) uintptr {
 	return n &^ (a - 1)
 }