diff --git a/src/reflect/value.go b/src/reflect/value.go
index 0452b51d7b..705d74f6b8 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -1723,7 +1723,7 @@ func (v Value) SetZero() {
 	case Slice:
 		*(*unsafeheader.Slice)(v.ptr) = unsafeheader.Slice{}
 	case Interface:
-		*(*[2]unsafe.Pointer)(v.ptr) = [2]unsafe.Pointer{}
+		*(*emptyInterface)(v.ptr) = emptyInterface{}
 	case Chan, Func, Map, Pointer, UnsafePointer:
 		*(*unsafe.Pointer)(v.ptr) = nil
 	case Array, Struct:
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index 456155e548..c4b6c2a789 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -154,7 +154,10 @@ func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
 		return
 	}
 	if writeBarrier.enabled && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
+		// This always copies a full value of type typ so it's safe
+		// to pass typ along as an optimization. See the comment on
+		// bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
 	}
 	// There's a race here: if some other goroutine can write to
 	// src, it may change some pointer in src after we've
@@ -176,7 +179,10 @@ func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
 //go:nowritebarrierrec
 //go:nosplit
 func wbZero(typ *_type, dst unsafe.Pointer) {
-	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes)
+	// This always zeroes a full value of type typ so it's safe
+	// to pass typ along as an optimization. See the comment on
+	// bulkBarrierPreWrite.
+	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes, typ)
 }
 
 // wbMove performs the write barrier operations necessary before
@@ -186,7 +192,11 @@ func wbZero(typ *_type, dst unsafe.Pointer) {
 //go:nowritebarrierrec
 //go:nosplit
 func wbMove(typ *_type, dst, src unsafe.Pointer) {
-	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
+	// This always copies a full value of type typ so it's safe to
+	// pass a type here.
+	//
+	// See the comment on bulkBarrierPreWrite.
+	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes, typ)
 }
 
 //go:linkname reflect_typedmemmove reflect.typedmemmove
@@ -223,7 +233,10 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 //go:nosplit
 func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
 	if writeBarrier.enabled && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
-		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
+		// Pass nil for the type. dst does not point to a value of type typ,
+		// but rather points into one, so applying the optimization is not
+		// safe. See the comment on this function.
+		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size, nil)
 	}
 
 	memmove(dst, src, size)
@@ -278,8 +291,11 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
 	// before calling typedslicecopy.
 	size := uintptr(n) * typ.Size_
 	if writeBarrier.enabled {
+		// This always copies one or more full values of type typ so
+		// it's safe to pass typ along as an optimization. See the comment on
+		// bulkBarrierPreWrite.
 		pwsize := size - typ.Size_ + typ.PtrBytes
-		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
+		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize, typ)
 	}
 	// See typedmemmove for a discussion of the race between the
 	// barrier and memmove.
@@ -308,7 +324,10 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
 //go:nosplit
 func typedmemclr(typ *_type, ptr unsafe.Pointer) {
 	if writeBarrier.enabled && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
+		// This always clears a whole value of type typ, so it's
+		// safe to pass a type here and apply the optimization.
+		// See the comment on bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes, typ)
 	}
 	memclrNoHeapPointers(ptr, typ.Size_)
 }
@@ -321,7 +340,11 @@ func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
 //go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
 func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
 	if writeBarrier.enabled && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, size)
+		// Pass nil for the type. ptr does not point to a value of type typ,
+		// but rather points into one, so it's not safe to apply the optimization.
+		// See the comment on this function in the reflect package and the
+		// comment on bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(ptr), 0, size, nil)
 	}
 	memclrNoHeapPointers(ptr, size)
 }
@@ -330,7 +353,9 @@ func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintpt
 func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
 	size := typ.Size_ * uintptr(len)
 	if writeBarrier.enabled && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, size)
+		// This always clears whole elements of an array, so it's
+		// safe to pass a type here. See the comment on bulkBarrierPreWrite.
+		bulkBarrierPreWrite(uintptr(ptr), 0, size, typ)
 	}
 	memclrNoHeapPointers(ptr, size)
 }
@@ -342,6 +367,7 @@ func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
 //
 //go:nosplit
 func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
-	bulkBarrierPreWrite(uintptr(ptr), 0, n)
+	// Pass nil for the type since we don't have one here anyway.
+	bulkBarrierPreWrite(uintptr(ptr), 0, n, nil)
 	memclrNoHeapPointers(ptr, n)
 }
diff --git a/src/runtime/mbitmap_allocheaders.go b/src/runtime/mbitmap_allocheaders.go
index 319d71f92f..03cec5ffcc 100644
--- a/src/runtime/mbitmap_allocheaders.go
+++ b/src/runtime/mbitmap_allocheaders.go
@@ -58,6 +58,7 @@ package runtime
 
 import (
+	"internal/abi"
 	"internal/goarch"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -200,6 +201,29 @@ func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
 	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
 }
 
+// typePointersOfType is like typePointersOf, but assumes addr points to one or more
+// contiguous instances of the provided type. The provided type must not be nil and
+// it must not have its type metadata encoded as a gcprog.
+//
+// It returns an iterator that tiles typ.GCData starting from addr. It's the caller's
+// responsibility to limit iteration.
+//
+// nosplit because its callers are nosplit and require all their callees to be nosplit.
+//
+//go:nosplit
+func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
+	const doubleCheck = false
+	if doubleCheck && (typ == nil || typ.Kind_&kindGCProg != 0) {
+		throw("bad type passed to typePointersOfType")
+	}
+	if span.spanclass.noscan() {
+		return typePointers{}
+	}
+	// Since we have the type, pretend we have a header.
+	gcdata := typ.GCData
+	return typePointers{elem: addr, addr: addr, mask: readUintptr(gcdata), typ: typ}
+}
+
 // nextFast is the fast path of next. nextFast is written to be inlineable and,
 // as the name implies, fast.
 //
@@ -368,15 +392,30 @@ func (span *mspan) objBase(addr uintptr) uintptr {
 // The caller is also responsible for cgo pointer checks if this
 // may be writing Go pointers into non-Go memory.
 //
-// The pointer bitmap is not maintained for allocations containing
+// Pointer data is not maintained for allocations containing
 // no pointers at all; any caller of bulkBarrierPreWrite must first
 // make sure the underlying allocation contains pointers, usually
 // by checking typ.PtrBytes.
 //
+// The typ argument is the type of the space at src and dst (and the
+// element type if src and dst refer to arrays) and it is optional.
+// If typ is nil, the barrier will still behave as expected and typ
+// is used purely as an optimization. However, it must be used with
+// care.
+//
+// If typ is not nil, then src and dst must point to one or more values
+// of type typ. The caller must ensure that the ranges [src, src+size)
+// and [dst, dst+size) refer to one or more whole values of type typ
+// (leaving off the pointerless tail of the space is OK). If this
+// precondition is not followed, this function will fail to scan the
+// right pointers.
+//
+// When in doubt, pass nil for typ. That is safe and will always work.
+//
 // Callers must perform cgo checks if goexperiment.CgoCheck2.
 //
 //go:nosplit
-func bulkBarrierPreWrite(dst, src, size uintptr) {
+func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
 	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
 		throw("bulkBarrierPreWrite: unaligned arguments")
 	}
@@ -411,7 +450,18 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
 	}
 	buf := &getg().m.p.ptr().wbBuf
 
-	tp := s.typePointersOf(dst, size)
+	// Double-check that the bitmaps generated in the two possible paths match.
+	const doubleCheck = false
+	if doubleCheck {
+		doubleCheckTypePointersOfType(s, typ, dst, size)
+	}
+
+	var tp typePointers
+	if typ != nil && typ.Kind_&kindGCProg == 0 {
+		tp = s.typePointersOfType(typ, dst)
+	} else {
+		tp = s.typePointersOf(dst, size)
+	}
 	if src == 0 {
 		for {
 			var addr uintptr
@@ -446,8 +496,12 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
 // This is used for special cases where e.g. dst was just
 // created and zeroed with malloc.
 //
+// The type of the space can be provided purely as an optimization.
+// See bulkBarrierPreWrite's comment for more details -- use this
+// optimization with great care.
+//
 //go:nosplit
-func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
+func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
 	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
 		throw("bulkBarrierPreWrite: unaligned arguments")
 	}
@@ -455,7 +509,20 @@ func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
 		return
 	}
 	buf := &getg().m.p.ptr().wbBuf
-	tp := spanOf(dst).typePointersOf(dst, size)
+	s := spanOf(dst)
+
+	// Double-check that the bitmaps generated in the two possible paths match.
+	const doubleCheck = false
+	if doubleCheck {
+		doubleCheckTypePointersOfType(s, typ, dst, size)
+	}
+
+	var tp typePointers
+	if typ != nil && typ.Kind_&kindGCProg == 0 {
+		tp = s.typePointersOfType(typ, dst)
+	} else {
+		tp = s.typePointersOf(dst, size)
+	}
 	for {
 		var addr uintptr
 		if tp, addr = tp.next(dst + size); addr == 0 {
@@ -993,6 +1060,52 @@ func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_
 			throw("heapSetType: pointer entry not correct")
 		}
 
+//go:nosplit
+func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
+	if typ == nil || typ.Kind_&kindGCProg != 0 {
+		return
+	}
+	if typ.Kind_&kindMask == kindInterface {
+		// Interfaces are unfortunately inconsistently handled
+		// when it comes to the type pointer, so it's easy to
+		// produce a lot of false positives here.
+		return
+	}
+	tp0 := s.typePointersOfType(typ, addr)
+	tp1 := s.typePointersOf(addr, size)
+	failed := false
+	for {
+		var addr0, addr1 uintptr
+		tp0, addr0 = tp0.next(addr + size)
+		tp1, addr1 = tp1.next(addr + size)
+		if addr0 != addr1 {
+			failed = true
+			break
+		}
+		if addr0 == 0 {
+			break
+		}
+	}
+	if failed {
+		tp0 := s.typePointersOfType(typ, addr)
+		tp1 := s.typePointersOf(addr, size)
+		print("runtime: addr=", hex(addr), " size=", size, "\n")
+		print("runtime: type=", toRType(typ).string(), "\n")
+		dumpTypePointers(tp0)
+		dumpTypePointers(tp1)
+		for {
+			var addr0, addr1 uintptr
+			tp0, addr0 = tp0.next(addr + size)
+			tp1, addr1 = tp1.next(addr + size)
+			print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
+			if addr0 == 0 && addr1 == 0 {
+				break
+			}
+		}
+		throw("mismatch between typePointersOfType and typePointersOf")
+	}
+}
+
 func dumpTypePointers(tp typePointers) {
 	print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
 	print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
@@ -1015,12 +1128,19 @@ func getgcmask(ep any) (mask []byte) {
 	e := *efaceOf(&ep)
 	p := e.data
 	t := e._type
+
+	var et *_type
+	if t.Kind_&kindMask != kindPtr {
+		throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
+	}
+	et = (*ptrtype)(unsafe.Pointer(t)).Elem
+
 	// data or bss
 	for _, datap := range activeModules() {
 		// data
 		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
 			bitmap := datap.gcdatamask.bytedata
-			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
+			n := et.Size_
 			mask = make([]byte, n/goarch.PtrSize)
 			for i := uintptr(0); i < n; i += goarch.PtrSize {
 				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
@@ -1032,7 +1152,7 @@ func getgcmask(ep any) (mask []byte) {
 		// bss
 		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
 			bitmap := datap.gcbssmask.bytedata
-			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
+			n := et.Size_
 			mask = make([]byte, n/goarch.PtrSize)
 			for i := uintptr(0); i < n; i += goarch.PtrSize {
 				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
@@ -1056,13 +1176,13 @@ func getgcmask(ep any) (mask []byte) {
 		base = tp.addr
 
 		// Unroll the full bitmap the GC would actually observe.
-		mask = make([]byte, (limit-base)/goarch.PtrSize)
+		maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
 		for {
 			var addr uintptr
 			if tp, addr = tp.next(limit); addr == 0 {
 				break
 			}
-			mask[(addr-base)/goarch.PtrSize] = 1
+			maskFromHeap[(addr-base)/goarch.PtrSize] = 1
 		}
 
 		// Double-check that every part of the ptr/scalar we're not
@@ -1074,11 +1194,61 @@ func getgcmask(ep any) (mask []byte) {
 			}
 		}
 
-		// Callers expect this mask to end at the last pointer.
-		for len(mask) > 0 && mask[len(mask)-1] == 0 {
-			mask = mask[:len(mask)-1]
+		// Callers (and a check we're about to run) expect this mask
+		// to end at the last pointer.
+		for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
+			maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
 		}
 
+		if et.Kind_&kindGCProg == 0 {
+			// Unroll again, but this time from the type information.
+			maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
+			tp = s.typePointersOfType(et, base)
+			for {
+				var addr uintptr
+				if tp, addr = tp.next(limit); addr == 0 {
+					break
+				}
+				maskFromType[(addr-base)/goarch.PtrSize] = 1
+			}
+
+			// Validate that the prefix of maskFromType is equal to
+			// maskFromHeap. maskFromType may contain more pointers than
+			// maskFromHeap produces because maskFromHeap may be able to
+			// get exact type information for certain classes of objects.
+			// With maskFromType, we're always just tiling the type bitmap
+			// through to the elemsize.
+			//
+			// It's OK if maskFromType has pointers in elemsize that extend
+			// past the actual populated space; we checked above that all
+			// that space is zeroed, so the GC will just see nil pointers.
+			differs := false
+			for i := range maskFromHeap {
+				if maskFromHeap[i] != maskFromType[i] {
+					differs = true
+					break
+				}
+			}
+
+			if differs {
+				print("runtime: heap mask=")
+				for _, b := range maskFromHeap {
+					print(b)
+				}
+				println()
+				print("runtime: type mask=")
+				for _, b := range maskFromType {
+					print(b)
+				}
+				println()
+				print("runtime: type=", toRType(et).string(), "\n")
+				throw("found two different masks from two different methods")
+			}
+		}
+
+		// Select the heap mask to return. We may not have a type mask.
+		mask = maskFromHeap
+
 		// Make sure we keep ep alive. We may have stopped referencing
 		// ep's data pointer sometime before this point and it's possible
 		// for that memory to get freed.
diff --git a/src/runtime/mbitmap_noallocheaders.go b/src/runtime/mbitmap_noallocheaders.go
index dab15889a4..383993aa1e 100644
--- a/src/runtime/mbitmap_noallocheaders.go
+++ b/src/runtime/mbitmap_noallocheaders.go
@@ -42,6 +42,7 @@ package runtime
 
 import (
+	"internal/abi"
 	"internal/goarch"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -233,10 +234,13 @@ func (h heapBits) nextFast() (heapBits, uintptr) {
 // make sure the underlying allocation contains pointers, usually
 // by checking typ.PtrBytes.
 //
+// The type of the space can be provided purely as an optimization;
+// however, it is not used with GOEXPERIMENT=noallocheaders.
+//
 // Callers must perform cgo checks if goexperiment.CgoCheck2.
 //
 //go:nosplit
-func bulkBarrierPreWrite(dst, src, size uintptr) {
+func bulkBarrierPreWrite(dst, src, size uintptr, _ *abi.Type) {
 	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
 		throw("bulkBarrierPreWrite: unaligned arguments")
 	}
@@ -305,8 +309,11 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
 // This is used for special cases where e.g. dst was just
 // created and zeroed with malloc.
 //
+// The type of the space can be provided purely as an optimization;
+// however, it is not used with GOEXPERIMENT=noallocheaders.
+//
 //go:nosplit
-func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
+func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, _ *abi.Type) {
 	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
 		throw("bulkBarrierPreWrite: unaligned arguments")
 	}
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 95341b5904..eb628bb169 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -64,7 +64,11 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf
 	if copymem > 0 && writeBarrier.enabled {
 		// Only shade the pointers in old.array since we know the destination slice
 		// only contains nil pointers because it has been cleared during alloc.
-		bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
+		//
+		// It's safe to pass a type to this function as an optimization because
+		// from and to only ever refer to memory representing whole values of
+		// type et. See the comment on bulkBarrierPreWrite.
+		bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
 	}
 }
 
@@ -247,7 +251,11 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice
 		if lenmem > 0 && writeBarrier.enabled {
 			// Only shade the pointers in oldPtr since we know the destination slice p
 			// only contains nil pointers because it has been cleared during alloc.
-			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes)
+			//
+			// It's safe to pass a type to this function as an optimization because
+			// oldPtr and p only ever refer to memory representing whole values of
+			// type et. See the comment on bulkBarrierPreWrite.
+			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
 		}
 	}
 	memmove(p, oldPtr, lenmem)
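---

Below is a minimal standalone sketch, not part of the patch, of the whole-value precondition that the new bulkBarrierPreWrite doc comment imposes. All names here (T, mask, tiled) are invented for illustration. typePointersOfType tiles typ.GCData from the destination address, so the tiled pointer mask only lines up with reality when dst points at whole values of typ; if dst points into the middle of a value, the mask is shifted and the wrong words get scanned. That is exactly why typedmemmove and typedslicecopy pass typ while interior writers like reflectcallmove and reflect_typedmemclrpartial pass nil.

	// Sketch: why the typ optimization requires whole-value writes.
	// Illustrative user-level code, not runtime code.
	package main

	import "fmt"

	// T occupies 4 words on 64-bit: pointer, scalar, pointer, scalar.
	type T struct {
		a *int
		b uintptr
		c *int
		d uintptr
	}

	// mask plays the role of typ.GCData for T: one entry per word,
	// 1 meaning "this word holds a pointer".
	var mask = [4]byte{1, 0, 1, 0}

	// tiled reports whether the word at byte offset off from dst would be
	// treated as a pointer when mask is tiled from dst, the way
	// typePointersOfType tiles typ.GCData from addr.
	func tiled(off uintptr) bool {
		const wordSize = 8
		return mask[(off/wordSize)%4] == 1
	}

	func main() {
		// Whole-value write: dst is the start of a T, so tiling lines up.
		fmt.Println(tiled(0), tiled(8), tiled(16), tiled(24)) // true false true false: matches T's layout

		// Interior write: dst points at T.b (offset 8 within the value).
		// Tiling from there shifts the mask by one word: the first word of
		// the destination range is the scalar b, yet the tiled mask marks
		// it as a pointer, and the pointer c is treated as a scalar.
		fmt.Println(tiled(0)) // true, but the word at dst+0 is b, a scalar
	}

The doubleCheckTypePointersOfType debug hook added by the patch exists to catch precisely this kind of mismatch between the tiled type mask and what the heap bitmap reports.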
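And a small worked example, with made-up element sizes, of the pwsize arithmetic in typedslicecopy above: the barrier only needs to cover through the last pointer word, so the pointerless tail of the final element (typ.Size_ - typ.PtrBytes) can be left off, which is exactly the "pointerless tail" allowance in the new doc comment.

	// Worked example of typedslicecopy's pwsize computation.
	package main

	import "fmt"

	func main() {
		const (
			elemSize = 24 // stand-in for typ.Size_
			ptrBytes = 16 // stand-in for typ.PtrBytes: no pointers past byte 16 of an element
			n        = 4  // elements being copied
		)
		size := uintptr(n) * elemSize
		pwsize := size - elemSize + ptrBytes // matches the diff: size - typ.Size_ + typ.PtrBytes
		fmt.Println(size, pwsize)            // 96 88: 3 full elements (72) + first 16 bytes of the last
	}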