diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index 167e659016..adde1bd8c2 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -87,51 +87,53 @@ var runtimeDecls = [...]struct { {"writeBarrier", varTag, 76}, {"writebarrierptr", funcTag, 77}, {"typedmemmove", funcTag, 78}, - {"typedslicecopy", funcTag, 79}, - {"selectnbsend", funcTag, 80}, - {"selectnbrecv", funcTag, 81}, - {"selectnbrecv2", funcTag, 83}, - {"newselect", funcTag, 84}, - {"selectsend", funcTag, 80}, + {"typedmemclr", funcTag, 79}, + {"typedslicecopy", funcTag, 80}, + {"selectnbsend", funcTag, 81}, + {"selectnbrecv", funcTag, 82}, + {"selectnbrecv2", funcTag, 84}, + {"newselect", funcTag, 85}, + {"selectsend", funcTag, 81}, {"selectrecv", funcTag, 73}, - {"selectrecv2", funcTag, 85}, - {"selectdefault", funcTag, 86}, - {"selectgo", funcTag, 87}, + {"selectrecv2", funcTag, 86}, + {"selectdefault", funcTag, 87}, + {"selectgo", funcTag, 88}, {"block", funcTag, 5}, - {"makeslice", funcTag, 89}, - {"makeslice64", funcTag, 90}, - {"growslice", funcTag, 91}, - {"memmove", funcTag, 92}, - {"memclr", funcTag, 93}, - {"memequal", funcTag, 94}, - {"memequal8", funcTag, 95}, - {"memequal16", funcTag, 95}, - {"memequal32", funcTag, 95}, - {"memequal64", funcTag, 95}, - {"memequal128", funcTag, 95}, - {"int64div", funcTag, 96}, - {"uint64div", funcTag, 97}, - {"int64mod", funcTag, 96}, - {"uint64mod", funcTag, 97}, - {"float64toint64", funcTag, 98}, - {"float64touint64", funcTag, 99}, - {"float64touint32", funcTag, 101}, - {"int64tofloat64", funcTag, 102}, - {"uint64tofloat64", funcTag, 103}, - {"uint32tofloat64", funcTag, 104}, - {"complex128div", funcTag, 105}, - {"racefuncenter", funcTag, 106}, + {"makeslice", funcTag, 90}, + {"makeslice64", funcTag, 91}, + {"growslice", funcTag, 92}, + {"memmove", funcTag, 93}, + {"memclrNoHeapPointers", funcTag, 94}, + {"memclrHasPointers", funcTag, 94}, + {"memequal", funcTag, 95}, + {"memequal8", funcTag, 96}, + {"memequal16", funcTag, 96}, + {"memequal32", funcTag, 96}, + {"memequal64", funcTag, 96}, + {"memequal128", funcTag, 96}, + {"int64div", funcTag, 97}, + {"uint64div", funcTag, 98}, + {"int64mod", funcTag, 97}, + {"uint64mod", funcTag, 98}, + {"float64toint64", funcTag, 99}, + {"float64touint64", funcTag, 100}, + {"float64touint32", funcTag, 102}, + {"int64tofloat64", funcTag, 103}, + {"uint64tofloat64", funcTag, 104}, + {"uint32tofloat64", funcTag, 105}, + {"complex128div", funcTag, 106}, + {"racefuncenter", funcTag, 107}, {"racefuncexit", funcTag, 5}, - {"raceread", funcTag, 106}, - {"racewrite", funcTag, 106}, - {"racereadrange", funcTag, 107}, - {"racewriterange", funcTag, 107}, - {"msanread", funcTag, 107}, - {"msanwrite", funcTag, 107}, + {"raceread", funcTag, 107}, + {"racewrite", funcTag, 107}, + {"racereadrange", funcTag, 108}, + {"racewriterange", funcTag, 108}, + {"msanread", funcTag, 108}, + {"msanwrite", funcTag, 108}, } func runtimeTypes() []*Type { - var typs [108]*Type + var typs [109]*Type typs[0] = bytetype typs[1] = typPtr(typs[0]) typs[2] = Types[TANY] @@ -211,34 +213,35 @@ func runtimeTypes() []*Type { typs[76] = tostruct([]*Node{namedfield("enabled", typs[13]), namedfield("needed", typs[13]), namedfield("cgo", typs[13])}) typs[77] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[2])}, nil) typs[78] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) - typs[79] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), 
anonfield(typs[2])}, []*Node{anonfield(typs[33])}) - typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[74]), anonfield(typs[3])}, []*Node{anonfield(typs[13])}) - typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[71])}, []*Node{anonfield(typs[13])}) - typs[82] = typPtr(typs[13]) - typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[82]), anonfield(typs[71])}, []*Node{anonfield(typs[13])}) - typs[84] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[17]), anonfield(typs[10])}, nil) - typs[85] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[71]), anonfield(typs[3]), anonfield(typs[82])}, []*Node{anonfield(typs[13])}) - typs[86] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[13])}) - typs[87] = functype(nil, []*Node{anonfield(typs[1])}, nil) - typs[88] = typSlice(typs[2]) - typs[89] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[33]), anonfield(typs[33])}, []*Node{anonfield(typs[88])}) - typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[88])}) - typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[88]), anonfield(typs[33])}, []*Node{anonfield(typs[88])}) - typs[92] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, nil) - typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[50])}, nil) - typs[94] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, []*Node{anonfield(typs[13])}) - typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[13])}) - typs[96] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])}) - typs[97] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])}) - typs[98] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[17])}) - typs[99] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[19])}) - typs[100] = Types[TUINT32] - typs[101] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[100])}) - typs[102] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[15])}) - typs[103] = functype(nil, []*Node{anonfield(typs[19])}, []*Node{anonfield(typs[15])}) - typs[104] = functype(nil, []*Node{anonfield(typs[100])}, []*Node{anonfield(typs[15])}) - typs[105] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])}) - typs[106] = functype(nil, []*Node{anonfield(typs[50])}, nil) - typs[107] = functype(nil, []*Node{anonfield(typs[50]), anonfield(typs[50])}, nil) + typs[79] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil) + typs[80] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[33])}) + typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[74]), anonfield(typs[3])}, []*Node{anonfield(typs[13])}) + typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[71])}, []*Node{anonfield(typs[13])}) + typs[83] = typPtr(typs[13]) + typs[84] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[83]), anonfield(typs[71])}, []*Node{anonfield(typs[13])}) + typs[85] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[17]), anonfield(typs[10])}, nil) + typs[86] = functype(nil, []*Node{anonfield(typs[1]), 
anonfield(typs[71]), anonfield(typs[3]), anonfield(typs[83])}, []*Node{anonfield(typs[13])}) + typs[87] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[13])}) + typs[88] = functype(nil, []*Node{anonfield(typs[1])}, nil) + typs[89] = typSlice(typs[2]) + typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[33]), anonfield(typs[33])}, []*Node{anonfield(typs[89])}) + typs[91] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[89])}) + typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[89]), anonfield(typs[33])}, []*Node{anonfield(typs[89])}) + typs[93] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, nil) + typs[94] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[50])}, nil) + typs[95] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, []*Node{anonfield(typs[13])}) + typs[96] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[13])}) + typs[97] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])}) + typs[98] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])}) + typs[99] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[17])}) + typs[100] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[19])}) + typs[101] = Types[TUINT32] + typs[102] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[101])}) + typs[103] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[15])}) + typs[104] = functype(nil, []*Node{anonfield(typs[19])}, []*Node{anonfield(typs[15])}) + typs[105] = functype(nil, []*Node{anonfield(typs[101])}, []*Node{anonfield(typs[15])}) + typs[106] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])}) + typs[107] = functype(nil, []*Node{anonfield(typs[50])}, nil) + typs[108] = functype(nil, []*Node{anonfield(typs[50]), anonfield(typs[50])}, nil) return typs[:] } diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go index 07be2d2da3..ff2da79e81 100644 --- a/src/cmd/compile/internal/gc/builtin/runtime.go +++ b/src/cmd/compile/internal/gc/builtin/runtime.go @@ -113,6 +113,7 @@ func writebarrierptr(dst *any, src any) // *byte is really *runtime.Type func typedmemmove(typ *byte, dst *any, src *any) +func typedmemclr(typ *byte, dst *any) func typedslicecopy(typ *byte, dst any, src any) int func selectnbsend(chanType *byte, hchan chan<- any, elem *any) bool @@ -131,7 +132,8 @@ func makeslice(typ *byte, len int, cap int) (ary []any) func makeslice64(typ *byte, len int64, cap int64) (ary []any) func growslice(typ *byte, old []any, cap int) (ary []any) func memmove(to *any, frm *any, length uintptr) -func memclr(ptr *byte, length uintptr) +func memclrNoHeapPointers(ptr *byte, length uintptr) +func memclrHasPointers(ptr *byte, length uintptr) func memequal(x, y *any, size uintptr) bool func memequal8(x, y *any) bool diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index bff21940e1..59dfba5b1e 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -393,11 +393,13 @@ func memclrrange(n, v1, v2, a *Node) bool { return false } + // TODO: Use memclrHasPointers if there are pointers. 
+ // Convert to // if len(a) != 0 { // hp = &a[0] // hn = len(a)*sizeof(elem(a)) - // memclr(hp, hn) + // memclrNoHeapPointers(hp, hn) // i = len(a) - 1 // } n.Op = OIF @@ -423,8 +425,8 @@ func memclrrange(n, v1, v2, a *Node) bool { tmp = conv(tmp, Types[TUINTPTR]) n.Nbody.Append(nod(OAS, hn, tmp)) - // memclr(hp, hn) - fn := mkcall("memclr", nil, nil, hp, hn) + // memclrNoHeapPointers(hp, hn) + fn := mkcall("memclrNoHeapPointers", nil, nil, hp, hn) n.Nbody.Append(fn) diff --git a/src/reflect/value.go b/src/reflect/value.go index 283fbd3c53..042414ffe7 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -442,13 +442,13 @@ func (v Value) call(op string, in []Value) []Value { if nout == 0 { // This is untyped because the frame is really a // stack, even though it's a heap object. - memclr(args, frametype.size) + memclrNoHeapPointers(args, frametype.size) framePool.Put(args) } else { // Zero the now unused input area of args, // because the Values returned by this function contain pointers to the args object, // and will thus keep the args object alive indefinitely. - memclr(args, retOffset) + memclrNoHeapPointers(args, retOffset) // Wrap Values around return values in args. ret = make([]Value, nout) off = retOffset @@ -648,7 +648,7 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer) { // This is untyped because the frame is really a stack, even // though it's a heap object. - memclr(args, frametype.size) + memclrNoHeapPointers(args, frametype.size) framePool.Put(args) } @@ -2512,7 +2512,7 @@ func typedmemmovepartial(t *rtype, dst, src unsafe.Pointer, off, size uintptr) func typedslicecopy(elemType *rtype, dst, src sliceHeader) int //go:noescape -func memclr(ptr unsafe.Pointer, n uintptr) +func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) // Dummy annotation marking that the value x escapes, // for use in cases where the reflect code is so clever that diff --git a/src/runtime/alg.go b/src/runtime/alg.go index 80f205c494..5c378c6a2a 100644 --- a/src/runtime/alg.go +++ b/src/runtime/alg.go @@ -275,12 +275,6 @@ func ifaceHash(i interface { return algarray[alg_INTER].hash(noescape(unsafe.Pointer(&i)), seed) } -// Testing adapter for memclr -func memclrBytes(b []byte) { - s := (*slice)(unsafe.Pointer(&b)) - memclr(s.array, uintptr(s.len)) -} - const hashRandomBytes = sys.PtrSize / 4 * 64 // used in asm_{386,amd64}.s to seed the hash function diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s index 60613b175d..ab73508a59 100644 --- a/src/runtime/asm_amd64p32.s +++ b/src/runtime/asm_amd64p32.s @@ -484,7 +484,7 @@ TEXT runtime·stackcheck(SB), NOSPLIT, $0-0 MOVL 0, AX RET -TEXT runtime·memclr(SB),NOSPLIT,$0-8 +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-8 MOVL ptr+0(FP), DI MOVL n+4(FP), CX MOVQ CX, BX diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 5510a27694..d83b3b0a49 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -155,7 +155,11 @@ var Int32Hash = int32Hash var Int64Hash = int64Hash var EfaceHash = efaceHash var IfaceHash = ifaceHash -var MemclrBytes = memclrBytes + +func MemclrBytes(b []byte) { + s := (*slice)(unsafe.Pointer(&b)) + memclrNoHeapPointers(s.array, uintptr(s.len)) +} var HashLoad = &hashLoad diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go index 86d3b37ff1..086d374a1e 100644 --- a/src/runtime/hashmap.go +++ b/src/runtime/hashmap.go @@ -1090,7 +1090,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { if t.bucket.kind&kindNoPointers == 0 { 
memclrHasPointers(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset) } else { - memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset) + memclrNoHeapPointers(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset) } } } diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 8cdccb877a..60394173ed 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -631,7 +631,7 @@ func mdump() { s.ensureSwept() } } - memclr(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache)) + memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache)) dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr))) dumpparams() dumpitabs() diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index c5f6facc4d..7cdca03e5b 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -682,7 +682,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } x = unsafe.Pointer(v) if needzero && span.needzero != 0 { - memclr(unsafe.Pointer(v), size) + memclrNoHeapPointers(unsafe.Pointer(v), size) } } } else { diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go index 1a7bef4fa1..a8766c7218 100644 --- a/src/runtime/mbarrier.go +++ b/src/runtime/mbarrier.go @@ -333,14 +333,17 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int { } // typedmemclr clears the typed memory at ptr with type typ. The -// memory at ptr must already be type-safe. +// memory at ptr must already be initialized (and hence in type-safe +// state). If the memory is being initialized for the first time, see +// memclrNoHeapPointers. // // If the caller knows that typ has pointers, it can alternatively // call memclrHasPointers. // //go:nosplit func typedmemclr(typ *_type, ptr unsafe.Pointer) { - memclr(ptr, typ.size) + // TODO(austin): Call the hybrid barrier. + memclrNoHeapPointers(ptr, typ.size) } // memclrHasPointers clears n bytes of typed memory starting at ptr. @@ -350,5 +353,6 @@ func typedmemclr(typ *_type, ptr unsafe.Pointer) { // //go:nosplit func memclrHasPointers(ptr unsafe.Pointer, n uintptr) { - memclr(ptr, n) + // TODO(austin): Call the hybrid barrier. + memclrNoHeapPointers(ptr, n) } diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index f1f9158eeb..be52bfacc6 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -742,7 +742,7 @@ func (h heapBits) initSpan(s *mspan) { } return } - memclr(unsafe.Pointer(subtractb(h.bitp, nbyte-1)), nbyte) + memclrNoHeapPointers(unsafe.Pointer(subtractb(h.bitp, nbyte-1)), nbyte) } // initCheckmarkSpan initializes a span for being checkmarked. @@ -1433,7 +1433,7 @@ func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize u } endProg := unsafe.Pointer(subtractb(h.bitp, (totalBits+3)/4)) endAlloc := unsafe.Pointer(subtractb(h.bitp, allocSize/heapBitmapScale)) - memclr(add(endAlloc, 1), uintptr(endProg)-uintptr(endAlloc)) + memclrNoHeapPointers(add(endAlloc, 1), uintptr(endProg)-uintptr(endAlloc)) } // progToPointerMask returns the 1-bit pointer mask output by the GC program prog. 
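The mbarrier.go hunk above establishes the new contract: typedmemclr is for initialized, typed memory (and, per the TODO, will eventually invoke the hybrid barrier), memclrHasPointers is for initialized memory known to contain pointers when only a size is available, and memclrNoHeapPointers remains the raw, barrier-free clear. A minimal sketch of how a caller picks between them, modeled on the hashmap.go evacuate hunk; it assumes package-runtime context, and clearBucketTail is a hypothetical name used only for illustration, not part of this CL:

// Sketch only. Mirrors the evacuate change: branch on whether the
// bucket type holds heap pointers and route the clear through the
// barrier-aware path when it does.
func clearBucketTail(t *maptype, b unsafe.Pointer) {
	if t.bucket.kind&kindNoPointers == 0 {
		// Bucket may hold heap pointers: use the entry point that
		// can later grow a hybrid-barrier call.
		memclrHasPointers(add(b, dataOffset), uintptr(t.bucketsize)-dataOffset)
	} else {
		// Pointer-free bucket memory: a plain clear is safe.
		memclrNoHeapPointers(add(b, dataOffset), uintptr(t.bucketsize)-dataOffset)
	}
}
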
diff --git a/src/runtime/mem_plan9.go b/src/runtime/mem_plan9.go index 3d82a9829e..98bfc7f536 100644 --- a/src/runtime/mem_plan9.go +++ b/src/runtime/mem_plan9.go @@ -38,7 +38,7 @@ func memAlloc(n uintptr) unsafe.Pointer { p.size -= n p = (*memHdr)(add(unsafe.Pointer(p), p.size)) } - memclr(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})) + *p = memHdr{} return unsafe.Pointer(p) } prevp = p @@ -48,7 +48,7 @@ func memAlloc(n uintptr) unsafe.Pointer { func memFree(ap unsafe.Pointer, n uintptr) { n = memRound(n) - memclr(ap, n) + memclrNoHeapPointers(ap, n) bp := (*memHdr)(ap) bp.size = n bpn := uintptr(ap) @@ -63,7 +63,7 @@ func memFree(ap unsafe.Pointer, n uintptr) { if bpn+bp.size == uintptr(unsafe.Pointer(p)) { bp.size += p.size bp.next = p.next - memclr(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})) + *p = memHdr{} } else { bp.next.set(p) } @@ -77,14 +77,14 @@ func memFree(ap unsafe.Pointer, n uintptr) { if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) { bp.size += p.next.ptr().size bp.next = p.next.ptr().next - memclr(unsafe.Pointer(p.next), unsafe.Sizeof(memHdr{})) + *p.next.ptr() = memHdr{} } else { bp.next = p.next } if uintptr(unsafe.Pointer(p))+p.size == bpn { p.size += bp.size p.next = bp.next - memclr(unsafe.Pointer(bp), unsafe.Sizeof(memHdr{})) + *bp = memHdr{} } else { p.next.set(bp) } diff --git a/src/runtime/memclr_386.s b/src/runtime/memclr_386.s index ce962f35da..ef6e60287c 100644 --- a/src/runtime/memclr_386.s +++ b/src/runtime/memclr_386.s @@ -8,8 +8,8 @@ // NOTE: Windows externalthreadhandler expects memclr to preserve DX. -// void runtime·memclr(void*, uintptr) -TEXT runtime·memclr(SB), NOSPLIT, $0-8 +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-8 MOVL ptr+0(FP), DI MOVL n+4(FP), BX XORL AX, AX diff --git a/src/runtime/memclr_amd64.s b/src/runtime/memclr_amd64.s index 6f30eca242..244f5b4d8c 100644 --- a/src/runtime/memclr_amd64.s +++ b/src/runtime/memclr_amd64.s @@ -8,8 +8,8 @@ // NOTE: Windows externalthreadhandler expects memclr to preserve DX. 
-// void runtime·memclr(void*, uintptr) -TEXT runtime·memclr(SB), NOSPLIT, $0-16 +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-16 MOVQ ptr+0(FP), DI MOVQ n+8(FP), BX XORQ AX, AX diff --git a/src/runtime/memclr_arm.s b/src/runtime/memclr_arm.s index 6ad70fbfc0..eb37674303 100644 --- a/src/runtime/memclr_arm.s +++ b/src/runtime/memclr_arm.s @@ -30,7 +30,7 @@ #define N R12 #define TMP R12 /* N and TMP don't overlap */ -TEXT runtime·memclr(SB),NOSPLIT,$0-8 +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-8 MOVW ptr+0(FP), TO MOVW n+4(FP), N MOVW $0, R0 diff --git a/src/runtime/memclr_arm64.s b/src/runtime/memclr_arm64.s index 47c6b73c84..9d756bcf6d 100644 --- a/src/runtime/memclr_arm64.s +++ b/src/runtime/memclr_arm64.s @@ -4,8 +4,8 @@ #include "textflag.h" -// void runtime·memclr(void*, uintptr) -TEXT runtime·memclr(SB),NOSPLIT,$0-16 +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 MOVD ptr+0(FP), R3 MOVD n+8(FP), R4 // TODO(mwhudson): this is written this way to avoid tickling diff --git a/src/runtime/memclr_mips64x.s b/src/runtime/memclr_mips64x.s index 30a4af3c94..5018d43c8b 100644 --- a/src/runtime/memclr_mips64x.s +++ b/src/runtime/memclr_mips64x.s @@ -6,8 +6,8 @@ #include "textflag.h" -// void runtime·memclr(void*, uintptr) -TEXT runtime·memclr(SB),NOSPLIT,$0-16 +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 MOVV ptr+0(FP), R1 MOVV n+8(FP), R2 ADDV R1, R2, R4 diff --git a/src/runtime/memclr_plan9_386.s b/src/runtime/memclr_plan9_386.s index 4707ab2e75..c3d92a9fa9 100644 --- a/src/runtime/memclr_plan9_386.s +++ b/src/runtime/memclr_plan9_386.s @@ -4,8 +4,8 @@ #include "textflag.h" -// void runtime·memclr(void*, uintptr) -TEXT runtime·memclr(SB), NOSPLIT, $0-8 +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-8 MOVL ptr+0(FP), DI MOVL n+4(FP), BX XORL AX, AX diff --git a/src/runtime/memclr_plan9_amd64.s b/src/runtime/memclr_plan9_amd64.s index 37e61dfbcc..d4d1a3a50b 100644 --- a/src/runtime/memclr_plan9_amd64.s +++ b/src/runtime/memclr_plan9_amd64.s @@ -4,8 +4,8 @@ #include "textflag.h" -// void runtime·memclr(void*, uintptr) -TEXT runtime·memclr(SB),NOSPLIT,$0-16 +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 MOVQ ptr+0(FP), DI MOVQ n+8(FP), CX MOVQ CX, BX diff --git a/src/runtime/memclr_ppc64x.s b/src/runtime/memclr_ppc64x.s index f7375dbee6..e3a4673c8d 100644 --- a/src/runtime/memclr_ppc64x.s +++ b/src/runtime/memclr_ppc64x.s @@ -6,8 +6,8 @@ #include "textflag.h" -// void runtime·memclr(void*, uintptr) -TEXT runtime·memclr(SB), NOSPLIT|NOFRAME, $0-16 +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT|NOFRAME, $0-16 MOVD ptr+0(FP), R3 MOVD n+8(FP), R4 diff --git a/src/runtime/memclr_s390x.s b/src/runtime/memclr_s390x.s index 846131e9f5..43da10dcb6 100644 --- a/src/runtime/memclr_s390x.s +++ b/src/runtime/memclr_s390x.s @@ -4,8 +4,8 @@ #include "textflag.h" -// void runtime·memclr(void*, uintptr) -TEXT runtime·memclr(SB),NOSPLIT|NOFRAME,$0-16 +// void runtime·memclrNoHeapPointers(void*, uintptr) +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT|NOFRAME,$0-16 MOVD ptr+0(FP), R4 MOVD n+8(FP), R5 diff --git a/src/runtime/mfixalloc.go b/src/runtime/mfixalloc.go index 0d3d895113..fe4b0fcf2a 100644 --- a/src/runtime/mfixalloc.go +++ 
b/src/runtime/mfixalloc.go @@ -72,7 +72,7 @@ func (f *fixalloc) alloc() unsafe.Pointer { f.list = f.list.next f.inuse += f.size if f.zero { - memclr(v, f.size) + memclrNoHeapPointers(v, f.size) } return v } diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index e81e410ad8..a0f5599516 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -629,7 +629,7 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) if s != nil { if needzero && s.needzero != 0 { - memclr(unsafe.Pointer(s.base()), s.npages<<_PageShift) + memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages<<_PageShift) } s.needzero = 0 } @@ -1418,7 +1418,7 @@ func newArena() *gcBits { } else { result = gcBitsArenas.free gcBitsArenas.free = gcBitsArenas.free.next - memclr(unsafe.Pointer(result), gcBitsChunkBytes) + memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes) } result.next = nil // If result.bits is not 8 byte aligned adjust index so diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go index d6def7ba43..067fb3bb0a 100644 --- a/src/runtime/os3_solaris.go +++ b/src/runtime/os3_solaris.go @@ -317,7 +317,7 @@ func semacreate(mp *m) { // here because it could cause a deadlock. _g_.m.libcall.fn = uintptr(unsafe.Pointer(&libc_malloc)) _g_.m.libcall.n = 1 - memclr(unsafe.Pointer(&_g_.m.scratch), uintptr(len(_g_.m.scratch.v))) + _g_.m.scratch = mscratch{} _g_.m.scratch.v[0] = unsafe.Sizeof(*sem) _g_.m.libcall.args = uintptr(unsafe.Pointer(&_g_.m.scratch)) asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&_g_.m.libcall)) @@ -337,7 +337,7 @@ func semasleep(ns int64) int32 { _m_.libcall.fn = uintptr(unsafe.Pointer(&libc_sem_reltimedwait_np)) _m_.libcall.n = 2 - memclr(unsafe.Pointer(&_m_.scratch), uintptr(len(_m_.scratch.v))) + _m_.scratch = mscratch{} _m_.scratch.v[0] = _m_.waitsema _m_.scratch.v[1] = uintptr(unsafe.Pointer(&_m_.ts)) _m_.libcall.args = uintptr(unsafe.Pointer(&_m_.scratch)) @@ -353,7 +353,7 @@ func semasleep(ns int64) int32 { for { _m_.libcall.fn = uintptr(unsafe.Pointer(&libc_sem_wait)) _m_.libcall.n = 1 - memclr(unsafe.Pointer(&_m_.scratch), uintptr(len(_m_.scratch.v))) + _m_.scratch = mscratch{} _m_.scratch.v[0] = _m_.waitsema _m_.libcall.args = uintptr(unsafe.Pointer(&_m_.scratch)) asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&_m_.libcall)) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 8b57514ac0..af111014f1 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -2812,7 +2812,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr } memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) - memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) + memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) newg.sched.sp = sp newg.stktopsp = sp newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function diff --git a/src/runtime/slice.go b/src/runtime/slice.go index 7f4de450d2..0f49df1647 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -141,7 +141,7 @@ func growslice(et *_type, old slice, cap int) slice { memmove(p, old.array, lenmem) // The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length). // Only clear the part that will not be overwritten. 
- memclr(add(p, newlenmem), capmem-newlenmem) + memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem) } else { // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. p = mallocgc(capmem, et, true) diff --git a/src/runtime/stack.go b/src/runtime/stack.go index dfc71b41c3..ea9a69aa1e 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -436,7 +436,7 @@ func stackfree(stk stack, n uintptr) { } if stackDebug >= 1 { println("stackfree", v, n) - memclr(v, n) // for testing, clobber stack data + memclrNoHeapPointers(v, n) // for testing, clobber stack data } if debug.efence != 0 || stackFromSystem != 0 { if debug.efence != 0 || stackFaultOnFree != 0 { diff --git a/src/runtime/string.go b/src/runtime/string.go index 4cf165bb87..07528236ee 100644 --- a/src/runtime/string.go +++ b/src/runtime/string.go @@ -249,7 +249,7 @@ func rawbyteslice(size int) (b []byte) { cap := roundupsize(uintptr(size)) p := mallocgc(cap, nil, false) if cap != uintptr(size) { - memclr(add(p, uintptr(size)), cap-uintptr(size)) + memclrNoHeapPointers(add(p, uintptr(size)), cap-uintptr(size)) } *(*slice)(unsafe.Pointer(&b)) = slice{p, size, int(cap)} @@ -264,7 +264,7 @@ func rawruneslice(size int) (b []rune) { mem := roundupsize(uintptr(size) * 4) p := mallocgc(mem, nil, false) if mem != uintptr(size)*4 { - memclr(add(p, uintptr(size)*4), mem-uintptr(size)*4) + memclrNoHeapPointers(add(p, uintptr(size)*4), mem-uintptr(size)*4) } *(*slice)(unsafe.Pointer(&b)) = slice{p, size, int(mem / 4)} diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index 693a3445c2..7384c7810f 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -60,20 +60,24 @@ func badsystemstack() { throw("systemstack called from unexpected goroutine") } -// memclr clears n bytes starting at ptr. +// memclrNoHeapPointers clears n bytes starting at ptr. // -// Usually you should use typedmemclr. memclr should be used only when -// the caller knows that *ptr contains no heap pointers or to -// initialize memory to a type-safe state when allocation reuses dead -// memory. +// Usually you should use typedmemclr. memclrNoHeapPointers should be +// used only when the caller knows that *ptr contains no heap pointers +// because either: +// +// 1. *ptr is initialized memory and its type is pointer-free. +// +// 2. *ptr is uninitialized memory (e.g., memory that's being reused +// for a new allocation) and hence contains only "junk". // // in memclr_*.s //go:noescape -func memclr(ptr unsafe.Pointer, n uintptr) +func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) -//go:linkname reflect_memclr reflect.memclr -func reflect_memclr(ptr unsafe.Pointer, n uintptr) { - memclr(ptr, n) +//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers +func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) { + memclrNoHeapPointers(ptr, n) } // memmove copies n bytes from "from" to "to". 
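The rewritten comment in stubs.go just above is the core of the rename: memclrNoHeapPointers stays barrier-free and is legal only when the target memory is pointer-free or not yet initialized. A minimal sketch of that rule, assuming package-runtime context; pickClear is a hypothetical helper used only for illustration and is not added by this CL:

// pickClear restates the two conditions from the new
// memclrNoHeapPointers doc comment.
func pickClear(ptr unsafe.Pointer, n uintptr, initialized, hasHeapPointers bool) {
	if !initialized || !hasHeapPointers {
		// Either the memory is pointer-free, or it is still "junk"
		// (e.g. a reused allocation being prepared for a new object):
		// clearing it without a write barrier is safe.
		memclrNoHeapPointers(ptr, n)
		return
	}
	// Initialized memory that may contain heap pointers must go
	// through the barrier-aware entry point.
	memclrHasPointers(ptr, n)
}
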
diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s index 60302e05c4..bd5de33946 100644 --- a/src/runtime/sys_windows_386.s +++ b/src/runtime/sys_windows_386.s @@ -192,7 +192,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0 SUBL $m__size, SP // space for M MOVL SP, 0(SP) MOVL $m__size, 4(SP) - CALL runtime·memclr(SB) // smashes AX,BX,CX + CALL runtime·memclrNoHeapPointers(SB) // smashes AX,BX,CX LEAL m_tls(SP), CX MOVL CX, 0x14(FS) @@ -203,7 +203,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0 MOVL SP, 0(SP) MOVL $g__size, 4(SP) - CALL runtime·memclr(SB) // smashes AX,BX,CX + CALL runtime·memclrNoHeapPointers(SB) // smashes AX,BX,CX LEAL g__size(SP), BX MOVL BX, g_m(SP) diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s index 9679099c4e..c61b79d24f 100644 --- a/src/runtime/sys_windows_amd64.s +++ b/src/runtime/sys_windows_amd64.s @@ -236,7 +236,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME,$0 SUBQ $m__size, SP // space for M MOVQ SP, 0(SP) MOVQ $m__size, 8(SP) - CALL runtime·memclr(SB) // smashes AX,BX,CX, maybe BP + CALL runtime·memclrNoHeapPointers(SB) // smashes AX,BX,CX, maybe BP LEAQ m_tls(SP), CX MOVQ CX, 0x28(GS) @@ -247,7 +247,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME,$0 MOVQ SP, 0(SP) MOVQ $g__size, 8(SP) - CALL runtime·memclr(SB) // smashes AX,BX,CX, maybe BP + CALL runtime·memclrNoHeapPointers(SB) // smashes AX,BX,CX, maybe BP LEAQ g__size(SP), BX MOVQ BX, g_m(SP) diff --git a/src/runtime/write_err_android.go b/src/runtime/write_err_android.go index 4411a14755..748dec644c 100644 --- a/src/runtime/write_err_android.go +++ b/src/runtime/write_err_android.go @@ -75,7 +75,9 @@ func writeErr(b []byte) { if v == '\n' || writePos == len(dst)-1 { dst[writePos] = 0 write(writeFD, unsafe.Pointer(&writeBuf[0]), int32(hlen+writePos)) - memclrBytes(dst) + for i := range dst { + dst[i] = 0 + } writePos = 0 } }
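
The write_err_android.go hunk replaces the deleted memclrBytes helper with an explicit zeroing loop. Nothing is lost by this: per the range.go hunk earlier in the CL, the compiler's memclrrange rewrite recognizes exactly this loop shape and lowers it to a single memclrNoHeapPointers call. A short illustration (sketch only, with the lowered form shown in comments):

func zero(dst []byte) {
	for i := range dst {
		dst[i] = 0
	}
	// Lowered roughly to (per the comment in memclrrange):
	//   if len(dst) != 0 {
	//       hp := &dst[0]
	//       hn := len(dst) * sizeof(elem(dst))
	//       memclrNoHeapPointers(hp, hn)
	//       i = len(dst) - 1
	//   }
}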