diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
index 4770400cc8..fc3b858a80 100644
--- a/src/cmd/compile/internal/walk/assign.go
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -169,7 +169,7 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
 	a := n.Lhs[0]
 
 	var call *ir.CallExpr
-	if w := t.Elem().Size(); w <= abi.MaxZero {
+	if w := t.Elem().Size(); w <= abi.ZeroValSize {
 		fn := mapfn(mapaccess2[fast], t, false)
 		call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key)
 	} else {
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 7df6a34447..268f793dc9 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -826,7 +826,7 @@ func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
 	switch {
 	case n.Assigned:
 		mapFn = mapfn(mapassign[fast], t, false)
-	case t.Elem().Size() > abi.MaxZero:
+	case t.Elem().Size() > abi.ZeroValSize:
 		args = append(args, reflectdata.ZeroAddr(t.Elem().Size()))
 		mapFn = mapfn("mapaccess1_fat", t, true)
 	default:
diff --git a/src/internal/abi/map.go b/src/internal/abi/map.go
index 977bdb71cf..ad054e7d77 100644
--- a/src/internal/abi/map.go
+++ b/src/internal/abi/map.go
@@ -13,4 +13,5 @@ const (
 	MapMaxElemBytes = 128 // Must fit in a uint8.
 )
 
-const MaxZero = 1024
+// ZeroValSize is the size in bytes of runtime.zeroVal.
+const ZeroValSize = 1024
diff --git a/src/reflect/value.go b/src/reflect/value.go
index 74bd8560ba..06f22f7428 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -1603,7 +1603,7 @@ func (v Value) IsZero() bool {
 		}
 		typ := (*abi.ArrayType)(unsafe.Pointer(v.typ()))
 		// If the type is comparable, then compare directly with zero.
-		if typ.Equal != nil && typ.Size() <= abi.MaxZero {
+		if typ.Equal != nil && typ.Size() <= abi.ZeroValSize {
 			// v.ptr doesn't escape, as Equal functions are compiler generated
 			// and never escape. The escape analysis doesn't know, as it is a
 			// function pointer call.
@@ -1631,7 +1631,7 @@ func (v Value) IsZero() bool {
 		}
 		typ := (*abi.StructType)(unsafe.Pointer(v.typ()))
 		// If the type is comparable, then compare directly with zero.
-		if v.typ().Equal != nil && v.typ().Size() <= abi.MaxZero {
+		if typ.Equal != nil && typ.Size() <= abi.ZeroValSize {
 			// See noescape justification above.
 			return typ.Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0]))
 		}
@@ -3277,7 +3277,7 @@ func Zero(typ Type) Value {
 	fl := flag(t.Kind())
 	if t.IfaceIndir() {
 		var p unsafe.Pointer
-		if t.Size() <= abi.MaxZero {
+		if t.Size() <= abi.ZeroValSize {
 			p = unsafe.Pointer(&zeroVal[0])
 		} else {
 			p = unsafe_New(t)
@@ -3288,7 +3288,7 @@ func Zero(typ Type) Value {
 }
 
 //go:linkname zeroVal runtime.zeroVal
-var zeroVal [abi.MaxZero]byte
+var zeroVal [abi.ZeroValSize]byte
 
 // New returns a Value representing a pointer to a new zero value
 // for the specified type. That is, the returned Value's Type is PointerTo(typ).
diff --git a/src/runtime/map.go b/src/runtime/map.go
index e1294ca71b..7048949073 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -1436,7 +1436,7 @@ func reflectlite_maplen(h *hmap) int {
 	return h.count
 }
 
-var zeroVal [abi.MaxZero]byte
+var zeroVal [abi.ZeroValSize]byte
 
 // mapinitnoop is a no-op function known the Go linker; if a given global
 // map (of the right size) is determined to be dead, the linker will
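
Note (not part of the patch): the renamed constant is the cap on which element sizes the runtime can serve straight from the shared runtime.zeroVal block. Below is a minimal sketch of the user-visible behavior under that assumption; the map names and sizes are illustrative only, and the observable result is the same on either side of the threshold.

package main

import "fmt"

func main() {
	// Element size <= 1024 bytes (abi.ZeroValSize): a lookup miss can hand
	// back a pointer into the shared runtime.zeroVal block.
	small := map[string][1024]byte{}
	a := small["missing"]

	// Element size > 1024 bytes: per walkIndexMap above, the compiler instead
	// emits mapaccess1_fat with an explicit zero-value address
	// (reflectdata.ZeroAddr).
	big := map[string][1025]byte{}
	b := big["missing"]

	// Either way, a missing key still reads as the zero value.
	fmt.Println(a == [1024]byte{}, b == [1025]byte{}) // true true
}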