mirror of https://github.com/golang/go.git
all: split old and swiss map abi and compiler integration
The two map implementations are still identical, but now the compiler targets
the appropriate ABI depending on GOEXPERIMENT.

For #54766.

Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest,gotip-linux-amd64-longtest-swissmap
Change-Id: I8438f64f044ba9de30ddbf2b8ceb9b4edd2d5614
Reviewed-on: https://go-review.googlesource.com/c/go/+/580779
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
parent 057b703407
commit 4f7dc282c4
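The integration follows one pattern throughout the change: consult the swissmap GOEXPERIMENT via internal/buildcfg and call the Old* or Swiss* variant of the reflectdata helper. A minimal sketch of that dispatch, assuming the helpers added in the files below (it only builds inside the compiler tree; the wrapper name hmapType is hypothetical):

	package example

	import (
		"internal/buildcfg"

		"cmd/compile/internal/reflectdata"
		"cmd/compile/internal/types"
	)

	// hmapType mirrors how ssagen, walk and order now pick the runtime hmap
	// representation for the active GOEXPERIMENT.
	func hmapType() *types.Type {
		if buildcfg.Experiment.SwissMap {
			return reflectdata.SwissMapType()
		}
		return reflectdata.OldMapType()
	}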
@@ -0,0 +1,303 @@
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package reflectdata
|
||||
|
||||
import (
|
||||
"internal/abi"
|
||||
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/rttype"
|
||||
"cmd/compile/internal/types"
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/objabi"
|
||||
"cmd/internal/src"
|
||||
)
|
||||
|
||||
// OldMapBucketType makes the map bucket type given the type of the map.
|
||||
func OldMapBucketType(t *types.Type) *types.Type {
|
||||
// Builds a type representing a Bucket structure for
|
||||
// the given map type. This type is not visible to users -
|
||||
// we include only enough information to generate a correct GC
|
||||
// program for it.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
//
|
||||
// A "bucket" is a "struct" {
|
||||
// tophash [abi.OldMapBucketCount]uint8
|
||||
// keys [abi.OldMapBucketCount]keyType
|
||||
// elems [abi.OldMapBucketCount]elemType
|
||||
// overflow *bucket
|
||||
// }
|
||||
if t.MapType().OldBucket != nil {
|
||||
return t.MapType().OldBucket
|
||||
}
|
||||
|
||||
keytype := t.Key()
|
||||
elemtype := t.Elem()
|
||||
types.CalcSize(keytype)
|
||||
types.CalcSize(elemtype)
|
||||
if keytype.Size() > abi.OldMapMaxKeyBytes {
|
||||
keytype = types.NewPtr(keytype)
|
||||
}
|
||||
if elemtype.Size() > abi.OldMapMaxElemBytes {
|
||||
elemtype = types.NewPtr(elemtype)
|
||||
}
|
||||
|
||||
field := make([]*types.Field, 0, 5)
|
||||
|
||||
// The first field is: uint8 topbits[BUCKETSIZE].
|
||||
arr := types.NewArray(types.Types[types.TUINT8], abi.OldMapBucketCount)
|
||||
field = append(field, makefield("topbits", arr))
|
||||
|
||||
arr = types.NewArray(keytype, abi.OldMapBucketCount)
|
||||
arr.SetNoalg(true)
|
||||
keys := makefield("keys", arr)
|
||||
field = append(field, keys)
|
||||
|
||||
arr = types.NewArray(elemtype, abi.OldMapBucketCount)
|
||||
arr.SetNoalg(true)
|
||||
elems := makefield("elems", arr)
|
||||
field = append(field, elems)
|
||||
|
||||
// If keys and elems have no pointers, the map implementation
|
||||
// can keep a list of overflow pointers on the side so that
|
||||
// buckets can be marked as having no pointers.
|
||||
// Arrange for the bucket to have no pointers by changing
|
||||
// the type of the overflow field to uintptr in this case.
|
||||
// See comment on hmap.overflow in runtime/map.go.
|
||||
otyp := types.Types[types.TUNSAFEPTR]
|
||||
if !elemtype.HasPointers() && !keytype.HasPointers() {
|
||||
otyp = types.Types[types.TUINTPTR]
|
||||
}
|
||||
overflow := makefield("overflow", otyp)
|
||||
field = append(field, overflow)
|
||||
|
||||
// link up fields
|
||||
bucket := types.NewStruct(field[:])
|
||||
bucket.SetNoalg(true)
|
||||
types.CalcSize(bucket)
|
||||
|
||||
// Check invariants that map code depends on.
|
||||
if !types.IsComparable(t.Key()) {
|
||||
base.Fatalf("unsupported map key type for %v", t)
|
||||
}
|
||||
if abi.OldMapBucketCount < 8 {
|
||||
base.Fatalf("bucket size %d too small for proper alignment %d", abi.OldMapBucketCount, 8)
|
||||
}
|
||||
if uint8(keytype.Alignment()) > abi.OldMapBucketCount {
|
||||
base.Fatalf("key align too big for %v", t)
|
||||
}
|
||||
if uint8(elemtype.Alignment()) > abi.OldMapBucketCount {
|
||||
base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.OldMapBucketCount)
|
||||
}
|
||||
if keytype.Size() > abi.OldMapMaxKeyBytes {
|
||||
base.Fatalf("key size too large for %v", t)
|
||||
}
|
||||
if elemtype.Size() > abi.OldMapMaxElemBytes {
|
||||
base.Fatalf("elem size too large for %v", t)
|
||||
}
|
||||
if t.Key().Size() > abi.OldMapMaxKeyBytes && !keytype.IsPtr() {
|
||||
base.Fatalf("key indirect incorrect for %v", t)
|
||||
}
|
||||
if t.Elem().Size() > abi.OldMapMaxElemBytes && !elemtype.IsPtr() {
|
||||
base.Fatalf("elem indirect incorrect for %v", t)
|
||||
}
|
||||
if keytype.Size()%keytype.Alignment() != 0 {
|
||||
base.Fatalf("key size not a multiple of key align for %v", t)
|
||||
}
|
||||
if elemtype.Size()%elemtype.Alignment() != 0 {
|
||||
base.Fatalf("elem size not a multiple of elem align for %v", t)
|
||||
}
|
||||
if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
|
||||
base.Fatalf("bucket align not multiple of key align %v", t)
|
||||
}
|
||||
if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
|
||||
base.Fatalf("bucket align not multiple of elem align %v", t)
|
||||
}
|
||||
if keys.Offset%keytype.Alignment() != 0 {
|
||||
base.Fatalf("bad alignment of keys in bmap for %v", t)
|
||||
}
|
||||
if elems.Offset%elemtype.Alignment() != 0 {
|
||||
base.Fatalf("bad alignment of elems in bmap for %v", t)
|
||||
}
|
||||
|
||||
// Double-check that overflow field is final memory in struct,
|
||||
// with no padding at end.
|
||||
if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
|
||||
base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
|
||||
t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
|
||||
}
|
||||
|
||||
t.MapType().OldBucket = bucket
|
||||
|
||||
bucket.StructType().Map = t
|
||||
return bucket
|
||||
}
|
||||
|
||||
var oldHmapType *types.Type
|
||||
|
||||
// OldMapType returns a type interchangeable with runtime.hmap.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
func OldMapType() *types.Type {
|
||||
if oldHmapType != nil {
|
||||
return oldHmapType
|
||||
}
|
||||
|
||||
// build a struct:
|
||||
// type hmap struct {
|
||||
// count int
|
||||
// flags uint8
|
||||
// B uint8
|
||||
// noverflow uint16
|
||||
// hash0 uint32
|
||||
// buckets unsafe.Pointer
|
||||
// oldbuckets unsafe.Pointer
|
||||
// nevacuate uintptr
|
||||
// extra unsafe.Pointer // *mapextra
|
||||
// }
|
||||
// must match runtime/map.go:hmap.
|
||||
fields := []*types.Field{
|
||||
makefield("count", types.Types[types.TINT]),
|
||||
makefield("flags", types.Types[types.TUINT8]),
|
||||
makefield("B", types.Types[types.TUINT8]),
|
||||
makefield("noverflow", types.Types[types.TUINT16]),
|
||||
makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
|
||||
makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
|
||||
makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("nevacuate", types.Types[types.TUINTPTR]),
|
||||
makefield("extra", types.Types[types.TUNSAFEPTR]),
|
||||
}
|
||||
|
||||
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
|
||||
hmap := types.NewNamed(n)
|
||||
n.SetType(hmap)
|
||||
n.SetTypecheck(1)
|
||||
|
||||
hmap.SetUnderlying(types.NewStruct(fields))
|
||||
types.CalcSize(hmap)
|
||||
|
||||
// The size of hmap should be 48 bytes on 64 bit
|
||||
// and 28 bytes on 32 bit platforms.
|
||||
if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
|
||||
base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
|
||||
}
|
||||
|
||||
oldHmapType = hmap
|
||||
return hmap
|
||||
}
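The 8 + 5*types.PtrSize expectation can be sanity-checked outside the compiler with an equivalent struct: flags, B, noverflow and hash0 pack into 8 bytes, and count plus the four pointer/uintptr fields supply the five pointer-sized words. A small sketch (hmapMirror is a hypothetical stand-in, not the runtime type):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// hmapMirror copies the field layout listed in the comment above.
	type hmapMirror struct {
		count      int
		flags      uint8
		B          uint8
		noverflow  uint16
		hash0      uint32
		buckets    unsafe.Pointer
		oldbuckets unsafe.Pointer
		nevacuate  uintptr
		extra      unsafe.Pointer
	}

	func main() {
		fmt.Println(unsafe.Sizeof(hmapMirror{})) // 48 on 64-bit, 28 on 32-bit
	}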
|
||||
|
||||
var oldHiterType *types.Type
|
||||
|
||||
// OldMapIterType returns a type interchangeable with runtime.hiter.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
func OldMapIterType() *types.Type {
|
||||
if oldHiterType != nil {
|
||||
return oldHiterType
|
||||
}
|
||||
|
||||
hmap := OldMapType()
|
||||
|
||||
// build a struct:
|
||||
// type hiter struct {
|
||||
// key unsafe.Pointer // *Key
|
||||
// elem unsafe.Pointer // *Elem
|
||||
// t unsafe.Pointer // *OldMapType
|
||||
// h *hmap
|
||||
// buckets unsafe.Pointer
|
||||
// bptr unsafe.Pointer // *bmap
|
||||
// overflow unsafe.Pointer // *[]*bmap
|
||||
// oldoverflow unsafe.Pointer // *[]*bmap
|
||||
// startBucket uintptr
|
||||
// offset uint8
|
||||
// wrapped bool
|
||||
// B uint8
|
||||
// i uint8
|
||||
// bucket uintptr
|
||||
// checkBucket uintptr
|
||||
// }
|
||||
// must match runtime/map.go:hiter.
|
||||
fields := []*types.Field{
|
||||
makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
|
||||
makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
|
||||
makefield("t", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("h", types.NewPtr(hmap)),
|
||||
makefield("buckets", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("bptr", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("overflow", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("startBucket", types.Types[types.TUINTPTR]),
|
||||
makefield("offset", types.Types[types.TUINT8]),
|
||||
makefield("wrapped", types.Types[types.TBOOL]),
|
||||
makefield("B", types.Types[types.TUINT8]),
|
||||
makefield("i", types.Types[types.TUINT8]),
|
||||
makefield("bucket", types.Types[types.TUINTPTR]),
|
||||
makefield("checkBucket", types.Types[types.TUINTPTR]),
|
||||
}
|
||||
|
||||
// build iterator struct holding the above fields
|
||||
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
|
||||
hiter := types.NewNamed(n)
|
||||
n.SetType(hiter)
|
||||
n.SetTypecheck(1)
|
||||
|
||||
hiter.SetUnderlying(types.NewStruct(fields))
|
||||
types.CalcSize(hiter)
|
||||
if hiter.Size() != int64(12*types.PtrSize) {
|
||||
base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
|
||||
}
|
||||
|
||||
oldHiterType = hiter
|
||||
return hiter
|
||||
}
|
||||
|
||||
func writeOldMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
|
||||
// internal/abi.OldMapType
|
||||
s1 := writeType(t.Key())
|
||||
s2 := writeType(t.Elem())
|
||||
s3 := writeType(OldMapBucketType(t))
|
||||
hasher := genhash(t.Key())
|
||||
|
||||
c.Field("Key").WritePtr(s1)
|
||||
c.Field("Elem").WritePtr(s2)
|
||||
c.Field("Bucket").WritePtr(s3)
|
||||
c.Field("Hasher").WritePtr(hasher)
|
||||
var flags uint32
|
||||
// Note: flags must match maptype accessors in ../../../../runtime/type.go
|
||||
// and maptype builder in ../../../../reflect/type.go:MapOf.
|
||||
if t.Key().Size() > abi.OldMapMaxKeyBytes {
|
||||
c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
|
||||
flags |= 1 // indirect key
|
||||
} else {
|
||||
c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
|
||||
}
|
||||
|
||||
if t.Elem().Size() > abi.OldMapMaxElemBytes {
|
||||
c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
|
||||
flags |= 2 // indirect value
|
||||
} else {
|
||||
c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
|
||||
}
|
||||
c.Field("BucketSize").WriteUint16(uint16(OldMapBucketType(t).Size()))
|
||||
if types.IsReflexive(t.Key()) {
|
||||
flags |= 4 // reflexive key
|
||||
}
|
||||
if needkeyupdate(t.Key()) {
|
||||
flags |= 8 // need key update
|
||||
}
|
||||
if hashMightPanic(t.Key()) {
|
||||
flags |= 16 // hash might panic
|
||||
}
|
||||
c.Field("Flags").WriteUint32(flags)
|
||||
|
||||
if u := t.Underlying(); u != t {
|
||||
// If t is a named map type, also keep the underlying map
|
||||
// type live in the binary. This is important to make sure that
|
||||
// a named map and that same map cast to its underlying type via
|
||||
// reflection, use the same hash function. See issue 37716.
|
||||
r := obj.Addrel(lsym)
|
||||
r.Sym = writeType(u)
|
||||
r.Type = objabi.R_KEEP
|
||||
}
|
||||
}
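For a concrete feel of the bucket layout OldMapBucketType builds, here is a sketch of the equivalent struct for map[int64]int64 on a 64-bit target, assuming the usual bucket count of 8 (oldBucketInt64 is a hypothetical stand-in):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// oldBucketInt64 mirrors the bucket described above for map[int64]int64.
	type oldBucketInt64 struct {
		topbits  [8]uint8
		keys     [8]int64
		elems    [8]int64
		overflow unsafe.Pointer
	}

	func main() {
		// 8 + 8*8 + 8*8 + 8 = 144 bytes; overflow is the final pointer-sized
		// word, matching the offset invariant checked above.
		fmt.Println(unsafe.Sizeof(oldBucketInt64{}))
	}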
@@ -0,0 +1,303 @@
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package reflectdata
|
||||
|
||||
import (
|
||||
"internal/abi"
|
||||
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
"cmd/compile/internal/rttype"
|
||||
"cmd/compile/internal/types"
|
||||
"cmd/internal/obj"
|
||||
"cmd/internal/objabi"
|
||||
"cmd/internal/src"
|
||||
)
|
||||
|
||||
// SwissMapBucketType makes the map bucket type given the type of the map.
|
||||
func SwissMapBucketType(t *types.Type) *types.Type {
|
||||
// Builds a type representing a Bucket structure for
|
||||
// the given map type. This type is not visible to users -
|
||||
// we include only enough information to generate a correct GC
|
||||
// program for it.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
//
|
||||
// A "bucket" is a "struct" {
|
||||
// tophash [abi.SwissMapBucketCount]uint8
|
||||
// keys [abi.SwissMapBucketCount]keyType
|
||||
// elems [abi.SwissMapBucketCount]elemType
|
||||
// overflow *bucket
|
||||
// }
|
||||
if t.MapType().SwissBucket != nil {
|
||||
return t.MapType().SwissBucket
|
||||
}
|
||||
|
||||
keytype := t.Key()
|
||||
elemtype := t.Elem()
|
||||
types.CalcSize(keytype)
|
||||
types.CalcSize(elemtype)
|
||||
if keytype.Size() > abi.SwissMapMaxKeyBytes {
|
||||
keytype = types.NewPtr(keytype)
|
||||
}
|
||||
if elemtype.Size() > abi.SwissMapMaxElemBytes {
|
||||
elemtype = types.NewPtr(elemtype)
|
||||
}
|
||||
|
||||
field := make([]*types.Field, 0, 5)
|
||||
|
||||
// The first field is: uint8 topbits[BUCKETSIZE].
|
||||
arr := types.NewArray(types.Types[types.TUINT8], abi.SwissMapBucketCount)
|
||||
field = append(field, makefield("topbits", arr))
|
||||
|
||||
arr = types.NewArray(keytype, abi.SwissMapBucketCount)
|
||||
arr.SetNoalg(true)
|
||||
keys := makefield("keys", arr)
|
||||
field = append(field, keys)
|
||||
|
||||
arr = types.NewArray(elemtype, abi.SwissMapBucketCount)
|
||||
arr.SetNoalg(true)
|
||||
elems := makefield("elems", arr)
|
||||
field = append(field, elems)
|
||||
|
||||
// If keys and elems have no pointers, the map implementation
|
||||
// can keep a list of overflow pointers on the side so that
|
||||
// buckets can be marked as having no pointers.
|
||||
// Arrange for the bucket to have no pointers by changing
|
||||
// the type of the overflow field to uintptr in this case.
|
||||
// See comment on hmap.overflow in runtime/map.go.
|
||||
otyp := types.Types[types.TUNSAFEPTR]
|
||||
if !elemtype.HasPointers() && !keytype.HasPointers() {
|
||||
otyp = types.Types[types.TUINTPTR]
|
||||
}
|
||||
overflow := makefield("overflow", otyp)
|
||||
field = append(field, overflow)
|
||||
|
||||
// link up fields
|
||||
bucket := types.NewStruct(field[:])
|
||||
bucket.SetNoalg(true)
|
||||
types.CalcSize(bucket)
|
||||
|
||||
// Check invariants that map code depends on.
|
||||
if !types.IsComparable(t.Key()) {
|
||||
base.Fatalf("unsupported map key type for %v", t)
|
||||
}
|
||||
if abi.SwissMapBucketCount < 8 {
|
||||
base.Fatalf("bucket size %d too small for proper alignment %d", abi.SwissMapBucketCount, 8)
|
||||
}
|
||||
if uint8(keytype.Alignment()) > abi.SwissMapBucketCount {
|
||||
base.Fatalf("key align too big for %v", t)
|
||||
}
|
||||
if uint8(elemtype.Alignment()) > abi.SwissMapBucketCount {
|
||||
base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.SwissMapBucketCount)
|
||||
}
|
||||
if keytype.Size() > abi.SwissMapMaxKeyBytes {
|
||||
base.Fatalf("key size too large for %v", t)
|
||||
}
|
||||
if elemtype.Size() > abi.SwissMapMaxElemBytes {
|
||||
base.Fatalf("elem size too large for %v", t)
|
||||
}
|
||||
if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() {
|
||||
base.Fatalf("key indirect incorrect for %v", t)
|
||||
}
|
||||
if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() {
|
||||
base.Fatalf("elem indirect incorrect for %v", t)
|
||||
}
|
||||
if keytype.Size()%keytype.Alignment() != 0 {
|
||||
base.Fatalf("key size not a multiple of key align for %v", t)
|
||||
}
|
||||
if elemtype.Size()%elemtype.Alignment() != 0 {
|
||||
base.Fatalf("elem size not a multiple of elem align for %v", t)
|
||||
}
|
||||
if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
|
||||
base.Fatalf("bucket align not multiple of key align %v", t)
|
||||
}
|
||||
if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
|
||||
base.Fatalf("bucket align not multiple of elem align %v", t)
|
||||
}
|
||||
if keys.Offset%keytype.Alignment() != 0 {
|
||||
base.Fatalf("bad alignment of keys in bmap for %v", t)
|
||||
}
|
||||
if elems.Offset%elemtype.Alignment() != 0 {
|
||||
base.Fatalf("bad alignment of elems in bmap for %v", t)
|
||||
}
|
||||
|
||||
// Double-check that overflow field is final memory in struct,
|
||||
// with no padding at end.
|
||||
if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
|
||||
base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
|
||||
t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
|
||||
}
|
||||
|
||||
t.MapType().SwissBucket = bucket
|
||||
|
||||
bucket.StructType().Map = t
|
||||
return bucket
|
||||
}
|
||||
|
||||
var swissHmapType *types.Type
|
||||
|
||||
// SwissMapType returns a type interchangeable with runtime.hmap.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
func SwissMapType() *types.Type {
|
||||
if swissHmapType != nil {
|
||||
return swissHmapType
|
||||
}
|
||||
|
||||
// build a struct:
|
||||
// type hmap struct {
|
||||
// count int
|
||||
// flags uint8
|
||||
// B uint8
|
||||
// noverflow uint16
|
||||
// hash0 uint32
|
||||
// buckets unsafe.Pointer
|
||||
// oldbuckets unsafe.Pointer
|
||||
// nevacuate uintptr
|
||||
// extra unsafe.Pointer // *mapextra
|
||||
// }
|
||||
// must match runtime/map.go:hmap.
|
||||
fields := []*types.Field{
|
||||
makefield("count", types.Types[types.TINT]),
|
||||
makefield("flags", types.Types[types.TUINT8]),
|
||||
makefield("B", types.Types[types.TUINT8]),
|
||||
makefield("noverflow", types.Types[types.TUINT16]),
|
||||
makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
|
||||
makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
|
||||
makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("nevacuate", types.Types[types.TUINTPTR]),
|
||||
makefield("extra", types.Types[types.TUNSAFEPTR]),
|
||||
}
|
||||
|
||||
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
|
||||
hmap := types.NewNamed(n)
|
||||
n.SetType(hmap)
|
||||
n.SetTypecheck(1)
|
||||
|
||||
hmap.SetUnderlying(types.NewStruct(fields))
|
||||
types.CalcSize(hmap)
|
||||
|
||||
// The size of hmap should be 48 bytes on 64 bit
|
||||
// and 28 bytes on 32 bit platforms.
|
||||
if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
|
||||
base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
|
||||
}
|
||||
|
||||
swissHmapType = hmap
|
||||
return hmap
|
||||
}
|
||||
|
||||
var swissHiterType *types.Type
|
||||
|
||||
// SwissMapIterType returns a type interchangeable with runtime.hiter.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
func SwissMapIterType() *types.Type {
|
||||
if swissHiterType != nil {
|
||||
return swissHiterType
|
||||
}
|
||||
|
||||
hmap := SwissMapType()
|
||||
|
||||
// build a struct:
|
||||
// type hiter struct {
|
||||
// key unsafe.Pointer // *Key
|
||||
// elem unsafe.Pointer // *Elem
|
||||
// t unsafe.Pointer // *SwissMapType
|
||||
// h *hmap
|
||||
// buckets unsafe.Pointer
|
||||
// bptr unsafe.Pointer // *bmap
|
||||
// overflow unsafe.Pointer // *[]*bmap
|
||||
// oldoverflow unsafe.Pointer // *[]*bmap
|
||||
// startBucket uintptr
|
||||
// offset uint8
|
||||
// wrapped bool
|
||||
// B uint8
|
||||
// i uint8
|
||||
// bucket uintptr
|
||||
// checkBucket uintptr
|
||||
// }
|
||||
// must match runtime/map.go:hiter.
|
||||
fields := []*types.Field{
|
||||
makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
|
||||
makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
|
||||
makefield("t", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("h", types.NewPtr(hmap)),
|
||||
makefield("buckets", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("bptr", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("overflow", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("startBucket", types.Types[types.TUINTPTR]),
|
||||
makefield("offset", types.Types[types.TUINT8]),
|
||||
makefield("wrapped", types.Types[types.TBOOL]),
|
||||
makefield("B", types.Types[types.TUINT8]),
|
||||
makefield("i", types.Types[types.TUINT8]),
|
||||
makefield("bucket", types.Types[types.TUINTPTR]),
|
||||
makefield("checkBucket", types.Types[types.TUINTPTR]),
|
||||
}
|
||||
|
||||
// build iterator struct holding the above fields
|
||||
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
|
||||
hiter := types.NewNamed(n)
|
||||
n.SetType(hiter)
|
||||
n.SetTypecheck(1)
|
||||
|
||||
hiter.SetUnderlying(types.NewStruct(fields))
|
||||
types.CalcSize(hiter)
|
||||
if hiter.Size() != int64(12*types.PtrSize) {
|
||||
base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
|
||||
}
|
||||
|
||||
swissHiterType = hiter
|
||||
return hiter
|
||||
}
|
||||
|
||||
func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
|
||||
// internal/abi.SwissMapType
|
||||
s1 := writeType(t.Key())
|
||||
s2 := writeType(t.Elem())
|
||||
s3 := writeType(SwissMapBucketType(t))
|
||||
hasher := genhash(t.Key())
|
||||
|
||||
c.Field("Key").WritePtr(s1)
|
||||
c.Field("Elem").WritePtr(s2)
|
||||
c.Field("Bucket").WritePtr(s3)
|
||||
c.Field("Hasher").WritePtr(hasher)
|
||||
var flags uint32
|
||||
// Note: flags must match maptype accessors in ../../../../runtime/type.go
|
||||
// and maptype builder in ../../../../reflect/type.go:MapOf.
|
||||
if t.Key().Size() > abi.SwissMapMaxKeyBytes {
|
||||
c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
|
||||
flags |= 1 // indirect key
|
||||
} else {
|
||||
c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
|
||||
}
|
||||
|
||||
if t.Elem().Size() > abi.SwissMapMaxElemBytes {
|
||||
c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
|
||||
flags |= 2 // indirect value
|
||||
} else {
|
||||
c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
|
||||
}
|
||||
c.Field("BucketSize").WriteUint16(uint16(SwissMapBucketType(t).Size()))
|
||||
if types.IsReflexive(t.Key()) {
|
||||
flags |= 4 // reflexive key
|
||||
}
|
||||
if needkeyupdate(t.Key()) {
|
||||
flags |= 8 // need key update
|
||||
}
|
||||
if hashMightPanic(t.Key()) {
|
||||
flags |= 16 // hash might panic
|
||||
}
|
||||
c.Field("Flags").WriteUint32(flags)
|
||||
|
||||
if u := t.Underlying(); u != t {
|
||||
// If t is a named map type, also keep the underlying map
|
||||
// type live in the binary. This is important to make sure that
|
||||
// a named map and that same map cast to its underlying type via
|
||||
// reflection, use the same hash function. See issue 37716.
|
||||
r := obj.Addrel(lsym)
|
||||
r.Sym = writeType(u)
|
||||
r.Type = objabi.R_KEEP
|
||||
}
|
||||
}
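The Flags word written by writeSwissMapType (and writeOldMapType above) packs independent key/elem properties into single bits. A sketch of the bit values with an example for a string-keyed map; the real per-type answers come from IsReflexive, needkeyupdate and hashMightPanic, and the constant names here are hypothetical:

	package main

	import "fmt"

	const (
		mapIndirectKey    = 1 << 0 // key stored behind a pointer (key too large)
		mapIndirectElem   = 1 << 1 // elem stored behind a pointer (elem too large)
		mapReflexiveKey   = 1 << 2 // k == k for all keys
		mapNeedKeyUpdate  = 1 << 3 // overwrite the stored key on update
		mapHashMightPanic = 1 << 4 // hashing the key can panic (e.g. interface keys)
	)

	func main() {
		// e.g. map[string]int: small comparable key, reflexive, needs key
		// updates, and hashing cannot panic.
		flags := uint32(mapReflexiveKey | mapNeedKeyUpdate)
		fmt.Printf("%#b\n", flags) // 0b1100
	}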
@@ -8,6 +8,7 @@ import (
"encoding/binary"
|
||||
"fmt"
|
||||
"internal/abi"
|
||||
"internal/buildcfg"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
@@ -69,242 +70,6 @@ func makefield(name string, t *types.Type) *types.Field {
return types.NewField(src.NoXPos, sym, t)
|
||||
}
|
||||
|
||||
// MapBucketType makes the map bucket type given the type of the map.
|
||||
func MapBucketType(t *types.Type) *types.Type {
|
||||
// Builds a type representing a Bucket structure for
|
||||
// the given map type. This type is not visible to users -
|
||||
// we include only enough information to generate a correct GC
|
||||
// program for it.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
//
|
||||
// A "bucket" is a "struct" {
|
||||
// tophash [abi.MapBucketCount]uint8
|
||||
// keys [abi.MapBucketCount]keyType
|
||||
// elems [abi.MapBucketCount]elemType
|
||||
// overflow *bucket
|
||||
// }
|
||||
if t.MapType().Bucket != nil {
|
||||
return t.MapType().Bucket
|
||||
}
|
||||
|
||||
keytype := t.Key()
|
||||
elemtype := t.Elem()
|
||||
types.CalcSize(keytype)
|
||||
types.CalcSize(elemtype)
|
||||
if keytype.Size() > abi.MapMaxKeyBytes {
|
||||
keytype = types.NewPtr(keytype)
|
||||
}
|
||||
if elemtype.Size() > abi.MapMaxElemBytes {
|
||||
elemtype = types.NewPtr(elemtype)
|
||||
}
|
||||
|
||||
field := make([]*types.Field, 0, 5)
|
||||
|
||||
// The first field is: uint8 topbits[BUCKETSIZE].
|
||||
arr := types.NewArray(types.Types[types.TUINT8], abi.MapBucketCount)
|
||||
field = append(field, makefield("topbits", arr))
|
||||
|
||||
arr = types.NewArray(keytype, abi.MapBucketCount)
|
||||
arr.SetNoalg(true)
|
||||
keys := makefield("keys", arr)
|
||||
field = append(field, keys)
|
||||
|
||||
arr = types.NewArray(elemtype, abi.MapBucketCount)
|
||||
arr.SetNoalg(true)
|
||||
elems := makefield("elems", arr)
|
||||
field = append(field, elems)
|
||||
|
||||
// If keys and elems have no pointers, the map implementation
|
||||
// can keep a list of overflow pointers on the side so that
|
||||
// buckets can be marked as having no pointers.
|
||||
// Arrange for the bucket to have no pointers by changing
|
||||
// the type of the overflow field to uintptr in this case.
|
||||
// See comment on hmap.overflow in runtime/map.go.
|
||||
otyp := types.Types[types.TUNSAFEPTR]
|
||||
if !elemtype.HasPointers() && !keytype.HasPointers() {
|
||||
otyp = types.Types[types.TUINTPTR]
|
||||
}
|
||||
overflow := makefield("overflow", otyp)
|
||||
field = append(field, overflow)
|
||||
|
||||
// link up fields
|
||||
bucket := types.NewStruct(field[:])
|
||||
bucket.SetNoalg(true)
|
||||
types.CalcSize(bucket)
|
||||
|
||||
// Check invariants that map code depends on.
|
||||
if !types.IsComparable(t.Key()) {
|
||||
base.Fatalf("unsupported map key type for %v", t)
|
||||
}
|
||||
if abi.MapBucketCount < 8 {
|
||||
base.Fatalf("bucket size %d too small for proper alignment %d", abi.MapBucketCount, 8)
|
||||
}
|
||||
if uint8(keytype.Alignment()) > abi.MapBucketCount {
|
||||
base.Fatalf("key align too big for %v", t)
|
||||
}
|
||||
if uint8(elemtype.Alignment()) > abi.MapBucketCount {
|
||||
base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.MapBucketCount)
|
||||
}
|
||||
if keytype.Size() > abi.MapMaxKeyBytes {
|
||||
base.Fatalf("key size too large for %v", t)
|
||||
}
|
||||
if elemtype.Size() > abi.MapMaxElemBytes {
|
||||
base.Fatalf("elem size too large for %v", t)
|
||||
}
|
||||
if t.Key().Size() > abi.MapMaxKeyBytes && !keytype.IsPtr() {
|
||||
base.Fatalf("key indirect incorrect for %v", t)
|
||||
}
|
||||
if t.Elem().Size() > abi.MapMaxElemBytes && !elemtype.IsPtr() {
|
||||
base.Fatalf("elem indirect incorrect for %v", t)
|
||||
}
|
||||
if keytype.Size()%keytype.Alignment() != 0 {
|
||||
base.Fatalf("key size not a multiple of key align for %v", t)
|
||||
}
|
||||
if elemtype.Size()%elemtype.Alignment() != 0 {
|
||||
base.Fatalf("elem size not a multiple of elem align for %v", t)
|
||||
}
|
||||
if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
|
||||
base.Fatalf("bucket align not multiple of key align %v", t)
|
||||
}
|
||||
if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
|
||||
base.Fatalf("bucket align not multiple of elem align %v", t)
|
||||
}
|
||||
if keys.Offset%keytype.Alignment() != 0 {
|
||||
base.Fatalf("bad alignment of keys in bmap for %v", t)
|
||||
}
|
||||
if elems.Offset%elemtype.Alignment() != 0 {
|
||||
base.Fatalf("bad alignment of elems in bmap for %v", t)
|
||||
}
|
||||
|
||||
// Double-check that overflow field is final memory in struct,
|
||||
// with no padding at end.
|
||||
if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
|
||||
base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
|
||||
t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
|
||||
}
|
||||
|
||||
t.MapType().Bucket = bucket
|
||||
|
||||
bucket.StructType().Map = t
|
||||
return bucket
|
||||
}
|
||||
|
||||
var hmapType *types.Type
|
||||
|
||||
// MapType returns a type interchangeable with runtime.hmap.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
func MapType() *types.Type {
|
||||
if hmapType != nil {
|
||||
return hmapType
|
||||
}
|
||||
|
||||
// build a struct:
|
||||
// type hmap struct {
|
||||
// count int
|
||||
// flags uint8
|
||||
// B uint8
|
||||
// noverflow uint16
|
||||
// hash0 uint32
|
||||
// buckets unsafe.Pointer
|
||||
// oldbuckets unsafe.Pointer
|
||||
// nevacuate uintptr
|
||||
// extra unsafe.Pointer // *mapextra
|
||||
// }
|
||||
// must match runtime/map.go:hmap.
|
||||
fields := []*types.Field{
|
||||
makefield("count", types.Types[types.TINT]),
|
||||
makefield("flags", types.Types[types.TUINT8]),
|
||||
makefield("B", types.Types[types.TUINT8]),
|
||||
makefield("noverflow", types.Types[types.TUINT16]),
|
||||
makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
|
||||
makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
|
||||
makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("nevacuate", types.Types[types.TUINTPTR]),
|
||||
makefield("extra", types.Types[types.TUNSAFEPTR]),
|
||||
}
|
||||
|
||||
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
|
||||
hmap := types.NewNamed(n)
|
||||
n.SetType(hmap)
|
||||
n.SetTypecheck(1)
|
||||
|
||||
hmap.SetUnderlying(types.NewStruct(fields))
|
||||
types.CalcSize(hmap)
|
||||
|
||||
// The size of hmap should be 48 bytes on 64 bit
|
||||
// and 28 bytes on 32 bit platforms.
|
||||
if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
|
||||
base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
|
||||
}
|
||||
|
||||
hmapType = hmap
|
||||
return hmap
|
||||
}
|
||||
|
||||
var hiterType *types.Type
|
||||
|
||||
// MapIterType returns a type interchangeable with runtime.hiter.
|
||||
// Make sure this stays in sync with runtime/map.go.
|
||||
func MapIterType() *types.Type {
|
||||
if hiterType != nil {
|
||||
return hiterType
|
||||
}
|
||||
|
||||
hmap := MapType()
|
||||
|
||||
// build a struct:
|
||||
// type hiter struct {
|
||||
// key unsafe.Pointer // *Key
|
||||
// elem unsafe.Pointer // *Elem
|
||||
// t unsafe.Pointer // *MapType
|
||||
// h *hmap
|
||||
// buckets unsafe.Pointer
|
||||
// bptr unsafe.Pointer // *bmap
|
||||
// overflow unsafe.Pointer // *[]*bmap
|
||||
// oldoverflow unsafe.Pointer // *[]*bmap
|
||||
// startBucket uintptr
|
||||
// offset uint8
|
||||
// wrapped bool
|
||||
// B uint8
|
||||
// i uint8
|
||||
// bucket uintptr
|
||||
// checkBucket uintptr
|
||||
// }
|
||||
// must match runtime/map.go:hiter.
|
||||
fields := []*types.Field{
|
||||
makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
|
||||
makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
|
||||
makefield("t", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("h", types.NewPtr(hmap)),
|
||||
makefield("buckets", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("bptr", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("overflow", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
|
||||
makefield("startBucket", types.Types[types.TUINTPTR]),
|
||||
makefield("offset", types.Types[types.TUINT8]),
|
||||
makefield("wrapped", types.Types[types.TBOOL]),
|
||||
makefield("B", types.Types[types.TUINT8]),
|
||||
makefield("i", types.Types[types.TUINT8]),
|
||||
makefield("bucket", types.Types[types.TUINTPTR]),
|
||||
makefield("checkBucket", types.Types[types.TUINTPTR]),
|
||||
}
|
||||
|
||||
// build iterator struct holding the above fields
|
||||
n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
|
||||
hiter := types.NewNamed(n)
|
||||
n.SetType(hiter)
|
||||
n.SetTypecheck(1)
|
||||
|
||||
hiter.SetUnderlying(types.NewStruct(fields))
|
||||
types.CalcSize(hiter)
|
||||
if hiter.Size() != int64(12*types.PtrSize) {
|
||||
base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
|
||||
}
|
||||
|
||||
hiterType = hiter
|
||||
return hiter
|
||||
}
|
||||
|
||||
// methods returns the methods of the non-interface type t, sorted by name.
|
||||
// Generates stub functions as needed.
|
||||
func methods(t *types.Type) []*typeSig {
@@ -1005,7 +770,11 @@ func writeType(t *types.Type) *obj.LSym {
rt = rttype.InterfaceType
|
||||
dataAdd = len(imethods(t)) * int(rttype.IMethod.Size())
|
||||
case types.TMAP:
|
||||
rt = rttype.MapType
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
rt = rttype.SwissMapType
|
||||
} else {
|
||||
rt = rttype.OldMapType
|
||||
}
|
||||
case types.TPTR:
|
||||
rt = rttype.PtrType
|
||||
// TODO: use rttype.Type for Elem() is ANY?
@@ -1105,52 +874,10 @@ func writeType(t *types.Type) *obj.LSym {
}
|
||||
|
||||
case types.TMAP:
|
||||
// internal/abi.MapType
|
||||
s1 := writeType(t.Key())
|
||||
s2 := writeType(t.Elem())
|
||||
s3 := writeType(MapBucketType(t))
|
||||
hasher := genhash(t.Key())
|
||||
|
||||
c.Field("Key").WritePtr(s1)
|
||||
c.Field("Elem").WritePtr(s2)
|
||||
c.Field("Bucket").WritePtr(s3)
|
||||
c.Field("Hasher").WritePtr(hasher)
|
||||
var flags uint32
|
||||
// Note: flags must match maptype accessors in ../../../../runtime/type.go
|
||||
// and maptype builder in ../../../../reflect/type.go:MapOf.
|
||||
if t.Key().Size() > abi.MapMaxKeyBytes {
|
||||
c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
|
||||
flags |= 1 // indirect key
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
writeSwissMapType(t, lsym, c)
|
||||
} else {
|
||||
c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
|
||||
}
|
||||
|
||||
if t.Elem().Size() > abi.MapMaxElemBytes {
|
||||
c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
|
||||
flags |= 2 // indirect value
|
||||
} else {
|
||||
c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
|
||||
}
|
||||
c.Field("BucketSize").WriteUint16(uint16(MapBucketType(t).Size()))
|
||||
if types.IsReflexive(t.Key()) {
|
||||
flags |= 4 // reflexive key
|
||||
}
|
||||
if needkeyupdate(t.Key()) {
|
||||
flags |= 8 // need key update
|
||||
}
|
||||
if hashMightPanic(t.Key()) {
|
||||
flags |= 16 // hash might panic
|
||||
}
|
||||
c.Field("Flags").WriteUint32(flags)
|
||||
|
||||
if u := t.Underlying(); u != t {
|
||||
// If t is a named map type, also keep the underlying map
|
||||
// type live in the binary. This is important to make sure that
|
||||
// a named map and that same map cast to its underlying type via
|
||||
// reflection, use the same hash function. See issue 37716.
|
||||
r := obj.Addrel(lsym)
|
||||
r.Sym = writeType(u)
|
||||
r.Type = objabi.R_KEEP
|
||||
writeOldMapType(t, lsym, c)
|
||||
}
|
||||
|
||||
case types.TPTR:
@@ -27,7 +27,8 @@ var ArrayType *types.Type
var ChanType *types.Type
|
||||
var FuncType *types.Type
|
||||
var InterfaceType *types.Type
|
||||
var MapType *types.Type
|
||||
var OldMapType *types.Type
|
||||
var SwissMapType *types.Type
|
||||
var PtrType *types.Type
|
||||
var SliceType *types.Type
|
||||
var StructType *types.Type
@@ -54,7 +55,8 @@ func Init() {
ChanType = fromReflect(reflect.TypeOf(abi.ChanType{}))
|
||||
FuncType = fromReflect(reflect.TypeOf(abi.FuncType{}))
|
||||
InterfaceType = fromReflect(reflect.TypeOf(abi.InterfaceType{}))
|
||||
MapType = fromReflect(reflect.TypeOf(abi.MapType{}))
|
||||
OldMapType = fromReflect(reflect.TypeOf(abi.OldMapType{}))
|
||||
SwissMapType = fromReflect(reflect.TypeOf(abi.SwissMapType{}))
|
||||
PtrType = fromReflect(reflect.TypeOf(abi.PtrType{}))
|
||||
SliceType = fromReflect(reflect.TypeOf(abi.SliceType{}))
|
||||
StructType = fromReflect(reflect.TypeOf(abi.StructType{}))
@@ -88,7 +88,11 @@ func InitConfig() {
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
|
||||
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
|
||||
_ = types.NewPtr(types.ErrorType) // *error
|
||||
_ = types.NewPtr(reflectdata.MapType()) // *runtime.hmap
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
_ = types.NewPtr(reflectdata.SwissMapType()) // *runtime.hmap
|
||||
} else {
|
||||
_ = types.NewPtr(reflectdata.OldMapType()) // *runtime.hmap
|
||||
}
|
||||
_ = types.NewPtr(deferstruct()) // *runtime._defer
|
||||
types.NewPtrCacheEnabled = false
|
||||
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
@@ -2939,7 +2943,13 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
}
|
||||
|
||||
// map <--> *hmap
|
||||
if to.Kind() == types.TMAP && from == types.NewPtr(reflectdata.MapType()) {
|
||||
var mt *types.Type
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
mt = types.NewPtr(reflectdata.SwissMapType())
|
||||
} else {
|
||||
mt = types.NewPtr(reflectdata.OldMapType())
|
||||
}
|
||||
if to.Kind() == types.TMAP && from == mt {
|
||||
return v
|
||||
}
@@ -474,7 +474,7 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
// Format the bucket struct for map[x]y as map.bucket[x]y.
|
||||
// This avoids a recursive print that generates very long names.
|
||||
switch t {
|
||||
case mt.Bucket:
|
||||
case mt.OldBucket, mt.SwissBucket:
|
||||
b.WriteString("map.bucket[")
|
||||
default:
|
||||
base.Fatalf("unknown internal map type")
@@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
}{
|
||||
{Sym{}, 32, 64},
|
||||
{Type{}, 64, 104},
|
||||
{Map{}, 12, 24},
|
||||
{Map{}, 16, 32},
|
||||
{Forward{}, 20, 32},
|
||||
{Func{}, 32, 56},
|
||||
{Struct{}, 12, 24},
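(The Map expectation grows from 12/24 to 16/32 bytes because the single Bucket field is replaced by separate OldBucket and SwissBucket pointers in the Map struct below, i.e. one extra pointer-sized field on both 32-bit and 64-bit platforms.)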
@@ -10,6 +10,7 @@ import (
"cmd/internal/src"
|
||||
"fmt"
|
||||
"go/constant"
|
||||
"internal/buildcfg"
|
||||
"internal/types/errors"
|
||||
"sync"
|
||||
)
@@ -313,7 +314,17 @@ type Map struct {
Key *Type // Key type
|
||||
Elem *Type // Val (elem) type
|
||||
|
||||
Bucket *Type // internal struct type representing a hash bucket
|
||||
// Note: It would be cleaner to completely split Map into OldMap and
|
||||
// SwissMap, but 99% of the types map code doesn't care about the
|
||||
// implementation at all, so it is tons of churn to split the type.
|
||||
// Only code that looks at the bucket field can care about the
|
||||
// implementation.
|
||||
|
||||
// GOEXPERIMENT=noswissmap fields
|
||||
OldBucket *Type // internal struct type representing a hash bucket
|
||||
|
||||
// GOEXPERIMENT=swissmap fields
|
||||
SwissBucket *Type // internal struct type representing a hash bucket
|
||||
}
|
||||
|
||||
// MapType returns t's extra map-specific fields.
@@ -1206,23 +1217,43 @@ func (t *Type) cmp(x *Type) Cmp {
// by the general code after the switch.
|
||||
|
||||
case TSTRUCT:
|
||||
if t.StructType().Map == nil {
|
||||
if x.StructType().Map != nil {
|
||||
return CMPlt // nil < non-nil
|
||||
}
|
||||
// to the fallthrough
|
||||
} else if x.StructType().Map == nil {
|
||||
return CMPgt // nil > non-nil
|
||||
} else if t.StructType().Map.MapType().Bucket == t {
|
||||
// Both have non-nil Map
|
||||
// Special case for Maps which include a recursive type where the recursion is not broken with a named type
|
||||
if x.StructType().Map.MapType().Bucket != x {
|
||||
return CMPlt // bucket maps are least
|
||||
}
|
||||
return t.StructType().Map.cmp(x.StructType().Map)
|
||||
} else if x.StructType().Map.MapType().Bucket == x {
|
||||
return CMPgt // bucket maps are least
|
||||
} // If t != t.Map.Bucket, fall through to general case
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
if t.StructType().Map == nil {
|
||||
if x.StructType().Map != nil {
|
||||
return CMPlt // nil < non-nil
|
||||
}
|
||||
// to the fallthrough
|
||||
} else if x.StructType().Map == nil {
|
||||
return CMPgt // nil > non-nil
|
||||
} else if t.StructType().Map.MapType().SwissBucket == t {
|
||||
// Both have non-nil Map
|
||||
// Special case for Maps which include a recursive type where the recursion is not broken with a named type
|
||||
if x.StructType().Map.MapType().SwissBucket != x {
|
||||
return CMPlt // bucket maps are least
|
||||
}
|
||||
return t.StructType().Map.cmp(x.StructType().Map)
|
||||
} else if x.StructType().Map.MapType().SwissBucket == x {
|
||||
return CMPgt // bucket maps are least
|
||||
} // If t != t.Map.SwissBucket, fall through to general case
|
||||
} else {
|
||||
if t.StructType().Map == nil {
|
||||
if x.StructType().Map != nil {
|
||||
return CMPlt // nil < non-nil
|
||||
}
|
||||
// to the fallthrough
|
||||
} else if x.StructType().Map == nil {
|
||||
return CMPgt // nil > non-nil
|
||||
} else if t.StructType().Map.MapType().OldBucket == t {
|
||||
// Both have non-nil Map
|
||||
// Special case for Maps which include a recursive type where the recursion is not broken with a named type
|
||||
if x.StructType().Map.MapType().OldBucket != x {
|
||||
return CMPlt // bucket maps are least
|
||||
}
|
||||
return t.StructType().Map.cmp(x.StructType().Map)
|
||||
} else if x.StructType().Map.MapType().OldBucket == x {
|
||||
return CMPgt // bucket maps are least
|
||||
} // If t != t.Map.OldBucket, fall through to general case
|
||||
}
|
||||
|
||||
tfs := t.Fields()
|
||||
xfs := x.Fields()
@@ -9,6 +9,7 @@ import (
"go/constant"
|
||||
"go/token"
|
||||
"internal/abi"
|
||||
"internal/buildcfg"
|
||||
"strings"
|
||||
|
||||
"cmd/compile/internal/base"
|
||||
|
|
@@ -311,8 +312,15 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
|||
|
||||
// walkMakeMap walks an OMAKEMAP node.
|
||||
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
return walkMakeSwissMap(n, init)
|
||||
}
|
||||
return walkMakeOldMap(n, init)
|
||||
}
|
||||
|
||||
func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
||||
t := n.Type()
|
||||
hmapType := reflectdata.MapType()
|
||||
hmapType := reflectdata.SwissMapType()
|
||||
hint := n.Len
|
||||
|
||||
// var h *hmap
|
||||
|
|
@@ -330,7 +338,7 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
|||
// Maximum key and elem size is 128 bytes, larger objects
|
||||
// are stored with an indirection. So max bucket size is 2048+eps.
|
||||
if !ir.IsConst(hint, constant.Int) ||
|
||||
constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapBucketCount)) {
|
||||
constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapBucketCount)) {
|
||||
|
||||
// In case hint is larger than BUCKETSIZE runtime.makemap
|
||||
// will allocate the buckets on the heap, see #20184
|
||||
|
|
@@ -341,12 +349,12 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
|||
// h.buckets = b
|
||||
// }
|
||||
|
||||
nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.MapBucketCount)), nil, nil)
|
||||
nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.SwissMapBucketCount)), nil, nil)
|
||||
nif.Likely = true
|
||||
|
||||
// var bv bmap
|
||||
// b = &bv
|
||||
b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))
|
||||
b := stackTempAddr(&nif.Body, reflectdata.SwissMapBucketType(t))
|
||||
|
||||
// h.buckets = b
|
||||
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
|
||||
|
|
@@ -356,7 +364,102 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
|||
}
|
||||
}
|
||||
|
||||
if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapBucketCount)) {
|
||||
if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapBucketCount)) {
|
||||
// Handling make(map[any]any) and
|
||||
// make(map[any]any, hint) where hint <= BUCKETSIZE
|
||||
// special allows for faster map initialization and
|
||||
// improves binary size by using calls with fewer arguments.
|
||||
// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
|
||||
// and no buckets will be allocated by makemap. Therefore,
|
||||
// no buckets need to be allocated in this code path.
|
||||
if n.Esc() == ir.EscNone {
|
||||
// Only need to initialize h.hash0 since
|
||||
// hmap h has been allocated on the stack already.
|
||||
// h.hash0 = rand32()
|
||||
rand := mkcall("rand32", types.Types[types.TUINT32], init)
|
||||
hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
|
||||
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
|
||||
return typecheck.ConvNop(h, t)
|
||||
}
|
||||
// Call runtime.makehmap to allocate an
|
||||
// hmap on the heap and initialize hmap's hash0 field.
|
||||
fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
|
||||
return mkcall1(fn, n.Type(), init)
|
||||
}
|
||||
|
||||
if n.Esc() != ir.EscNone {
|
||||
h = typecheck.NodNil()
|
||||
}
|
||||
// Map initialization with a variable or large hint is
|
||||
// more complicated. We therefore generate a call to
|
||||
// runtime.makemap to initialize hmap and allocate the
|
||||
// map buckets.
|
||||
|
||||
// When hint fits into int, use makemap instead of
|
||||
// makemap64, which is faster and shorter on 32 bit platforms.
|
||||
fnname := "makemap64"
|
||||
argtype := types.Types[types.TINT64]
|
||||
|
||||
// Type checking guarantees that TIDEAL hint is positive and fits in an int.
|
||||
// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
|
||||
// The case of hint overflow when converting TUINT or TUINTPTR to TINT
|
||||
// will be handled by the negative range checks in makemap during runtime.
|
||||
if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
|
||||
fnname = "makemap"
|
||||
argtype = types.Types[types.TINT]
|
||||
}
|
||||
|
||||
fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem())
|
||||
return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
|
||||
}
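A sketch of the three shapes of make(map) that walkMakeSwissMap (and walkMakeOldMap below) lower, following the comments above; the runtime entry points named here are the ones the code looks up:

	package main

	func main() {
		// Hint omitted or at most one bucket: runtime.makemap_small, or a fully
		// stack-allocated hmap (plus one stack bucket) when it does not escape.
		small := make(map[string]int)

		// Larger or non-constant hint: runtime.makemap, which may allocate
		// buckets up front.
		big := make(map[string]int, 1024)

		// A hint type wider than int (relevant on 32-bit hosts): runtime.makemap64.
		var n int64 = 1 << 20
		wide := make(map[string]int, n)

		_, _, _ = small, big, wide
	}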
|
||||
|
||||
func walkMakeOldMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
|
||||
t := n.Type()
|
||||
hmapType := reflectdata.OldMapType()
|
||||
hint := n.Len
|
||||
|
||||
// var h *hmap
|
||||
var h ir.Node
|
||||
if n.Esc() == ir.EscNone {
|
||||
// Allocate hmap on stack.
|
||||
|
||||
// var hv hmap
|
||||
// h = &hv
|
||||
h = stackTempAddr(init, hmapType)
|
||||
|
||||
// Allocate one bucket pointed to by hmap.buckets on stack if hint
|
||||
// is not larger than BUCKETSIZE. In case hint is larger than
|
||||
// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
|
||||
// Maximum key and elem size is 128 bytes, larger objects
|
||||
// are stored with an indirection. So max bucket size is 2048+eps.
|
||||
if !ir.IsConst(hint, constant.Int) ||
|
||||
constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
|
||||
|
||||
// In case hint is larger than BUCKETSIZE runtime.makemap
|
||||
// will allocate the buckets on the heap, see #20184
|
||||
//
|
||||
// if hint <= BUCKETSIZE {
|
||||
// var bv bmap
|
||||
// b = &bv
|
||||
// h.buckets = b
|
||||
// }
|
||||
|
||||
nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.OldMapBucketCount)), nil, nil)
|
||||
nif.Likely = true
|
||||
|
||||
// var bv bmap
|
||||
// b = &bv
|
||||
b := stackTempAddr(&nif.Body, reflectdata.OldMapBucketType(t))
|
||||
|
||||
// h.buckets = b
|
||||
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
|
||||
na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
|
||||
nif.Body.Append(na)
|
||||
appendWalkStmt(init, nif)
|
||||
}
|
||||
}
|
||||
|
||||
if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
|
||||
// Handling make(map[any]any) and
|
||||
// make(map[any]any, hint) where hint <= BUCKETSIZE
|
||||
// special allows for faster map initialization and
|
||||
|
|
|
|||
|
|
@@ -7,6 +7,7 @@ package walk
|
|||
import (
|
||||
"fmt"
|
||||
"go/constant"
|
||||
"internal/buildcfg"
|
||||
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
|
|
@@ -926,7 +927,11 @@ func (o *orderState) stmt(n ir.Node) {
|
|||
|
||||
// n.Prealloc is the temp for the iterator.
|
||||
// MapIterType contains pointers and needs to be zeroed.
|
||||
n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true)
|
||||
} else {
|
||||
n.Prealloc = o.newTemp(reflectdata.OldMapIterType(), true)
|
||||
}
|
||||
}
|
||||
n.Key = o.exprInPlace(n.Key)
|
||||
n.Value = o.exprInPlace(n.Value)
|
||||
|
|
|
|||
|
|
@@ -7,6 +7,7 @@ package walk
|
|||
import (
|
||||
"fmt"
|
||||
"internal/abi"
|
||||
"internal/buildcfg"
|
||||
|
||||
"cmd/compile/internal/base"
|
||||
"cmd/compile/internal/ir"
|
||||
|
|
@@ -184,7 +185,42 @@ var mapassign = mkmapnames("mapassign", "ptr")
|
|||
var mapdelete = mkmapnames("mapdelete", "")
|
||||
|
||||
func mapfast(t *types.Type) int {
|
||||
if t.Elem().Size() > abi.MapMaxElemBytes {
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
return mapfastSwiss(t)
|
||||
}
|
||||
return mapfastOld(t)
|
||||
}
|
||||
|
||||
func mapfastSwiss(t *types.Type) int {
|
||||
if t.Elem().Size() > abi.SwissMapMaxElemBytes {
|
||||
return mapslow
|
||||
}
|
||||
switch reflectdata.AlgType(t.Key()) {
|
||||
case types.AMEM32:
|
||||
if !t.Key().HasPointers() {
|
||||
return mapfast32
|
||||
}
|
||||
if types.PtrSize == 4 {
|
||||
return mapfast32ptr
|
||||
}
|
||||
base.Fatalf("small pointer %v", t.Key())
|
||||
case types.AMEM64:
|
||||
if !t.Key().HasPointers() {
|
||||
return mapfast64
|
||||
}
|
||||
if types.PtrSize == 8 {
|
||||
return mapfast64ptr
|
||||
}
|
||||
// Two-word object, at least one of which is a pointer.
|
||||
// Use the slow path.
|
||||
case types.ASTRING:
|
||||
return mapfaststr
|
||||
}
|
||||
return mapslow
|
||||
}
|
||||
|
||||
func mapfastOld(t *types.Type) int {
|
||||
if t.Elem().Size() > abi.OldMapMaxElemBytes {
|
||||
return mapslow
|
||||
}
|
||||
switch reflectdata.AlgType(t.Key()) {
|
||||
|
|
|
|||
|
|
@@ -552,8 +552,12 @@ func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, sym
|
|||
off += 3 * arch.PtrSize
|
||||
case abi.Chan: // reflect.chanType
|
||||
off += 2 * arch.PtrSize
|
||||
case abi.Map: // reflect.mapType
|
||||
off += 4*arch.PtrSize + 8
|
||||
case abi.Map:
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
off += 4*arch.PtrSize + 8 // internal/abi.SwissMapType
|
||||
} else {
|
||||
off += 4*arch.PtrSize + 8 // internal/abi.OldMapType
|
||||
}
|
||||
case abi.Interface: // reflect.interfaceType
|
||||
off += 3 * arch.PtrSize
|
||||
default:
|
||||
|
|
|
|||
|
|
@@ -864,6 +864,14 @@ func (d *dwctxt) mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valna
|
|||
}
|
||||
|
||||
func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
|
||||
if buildcfg.Experiment.SwissMap {
|
||||
d.synthesizemaptypesSwiss(ctxt, die)
|
||||
} else {
|
||||
d.synthesizemaptypesOld(ctxt, die)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
|
||||
hash := walktypedef(d.findprotodie(ctxt, "type:runtime.hmap"))
|
||||
bucket := walktypedef(d.findprotodie(ctxt, "type:runtime.bmap"))
|
||||
|
||||
|
|
@@ -885,11 +893,11 @@ func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
|
|||
|
||||
// compute size info like hashmap.c does.
|
||||
indirectKey, indirectVal := false, false
|
||||
if keysize > abi.MapMaxKeyBytes {
|
||||
if keysize > abi.SwissMapMaxKeyBytes {
|
||||
keysize = int64(d.arch.PtrSize)
|
||||
indirectKey = true
|
||||
}
|
||||
if valsize > abi.MapMaxElemBytes {
|
||||
if valsize > abi.SwissMapMaxElemBytes {
|
||||
valsize = int64(d.arch.PtrSize)
|
||||
indirectVal = true
|
||||
}
|
||||
|
|
@@ -897,28 +905,28 @@ func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
|
|||
// Construct type to represent an array of BucketSize keys
|
||||
keyname := d.nameFromDIESym(keytype)
|
||||
dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) {
|
||||
newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount*keysize, 0)
|
||||
newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount*keysize, 0)
|
||||
t := keytype
|
||||
if indirectKey {
|
||||
t = d.defptrto(keytype)
|
||||
}
|
||||
d.newrefattr(dwhk, dwarf.DW_AT_type, t)
|
||||
fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size")
|
||||
newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount, 0)
|
||||
newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount, 0)
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
|
||||
})
|
||||
|
||||
// Construct type to represent an array of BucketSize values
|
||||
valname := d.nameFromDIESym(valtype)
|
||||
dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) {
|
||||
newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount*valsize, 0)
|
||||
newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount*valsize, 0)
|
||||
t := valtype
|
||||
if indirectVal {
|
||||
t = d.defptrto(valtype)
|
||||
}
|
||||
d.newrefattr(dwhv, dwarf.DW_AT_type, t)
|
||||
fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size")
|
||||
newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount, 0)
|
||||
newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount, 0)
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
|
||||
})
|
||||
|
||||
|
|
@@ -930,20 +938,116 @@ func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
|
|||
|
||||
fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys")
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, dwhks)
|
||||
newmemberoffsetattr(fld, abi.MapBucketCount)
|
||||
newmemberoffsetattr(fld, abi.SwissMapBucketCount)
|
||||
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values")
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, dwhvs)
|
||||
newmemberoffsetattr(fld, abi.MapBucketCount+abi.MapBucketCount*int32(keysize))
|
||||
newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*int32(keysize))
|
||||
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow")
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym)))
|
||||
newmemberoffsetattr(fld, abi.MapBucketCount+abi.MapBucketCount*(int32(keysize)+int32(valsize)))
|
||||
newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*(int32(keysize)+int32(valsize)))
|
||||
if d.arch.RegSize > d.arch.PtrSize {
|
||||
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad")
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
|
||||
newmemberoffsetattr(fld, abi.MapBucketCount+abi.MapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize))
|
||||
newmemberoffsetattr(fld, abi.SwissMapBucketCount+abi.SwissMapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize))
|
||||
}
|
||||
|
||||
newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount+abi.MapBucketCount*keysize+abi.MapBucketCount*valsize+int64(d.arch.RegSize), 0)
|
||||
newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.SwissMapBucketCount+abi.SwissMapBucketCount*keysize+abi.SwissMapBucketCount*valsize+int64(d.arch.RegSize), 0)
|
||||
})
|
||||
|
||||
// Construct hash<K,V>
|
||||
dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) {
|
||||
d.copychildren(ctxt, dwh, hash)
|
||||
d.substitutetype(dwh, "buckets", d.defptrto(dwhbs))
|
||||
d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs))
|
||||
newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil)
|
||||
})
|
||||
|
||||
// make map type a pointer to hash<K,V>
|
||||
d.newrefattr(die, dwarf.DW_AT_type, d.defptrto(dwhs))
|
||||
}
|
||||
}
|
||||
|
||||
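For orientation, here is a small standalone sketch (not part of the change) of the member-offset arithmetic the synthesizer performs above. It assumes a 64-bit target (PtrSize and RegSize of 8) and the usual bucket count of 8; the helper name is made up for illustration.

package main

import "fmt"

// bucketLayout mirrors the newmemberoffsetattr/newattr calls above:
// the tophash array comes first, then the keys array, the values array,
// and finally the overflow pointer.
func bucketLayout(bucketCount, keysize, valsize, regSize int64) (keys, values, overflow, size int64) {
    keys = bucketCount                                     // keys start after the tophash bytes
    values = bucketCount + bucketCount*keysize             // values follow the keys array
    overflow = bucketCount + bucketCount*(keysize+valsize) // overflow pointer is last
    size = bucketCount + bucketCount*keysize + bucketCount*valsize + regSize
    return
}

func main() {
    // A map[int64]*T bucket on a 64-bit target: 8-byte keys, 8-byte elems.
    keys, values, overflow, size := bucketLayout(8, 8, 8, 8)
    fmt.Println(keys, values, overflow, size) // 8 72 136 144
}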
func (d *dwctxt) synthesizemaptypesOld(ctxt *Link, die *dwarf.DWDie) {
|
||||
hash := walktypedef(d.findprotodie(ctxt, "type:runtime.hmap"))
|
||||
bucket := walktypedef(d.findprotodie(ctxt, "type:runtime.bmap"))
|
||||
|
||||
if hash == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for ; die != nil; die = die.Link {
|
||||
if die.Abbrev != dwarf.DW_ABRV_MAPTYPE {
|
||||
continue
|
||||
}
|
||||
gotype := loader.Sym(getattr(die, dwarf.DW_AT_type).Data.(dwSym))
|
||||
keytype := decodetypeMapKey(d.ldr, d.arch, gotype)
|
||||
valtype := decodetypeMapValue(d.ldr, d.arch, gotype)
|
||||
keydata := d.ldr.Data(keytype)
|
||||
valdata := d.ldr.Data(valtype)
|
||||
keysize, valsize := decodetypeSize(d.arch, keydata), decodetypeSize(d.arch, valdata)
|
||||
keytype, valtype = d.walksymtypedef(d.defgotype(keytype)), d.walksymtypedef(d.defgotype(valtype))
|
||||
|
||||
// compute size info like hashmap.c does.
|
||||
indirectKey, indirectVal := false, false
|
||||
if keysize > abi.OldMapMaxKeyBytes {
|
||||
keysize = int64(d.arch.PtrSize)
|
||||
indirectKey = true
|
||||
}
|
||||
if valsize > abi.OldMapMaxElemBytes {
|
||||
valsize = int64(d.arch.PtrSize)
|
||||
indirectVal = true
|
||||
}
|
||||
|
||||
// Construct type to represent an array of BucketSize keys
|
||||
keyname := d.nameFromDIESym(keytype)
|
||||
dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) {
|
||||
newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*keysize, 0)
|
||||
t := keytype
|
||||
if indirectKey {
|
||||
t = d.defptrto(keytype)
|
||||
}
|
||||
d.newrefattr(dwhk, dwarf.DW_AT_type, t)
|
||||
fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size")
|
||||
newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0)
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
|
||||
})
|
||||
|
||||
// Construct type to represent an array of BucketSize values
|
||||
valname := d.nameFromDIESym(valtype)
|
||||
dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) {
|
||||
newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*valsize, 0)
|
||||
t := valtype
|
||||
if indirectVal {
|
||||
t = d.defptrto(valtype)
|
||||
}
|
||||
d.newrefattr(dwhv, dwarf.DW_AT_type, t)
|
||||
fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size")
|
||||
newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0)
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
|
||||
})
|
||||
|
||||
// Construct bucket<K,V>
|
||||
dwhbs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) {
|
||||
// Copy over all fields except the field "data" from the generic
|
||||
// bucket. "data" will be replaced with keys/values below.
|
||||
d.copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data"))
|
||||
|
||||
fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys")
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, dwhks)
|
||||
newmemberoffsetattr(fld, abi.OldMapBucketCount)
|
||||
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values")
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, dwhvs)
|
||||
newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*int32(keysize))
|
||||
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow")
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym)))
|
||||
newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize)))
|
||||
if d.arch.RegSize > d.arch.PtrSize {
|
||||
fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad")
|
||||
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
|
||||
newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize))
|
||||
}
|
||||
|
||||
newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount+abi.OldMapBucketCount*keysize+abi.OldMapBucketCount*valsize+int64(d.arch.RegSize), 0)
|
||||
})
|
||||
|
||||
// Construct hash<K,V>
|
||||
|
|
@@ -1782,7 +1886,6 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
"type:internal/abi.ArrayType",
"type:internal/abi.ChanType",
"type:internal/abi.FuncType",
"type:internal/abi.MapType",
"type:internal/abi.PtrType",
"type:internal/abi.SliceType",
"type:internal/abi.StructType",

@@ -1791,6 +1894,11 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
"type:internal/abi.Imethod"} {
d.defgotype(d.lookupOrDiag(typ))
}
if buildcfg.Experiment.SwissMap {
d.defgotype(d.lookupOrDiag("type:internal/abi.SwissMapType"))
} else {
d.defgotype(d.lookupOrDiag("type:internal/abi.OldMapType"))
}

// fake root DIE for compile unit DIEs
var dwroot dwarf.DWDie
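The point of the branch above is that the linker consults buildcfg.Experiment.SwissMap, i.e. the GOEXPERIMENT the target binary is being built with, rather than any build tag baked into the linker itself. A minimal sketch of that selection, reduced to returning the prototype symbol name; the function is hypothetical and only illustrates the choice being made.

package main

import "fmt"

// mapProtoName picks the DWARF prototype to look up based on the target's
// map experiment, mirroring the if/else in dwarfGenerateDebugInfo above.
func mapProtoName(swissMapExperiment bool) string {
    if swissMapExperiment {
        return "type:internal/abi.SwissMapType"
    }
    return "type:internal/abi.OldMapType"
}

func main() {
    fmt.Println(mapProtoName(true))  // type:internal/abi.SwissMapType
    fmt.Println(mapProtoName(false)) // type:internal/abi.OldMapType
}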
@@ -60,7 +60,6 @@ func TestRuntimeTypesPresent(t *testing.T) {
"internal/abi.ArrayType": true,
"internal/abi.ChanType": true,
"internal/abi.FuncType": true,
"internal/abi.MapType": true,
"internal/abi.PtrType": true,
"internal/abi.SliceType": true,
"internal/abi.StructType": true,

@@ -72,6 +71,16 @@ func TestRuntimeTypesPresent(t *testing.T) {
if len(found) != len(want) {
t.Errorf("found %v, want %v", found, want)
}

// Must have one of OldMapType or SwissMapType.
want = map[string]bool{
"internal/abi.OldMapType": true,
"internal/abi.SwissMapType": true,
}
found = findTypes(t, dwarf, want)
if len(found) != 1 {
t.Errorf("map type want one of %v found %v", want, found)
}
}

func findTypes(t *testing.T, dw *dwarf.Data, want map[string]bool) (found map[string]bool) {
@@ -1,19 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package abi

// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
    // Maximum number of key/elem pairs a bucket can hold.
    MapBucketCountBits = 3 // log2 of number of elements in a bucket.
    MapBucketCount     = 1 << MapBucketCountBits

    // Maximum key or elem size to keep inline (instead of mallocing per element).
    // Must fit in a uint8.
    // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
    MapMaxKeyBytes  = 128
    MapMaxElemBytes = 128 // Must fit in a uint8.
)
@@ -0,0 +1,55 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package abi

import (
    "unsafe"
)

// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
    // Maximum number of key/elem pairs a bucket can hold.
    OldMapBucketCountBits = 3 // log2 of number of elements in a bucket.
    OldMapBucketCount     = 1 << OldMapBucketCountBits

    // Maximum key or elem size to keep inline (instead of mallocing per element).
    // Must fit in a uint8.
    // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
    OldMapMaxKeyBytes  = 128
    OldMapMaxElemBytes = 128 // Must fit in a uint8.
)

type OldMapType struct {
    Type
    Key    *Type
    Elem   *Type
    Bucket *Type // internal type representing a hash bucket
    // function for hashing keys (ptr to key, seed) -> hash
    Hasher     func(unsafe.Pointer, uintptr) uintptr
    KeySize    uint8  // size of key slot
    ValueSize  uint8  // size of elem slot
    BucketSize uint16 // size of bucket
    Flags      uint32
}

// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
func (mt *OldMapType) IndirectKey() bool { // store ptr to key instead of key itself
    return mt.Flags&1 != 0
}
func (mt *OldMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
    return mt.Flags&2 != 0
}
func (mt *OldMapType) ReflexiveKey() bool { // true if k==k for all keys
    return mt.Flags&4 != 0
}
func (mt *OldMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
    return mt.Flags&8 != 0
}
func (mt *OldMapType) HashMightPanic() bool { // true if hash function might panic
    return mt.Flags&16 != 0
}
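The flag accessors above decode bit positions that the compiler's writeType sets on the map type descriptor. A standalone sketch of how those bits compose; the constant names below are illustrative only, and the bit positions are simply the ones tested by the methods above.

package main

import "fmt"

// Illustrative names for the bits tested by IndirectKey, IndirectElem,
// ReflexiveKey, NeedKeyUpdate and HashMightPanic above.
const (
    flagIndirectKey    = 1 << 0
    flagIndirectElem   = 1 << 1
    flagReflexiveKey   = 1 << 2
    flagNeedKeyUpdate  = 1 << 3
    flagHashMightPanic = 1 << 4
)

func main() {
    // A map whose keys are stored indirectly and must be written back on overwrite.
    flags := uint32(flagIndirectKey | flagNeedKeyUpdate)
    fmt.Println(flags&flagIndirectKey != 0)    // true: key slots hold pointers to keys
    fmt.Println(flags&flagIndirectElem != 0)   // false: elem slots hold elems inline
    fmt.Println(flags&flagNeedKeyUpdate != 0)  // true: overwrites must update the stored key
    fmt.Println(flags&flagHashMightPanic != 0) // false: hashing cannot panic
}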
@@ -0,0 +1,10 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.swissmap

package abi

// See comment in map_select_swiss.go.
type mapType = OldMapType
@@ -0,0 +1,22 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.swissmap

package abi

// Select the map type that this binary is built using. This is for common
// lookup methods like Type.Key to know which type to use.
//
// Note that mapType *must not be used by any functions called in the
// compiler to build a target program* because the compiler must use the map
// type determined by run-time GOEXPERIMENT, not the build tags used to build
// the compiler.
//
// TODO(prattmic): This package is rather confusing because it has many
// functions that can't be used by the compiler (e.g., Type.Uncommon depends on
// the layout of type + uncommon objects in the binary. It would be incorrect
// for an ad-hoc local Type object). It may be best to move code that isn't
// usable by the compiler out of the package.
type mapType = SwissMapType
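The alias declared here (and its counterpart in map_select_noswiss.go) lets shared accessors such as Type.Key compile against whichever layout the build tags select. A stripped-down sketch of that pattern, with stand-in struct types instead of the real ABI structs:

package main

import "fmt"

// Stand-ins for the two ABI structs; in package abi the alias below is
// chosen by the goexperiment.swissmap build tag rather than written out.
type OldMapType struct{ BucketSize uint16 }
type SwissMapType struct{ BucketSize uint16 }

// With the swissmap experiment off this line would instead read:
// type mapType = OldMapType
type mapType = SwissMapType

// bucketSize compiles unchanged against either alias target.
func bucketSize(mt *mapType) uint16 { return mt.BucketSize }

func main() {
    fmt.Println(bucketSize(&mapType{BucketSize: 144})) // 144
}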
@@ -0,0 +1,55 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package abi

import (
    "unsafe"
)

// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
    // Maximum number of key/elem pairs a bucket can hold.
    SwissMapBucketCountBits = 3 // log2 of number of elements in a bucket.
    SwissMapBucketCount     = 1 << SwissMapBucketCountBits

    // Maximum key or elem size to keep inline (instead of mallocing per element).
    // Must fit in a uint8.
    // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
    SwissMapMaxKeyBytes  = 128
    SwissMapMaxElemBytes = 128 // Must fit in a uint8.
)

type SwissMapType struct {
    Type
    Key    *Type
    Elem   *Type
    Bucket *Type // internal type representing a hash bucket
    // function for hashing keys (ptr to key, seed) -> hash
    Hasher     func(unsafe.Pointer, uintptr) uintptr
    KeySize    uint8  // size of key slot
    ValueSize  uint8  // size of elem slot
    BucketSize uint16 // size of bucket
    Flags      uint32
}

// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself
    return mt.Flags&1 != 0
}
func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
    return mt.Flags&2 != 0
}
func (mt *SwissMapType) ReflexiveKey() bool { // true if k==k for all keys
    return mt.Flags&4 != 0
}
func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
    return mt.Flags&8 != 0
}
func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic
    return mt.Flags&16 != 0
}
@@ -341,7 +341,7 @@ func (t *Type) Uncommon() *UncommonType {
return &(*u)(unsafe.Pointer(t)).u
case Map:
type u struct {
MapType
mapType
u UncommonType
}
return &(*u)(unsafe.Pointer(t)).u

@@ -370,7 +370,7 @@ func (t *Type) Elem() *Type {
tt := (*ChanType)(unsafe.Pointer(t))
return tt.Elem
case Map:
tt := (*MapType)(unsafe.Pointer(t))
tt := (*mapType)(unsafe.Pointer(t))
return tt.Elem
case Pointer:
tt := (*PtrType)(unsafe.Pointer(t))

@@ -390,12 +390,12 @@ func (t *Type) StructType() *StructType {
return (*StructType)(unsafe.Pointer(t))
}

// MapType returns t cast to a *MapType, or nil if its tag does not match.
func (t *Type) MapType() *MapType {
// MapType returns t cast to a *OldMapType or *SwissMapType, or nil if its tag does not match.
func (t *Type) MapType() *mapType {
if t.Kind() != Map {
return nil
}
return (*MapType)(unsafe.Pointer(t))
return (*mapType)(unsafe.Pointer(t))
}

// ArrayType returns t cast to a *ArrayType, or nil if its tag does not match.
@ -455,40 +455,9 @@ func (t *Type) NumMethod() int {
|
|||
// NumMethod returns the number of interface methods in the type's method set.
|
||||
func (t *InterfaceType) NumMethod() int { return len(t.Methods) }
|
||||
|
||||
type MapType struct {
|
||||
Type
|
||||
Key *Type
|
||||
Elem *Type
|
||||
Bucket *Type // internal type representing a hash bucket
|
||||
// function for hashing keys (ptr to key, seed) -> hash
|
||||
Hasher func(unsafe.Pointer, uintptr) uintptr
|
||||
KeySize uint8 // size of key slot
|
||||
ValueSize uint8 // size of elem slot
|
||||
BucketSize uint16 // size of bucket
|
||||
Flags uint32
|
||||
}
|
||||
|
||||
// Note: flag values must match those used in the TMAP case
|
||||
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
|
||||
func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
|
||||
return mt.Flags&1 != 0
|
||||
}
|
||||
func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
|
||||
return mt.Flags&2 != 0
|
||||
}
|
||||
func (mt *MapType) ReflexiveKey() bool { // true if k==k for all keys
|
||||
return mt.Flags&4 != 0
|
||||
}
|
||||
func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
|
||||
return mt.Flags&8 != 0
|
||||
}
|
||||
func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
|
||||
return mt.Flags&16 != 0
|
||||
}
|
||||
|
||||
func (t *Type) Key() *Type {
|
||||
if t.Kind() == Map {
|
||||
return (*MapType)(unsafe.Pointer(t)).Key
|
||||
return (*mapType)(unsafe.Pointer(t)).Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ import (
|
|||
"go/token"
|
||||
"internal/abi"
|
||||
"internal/goarch"
|
||||
"internal/goexperiment"
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"math"
|
||||
|
|
@ -32,8 +33,6 @@ import (
|
|||
"unsafe"
|
||||
)
|
||||
|
||||
const bucketCount = abi.MapBucketCount
|
||||
|
||||
var sink any
|
||||
|
||||
func TestBool(t *testing.T) {
|
||||
|
|
@ -7277,47 +7276,95 @@ func TestGCBits(t *testing.T) {
|
|||
verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
|
||||
verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
|
||||
|
||||
hdr := make([]byte, bucketCount/goarch.PtrSize)
|
||||
if goexperiment.SwissMap {
|
||||
const bucketCount = abi.SwissMapBucketCount
|
||||
|
||||
verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
|
||||
verifyGCBits(t, MapBucketOf(k, e), want)
|
||||
verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
|
||||
}
|
||||
verifyMapBucket(t,
|
||||
hdr := make([]byte, bucketCount/goarch.PtrSize)
|
||||
|
||||
verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
|
||||
verifyGCBits(t, MapBucketOf(k, e), want)
|
||||
verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
|
||||
}
|
||||
verifyMapBucket(t,
|
||||
Tscalar, Tptr,
|
||||
map[Xscalar]Xptr(nil),
|
||||
join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
verifyMapBucket(t,
|
||||
Tscalarptr, Tptr,
|
||||
map[Xscalarptr]Xptr(nil),
|
||||
join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
|
||||
verifyMapBucket(t, Tint64, Tptr,
|
||||
verifyMapBucket(t, Tint64, Tptr,
|
||||
map[int64]Xptr(nil),
|
||||
join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
verifyMapBucket(t,
|
||||
Tscalar, Tscalar,
|
||||
map[Xscalar]Xscalar(nil),
|
||||
empty)
|
||||
verifyMapBucket(t,
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
|
||||
map[[2]Xscalarptr][3]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
|
||||
map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
|
||||
map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
|
||||
map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
|
||||
map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
|
||||
} else {
|
||||
const bucketCount = abi.OldMapBucketCount
|
||||
|
||||
hdr := make([]byte, bucketCount/goarch.PtrSize)
|
||||
|
||||
verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
|
||||
verifyGCBits(t, MapBucketOf(k, e), want)
|
||||
verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
|
||||
}
|
||||
verifyMapBucket(t,
|
||||
Tscalar, Tptr,
|
||||
map[Xscalar]Xptr(nil),
|
||||
join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
Tscalarptr, Tptr,
|
||||
map[Xscalarptr]Xptr(nil),
|
||||
join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
|
||||
verifyMapBucket(t, Tint64, Tptr,
|
||||
map[int64]Xptr(nil),
|
||||
join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
Tscalar, Tscalar,
|
||||
map[Xscalar]Xscalar(nil),
|
||||
empty)
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
|
||||
map[[2]Xscalarptr][3]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
|
||||
map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
|
||||
map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
|
||||
map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
|
||||
verifyMapBucket(t,
|
||||
ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
|
||||
map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
|
||||
join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
|
||||
}
|
||||
}
|
||||
|
||||
func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ import (
|
|||
|
||||
// mapType represents a map type.
|
||||
type mapType struct {
|
||||
abi.MapType
|
||||
abi.OldMapType
|
||||
}
|
||||
|
||||
func (t *rtype) Key() Type {
|
||||
|
|
@ -70,13 +70,13 @@ func MapOf(key, elem Type) Type {
|
|||
return typehash(ktyp, p, seed)
|
||||
}
|
||||
mt.Flags = 0
|
||||
if ktyp.Size_ > abi.MapMaxKeyBytes {
|
||||
if ktyp.Size_ > abi.OldMapMaxKeyBytes {
|
||||
mt.KeySize = uint8(goarch.PtrSize)
|
||||
mt.Flags |= 1 // indirect key
|
||||
} else {
|
||||
mt.KeySize = uint8(ktyp.Size_)
|
||||
}
|
||||
if etyp.Size_ > abi.MapMaxElemBytes {
|
||||
if etyp.Size_ > abi.OldMapMaxElemBytes {
|
||||
mt.ValueSize = uint8(goarch.PtrSize)
|
||||
mt.Flags |= 2 // indirect value
|
||||
} else {
|
||||
|
|
@ -99,10 +99,10 @@ func MapOf(key, elem Type) Type {
|
|||
}
|
||||
|
||||
func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
|
||||
if ktyp.Size_ > abi.MapMaxKeyBytes {
|
||||
if ktyp.Size_ > abi.OldMapMaxKeyBytes {
|
||||
ktyp = ptrTo(ktyp)
|
||||
}
|
||||
if etyp.Size_ > abi.MapMaxElemBytes {
|
||||
if etyp.Size_ > abi.OldMapMaxElemBytes {
|
||||
etyp = ptrTo(etyp)
|
||||
}
|
||||
|
||||
|
|
@ -114,29 +114,29 @@ func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
|
|||
var gcdata *byte
|
||||
var ptrdata uintptr
|
||||
|
||||
size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
|
||||
size := abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
|
||||
if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
|
||||
panic("reflect: bad size computation in MapOf")
|
||||
}
|
||||
|
||||
if ktyp.Pointers() || etyp.Pointers() {
|
||||
nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
|
||||
nptr := (abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
|
||||
n := (nptr + 7) / 8
|
||||
|
||||
// Runtime needs pointer masks to be a multiple of uintptr in size.
|
||||
n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
|
||||
mask := make([]byte, n)
|
||||
base := uintptr(abi.MapBucketCount / goarch.PtrSize)
|
||||
base := uintptr(abi.OldMapBucketCount / goarch.PtrSize)
|
||||
|
||||
if ktyp.Pointers() {
|
||||
emitGCMask(mask, base, ktyp, abi.MapBucketCount)
|
||||
emitGCMask(mask, base, ktyp, abi.OldMapBucketCount)
|
||||
}
|
||||
base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
|
||||
base += abi.OldMapBucketCount * ktyp.Size_ / goarch.PtrSize
|
||||
|
||||
if etyp.Pointers() {
|
||||
emitGCMask(mask, base, etyp, abi.MapBucketCount)
|
||||
emitGCMask(mask, base, etyp, abi.OldMapBucketCount)
|
||||
}
|
||||
base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
|
||||
base += abi.OldMapBucketCount * etyp.Size_ / goarch.PtrSize
|
||||
|
||||
word := base
|
||||
mask[word/8] |= 1 << (word % 8)
|
||||
|
|
@ -180,7 +180,7 @@ func (v Value) MapIndex(key Value) Value {
|
|||
// of unexported fields.
|
||||
|
||||
var e unsafe.Pointer
|
||||
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
|
||||
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
|
||||
k := *(*string)(key.ptr)
|
||||
e = mapaccess_faststr(v.typ(), v.pointer(), k)
|
||||
} else {
|
||||
|
|
@ -423,7 +423,7 @@ func (v Value) SetMapIndex(key, elem Value) {
|
|||
key.mustBeExported()
|
||||
tt := (*mapType)(unsafe.Pointer(v.typ()))
|
||||
|
||||
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
|
||||
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
|
||||
k := *(*string)(key.ptr)
|
||||
if elem.typ() == nil {
|
||||
mapdelete_faststr(v.typ(), v.pointer(), k)
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ import (
|
|||
|
||||
// mapType represents a map type.
|
||||
type mapType struct {
|
||||
abi.MapType
|
||||
abi.SwissMapType
|
||||
}
|
||||
|
||||
func (t *rtype) Key() Type {
|
||||
|
|
@ -70,13 +70,13 @@ func MapOf(key, elem Type) Type {
|
|||
return typehash(ktyp, p, seed)
|
||||
}
|
||||
mt.Flags = 0
|
||||
if ktyp.Size_ > abi.MapMaxKeyBytes {
|
||||
if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
|
||||
mt.KeySize = uint8(goarch.PtrSize)
|
||||
mt.Flags |= 1 // indirect key
|
||||
} else {
|
||||
mt.KeySize = uint8(ktyp.Size_)
|
||||
}
|
||||
if etyp.Size_ > abi.MapMaxElemBytes {
|
||||
if etyp.Size_ > abi.SwissMapMaxElemBytes {
|
||||
mt.ValueSize = uint8(goarch.PtrSize)
|
||||
mt.Flags |= 2 // indirect value
|
||||
} else {
|
||||
|
|
@ -99,10 +99,10 @@ func MapOf(key, elem Type) Type {
|
|||
}
|
||||
|
||||
func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
|
||||
if ktyp.Size_ > abi.MapMaxKeyBytes {
|
||||
if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
|
||||
ktyp = ptrTo(ktyp)
|
||||
}
|
||||
if etyp.Size_ > abi.MapMaxElemBytes {
|
||||
if etyp.Size_ > abi.SwissMapMaxElemBytes {
|
||||
etyp = ptrTo(etyp)
|
||||
}
|
||||
|
||||
|
|
@ -114,29 +114,29 @@ func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
|
|||
var gcdata *byte
|
||||
var ptrdata uintptr
|
||||
|
||||
size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
|
||||
size := abi.SwissMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
|
||||
if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
|
||||
panic("reflect: bad size computation in MapOf")
|
||||
}
|
||||
|
||||
if ktyp.Pointers() || etyp.Pointers() {
|
||||
nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
|
||||
nptr := (abi.SwissMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
|
||||
n := (nptr + 7) / 8
|
||||
|
||||
// Runtime needs pointer masks to be a multiple of uintptr in size.
|
||||
n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
|
||||
mask := make([]byte, n)
|
||||
base := uintptr(abi.MapBucketCount / goarch.PtrSize)
|
||||
base := uintptr(abi.SwissMapBucketCount / goarch.PtrSize)
|
||||
|
||||
if ktyp.Pointers() {
|
||||
emitGCMask(mask, base, ktyp, abi.MapBucketCount)
|
||||
emitGCMask(mask, base, ktyp, abi.SwissMapBucketCount)
|
||||
}
|
||||
base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
|
||||
base += abi.SwissMapBucketCount * ktyp.Size_ / goarch.PtrSize
|
||||
|
||||
if etyp.Pointers() {
|
||||
emitGCMask(mask, base, etyp, abi.MapBucketCount)
|
||||
emitGCMask(mask, base, etyp, abi.SwissMapBucketCount)
|
||||
}
|
||||
base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
|
||||
base += abi.SwissMapBucketCount * etyp.Size_ / goarch.PtrSize
|
||||
|
||||
word := base
|
||||
mask[word/8] |= 1 << (word % 8)
|
||||
|
|
@ -180,7 +180,7 @@ func (v Value) MapIndex(key Value) Value {
|
|||
// of unexported fields.
|
||||
|
||||
var e unsafe.Pointer
|
||||
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
|
||||
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
|
||||
k := *(*string)(key.ptr)
|
||||
e = mapaccess_faststr(v.typ(), v.pointer(), k)
|
||||
} else {
|
||||
|
|
@ -423,7 +423,7 @@ func (v Value) SetMapIndex(key, elem Value) {
|
|||
key.mustBeExported()
|
||||
tt := (*mapType)(unsafe.Pointer(v.typ()))
|
||||
|
||||
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
|
||||
if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
|
||||
k := *(*string)(key.ptr)
|
||||
if elem.typ() == nil {
|
||||
mapdelete_faststr(v.typ(), v.pointer(), k)
@@ -0,0 +1,58 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !goexperiment.swissmap

package runtime

import (
    "internal/abi"
    "unsafe"
)

func MapBucketsCount(m map[int]int) int {
    h := *(**hmap)(unsafe.Pointer(&m))
    return 1 << h.B
}

func MapBucketsPointerIsNil(m map[int]int) bool {
    h := *(**hmap)(unsafe.Pointer(&m))
    return h.buckets == nil
}

func MapTombstoneCheck(m map[int]int) {
    // Make sure emptyOne and emptyRest are distributed correctly.
    // We should have a series of filled and emptyOne cells, followed by
    // a series of emptyRest cells.
    h := *(**hmap)(unsafe.Pointer(&m))
    i := any(m)
    t := *(**maptype)(unsafe.Pointer(&i))

    for x := 0; x < 1<<h.B; x++ {
        b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
        n := 0
        for b := b0; b != nil; b = b.overflow(t) {
            for i := 0; i < abi.OldMapBucketCount; i++ {
                if b.tophash[i] != emptyRest {
                    n++
                }
            }
        }
        k := 0
        for b := b0; b != nil; b = b.overflow(t) {
            for i := 0; i < abi.OldMapBucketCount; i++ {
                if k < n && b.tophash[i] == emptyRest {
                    panic("early emptyRest")
                }
                if k >= n && b.tophash[i] != emptyRest {
                    panic("late non-emptyRest")
                }
                if k == n-1 && b.tophash[i] == emptyOne {
                    panic("last non-emptyRest entry is emptyOne")
                }
                k++
            }
        }
    }
}
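MapTombstoneCheck above asserts the invariant that, within a bucket chain, every cell that is not emptyRest precedes every cell that is, and that the last non-emptyRest cell is not emptyOne. A self-contained sketch of the same check over a plain slice of cell states (the state values here are stand-ins, not the runtime's real tophash sentinels):

package main

import "fmt"

const (
    filled    = 0 // stand-in for an occupied cell
    emptyOne  = 1 // stand-in for a single tombstone
    emptyRest = 2 // stand-in for "empty from here on"
)

// checkTombstones mirrors the two passes in MapTombstoneCheck: count the
// non-emptyRest cells, then verify they all come first.
func checkTombstones(cells []int) error {
    n := 0
    for _, c := range cells {
        if c != emptyRest {
            n++
        }
    }
    for k, c := range cells {
        switch {
        case k < n && c == emptyRest:
            return fmt.Errorf("early emptyRest at %d", k)
        case k >= n && c != emptyRest:
            return fmt.Errorf("late non-emptyRest at %d", k)
        case k == n-1 && c == emptyOne:
            return fmt.Errorf("last non-emptyRest entry is emptyOne at %d", k)
        }
    }
    return nil
}

func main() {
    fmt.Println(checkTombstones([]int{filled, emptyOne, filled, emptyRest})) // <nil>
    fmt.Println(checkTombstones([]int{filled, emptyRest, filled}))           // early emptyRest at 1
}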
@ -0,0 +1,58 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build goexperiment.swissmap
|
||||
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"internal/abi"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func MapBucketsCount(m map[int]int) int {
|
||||
h := *(**hmap)(unsafe.Pointer(&m))
|
||||
return 1 << h.B
|
||||
}
|
||||
|
||||
func MapBucketsPointerIsNil(m map[int]int) bool {
|
||||
h := *(**hmap)(unsafe.Pointer(&m))
|
||||
return h.buckets == nil
|
||||
}
|
||||
|
||||
func MapTombstoneCheck(m map[int]int) {
|
||||
// Make sure emptyOne and emptyRest are distributed correctly.
|
||||
// We should have a series of filled and emptyOne cells, followed by
|
||||
// a series of emptyRest cells.
|
||||
h := *(**hmap)(unsafe.Pointer(&m))
|
||||
i := any(m)
|
||||
t := *(**maptype)(unsafe.Pointer(&i))
|
||||
|
||||
for x := 0; x < 1<<h.B; x++ {
|
||||
b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
|
||||
n := 0
|
||||
for b := b0; b != nil; b = b.overflow(t) {
|
||||
for i := 0; i < abi.SwissMapBucketCount; i++ {
|
||||
if b.tophash[i] != emptyRest {
|
||||
n++
|
||||
}
|
||||
}
|
||||
}
|
||||
k := 0
|
||||
for b := b0; b != nil; b = b.overflow(t) {
|
||||
for i := 0; i < abi.SwissMapBucketCount; i++ {
|
||||
if k < n && b.tophash[i] == emptyRest {
|
||||
panic("early emptyRest")
|
||||
}
|
||||
if k >= n && b.tophash[i] != emptyRest {
|
||||
panic("late non-emptyRest")
|
||||
}
|
||||
if k == n-1 && b.tophash[i] == emptyOne {
|
||||
panic("last non-emptyRest entry is emptyOne")
|
||||
}
|
||||
k++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -483,16 +483,6 @@ func (rw *RWMutex) Unlock() {
|
|||
|
||||
const RuntimeHmapSize = unsafe.Sizeof(hmap{})
|
||||
|
||||
func MapBucketsCount(m map[int]int) int {
|
||||
h := *(**hmap)(unsafe.Pointer(&m))
|
||||
return 1 << h.B
|
||||
}
|
||||
|
||||
func MapBucketsPointerIsNil(m map[int]int) bool {
|
||||
h := *(**hmap)(unsafe.Pointer(&m))
|
||||
return h.buckets == nil
|
||||
}
|
||||
|
||||
func OverLoadFactor(count int, B uint8) bool {
|
||||
return overLoadFactor(count, B)
|
||||
}
|
||||
|
|
@ -614,42 +604,6 @@ func stackOverflow(x *byte) {
|
|||
stackOverflow(&buf[0])
|
||||
}
|
||||
|
||||
func MapTombstoneCheck(m map[int]int) {
|
||||
// Make sure emptyOne and emptyRest are distributed correctly.
|
||||
// We should have a series of filled and emptyOne cells, followed by
|
||||
// a series of emptyRest cells.
|
||||
h := *(**hmap)(unsafe.Pointer(&m))
|
||||
i := any(m)
|
||||
t := *(**maptype)(unsafe.Pointer(&i))
|
||||
|
||||
for x := 0; x < 1<<h.B; x++ {
|
||||
b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
|
||||
n := 0
|
||||
for b := b0; b != nil; b = b.overflow(t) {
|
||||
for i := 0; i < abi.MapBucketCount; i++ {
|
||||
if b.tophash[i] != emptyRest {
|
||||
n++
|
||||
}
|
||||
}
|
||||
}
|
||||
k := 0
|
||||
for b := b0; b != nil; b = b.overflow(t) {
|
||||
for i := 0; i < abi.MapBucketCount; i++ {
|
||||
if k < n && b.tophash[i] == emptyRest {
|
||||
panic("early emptyRest")
|
||||
}
|
||||
if k >= n && b.tophash[i] != emptyRest {
|
||||
panic("late non-emptyRest")
|
||||
}
|
||||
if k == n-1 && b.tophash[i] == emptyOne {
|
||||
panic("last non-emptyRest entry is emptyOne")
|
||||
}
|
||||
k++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func RunGetgThreadSwitchTest() {
|
||||
// Test that getg works correctly with thread switch.
|
||||
// With gccgo, if we generate getg inlined, the backend
|
||||
|
|
|
|||
|
|
@ -43,9 +43,9 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
|
|||
}
|
||||
}
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
|
||||
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -92,9 +92,9 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
|
|||
}
|
||||
}
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
|
||||
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -145,7 +145,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if isEmpty(b.tophash[i]) {
|
||||
if insertb == nil {
|
||||
inserti = i
|
||||
|
|
@ -185,7 +185,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
|
||||
// store new key at insert position
|
||||
|
|
@ -194,7 +194,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -244,7 +244,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if isEmpty(b.tophash[i]) {
|
||||
if insertb == nil {
|
||||
inserti = i
|
||||
|
|
@ -284,7 +284,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
|
||||
// store new key at insert position
|
||||
|
|
@ -293,7 +293,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -326,7 +326,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
|
|||
bOrig := b
|
||||
search:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
|
||||
if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -338,7 +338,7 @@ search:
|
|||
// 32 bits wide and the key is 32 bits wide also.
|
||||
*(*unsafe.Pointer)(k) = nil
|
||||
}
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
|
||||
if t.Elem.Pointers() {
|
||||
memclrHasPointers(e, t.Elem.Size_)
|
||||
} else {
|
||||
|
|
@ -347,7 +347,7 @@ search:
|
|||
b.tophash[i] = emptyOne
|
||||
// If the bucket now ends in a bunch of emptyOne states,
|
||||
// change those to emptyRest states.
|
||||
if i == abi.MapBucketCount-1 {
|
||||
if i == abi.OldMapBucketCount-1 {
|
||||
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
|
||||
goto notLast
|
||||
}
|
||||
|
|
@ -366,7 +366,7 @@ search:
|
|||
c := b
|
||||
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
|
||||
}
|
||||
i = abi.MapBucketCount - 1
|
||||
i = abi.OldMapBucketCount - 1
|
||||
} else {
|
||||
i--
|
||||
}
|
||||
|
|
@ -414,7 +414,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
x := &xy[0]
|
||||
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
|
||||
x.k = add(unsafe.Pointer(x.b), dataOffset)
|
||||
x.e = add(x.k, abi.MapBucketCount*4)
|
||||
x.e = add(x.k, abi.OldMapBucketCount*4)
|
||||
|
||||
if !h.sameSizeGrow() {
|
||||
// Only calculate y pointers if we're growing bigger.
|
||||
|
|
@ -422,13 +422,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
y := &xy[1]
|
||||
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
|
||||
y.k = add(unsafe.Pointer(y.b), dataOffset)
|
||||
y.e = add(y.k, abi.MapBucketCount*4)
|
||||
y.e = add(y.k, abi.OldMapBucketCount*4)
|
||||
}
|
||||
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
k := add(unsafe.Pointer(b), dataOffset)
|
||||
e := add(k, abi.MapBucketCount*4)
|
||||
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
|
||||
e := add(k, abi.OldMapBucketCount*4)
|
||||
for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
|
||||
top := b.tophash[i]
|
||||
if isEmpty(top) {
|
||||
b.tophash[i] = evacuatedEmpty
|
||||
|
|
@ -450,13 +450,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
|
||||
dst := &xy[useY] // evacuation destination
|
||||
|
||||
if dst.i == abi.MapBucketCount {
|
||||
if dst.i == abi.OldMapBucketCount {
|
||||
dst.b = h.newoverflow(t, dst.b)
|
||||
dst.i = 0
|
||||
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
|
||||
dst.e = add(dst.k, abi.MapBucketCount*4)
|
||||
dst.e = add(dst.k, abi.OldMapBucketCount*4)
|
||||
}
|
||||
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
|
||||
// Copy key.
|
||||
if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
|
||||
|
|
|
|||
|
|
@ -43,9 +43,9 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
|
|||
}
|
||||
}
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 4) {
|
||||
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*4+i*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -83,9 +83,9 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
|
|||
}
|
||||
}
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 4) {
|
||||
if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*4+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -125,7 +125,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if isEmpty(b.tophash[i]) {
|
||||
if insertb == nil {
|
||||
inserti = i
|
||||
|
|
@ -165,7 +165,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
|
||||
// store new key at insert position
|
||||
|
|
@ -174,7 +174,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*4+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -215,7 +215,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if isEmpty(b.tophash[i]) {
|
||||
if insertb == nil {
|
||||
inserti = i
|
||||
|
|
@ -255,7 +255,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
|
||||
// store new key at insert position
|
||||
|
|
@ -264,7 +264,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*4+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -297,7 +297,7 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
|
|||
bOrig := b
|
||||
search:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 4) {
|
||||
if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -309,7 +309,7 @@ search:
|
|||
// 32 bits wide and the key is 32 bits wide also.
|
||||
*(*unsafe.Pointer)(k) = nil
|
||||
}
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*4+i*uintptr(t.ValueSize))
|
||||
if t.Elem.Pointers() {
|
||||
memclrHasPointers(e, t.Elem.Size_)
|
||||
} else {
|
||||
|
|
@ -318,7 +318,7 @@ search:
|
|||
b.tophash[i] = emptyOne
|
||||
// If the bucket now ends in a bunch of emptyOne states,
|
||||
// change those to emptyRest states.
|
||||
if i == abi.MapBucketCount-1 {
|
||||
if i == abi.SwissMapBucketCount-1 {
|
||||
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
|
||||
goto notLast
|
||||
}
|
||||
|
|
@ -337,7 +337,7 @@ search:
|
|||
c := b
|
||||
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
|
||||
}
|
||||
i = abi.MapBucketCount - 1
|
||||
i = abi.SwissMapBucketCount - 1
|
||||
} else {
|
||||
i--
|
||||
}
|
||||
|
|
@ -385,7 +385,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
x := &xy[0]
|
||||
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
|
||||
x.k = add(unsafe.Pointer(x.b), dataOffset)
|
||||
x.e = add(x.k, abi.MapBucketCount*4)
|
||||
x.e = add(x.k, abi.SwissMapBucketCount*4)
|
||||
|
||||
if !h.sameSizeGrow() {
|
||||
// Only calculate y pointers if we're growing bigger.
|
||||
|
|
@ -393,13 +393,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
y := &xy[1]
|
||||
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
|
||||
y.k = add(unsafe.Pointer(y.b), dataOffset)
|
||||
y.e = add(y.k, abi.MapBucketCount*4)
|
||||
y.e = add(y.k, abi.SwissMapBucketCount*4)
|
||||
}
|
||||
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
k := add(unsafe.Pointer(b), dataOffset)
|
||||
e := add(k, abi.MapBucketCount*4)
|
||||
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
|
||||
e := add(k, abi.SwissMapBucketCount*4)
|
||||
for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
|
||||
top := b.tophash[i]
|
||||
if isEmpty(top) {
|
||||
b.tophash[i] = evacuatedEmpty
|
||||
|
|
@ -421,13 +421,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
|
||||
dst := &xy[useY] // evacuation destination
|
||||
|
||||
if dst.i == abi.MapBucketCount {
|
||||
if dst.i == abi.SwissMapBucketCount {
|
||||
dst.b = h.newoverflow(t, dst.b)
|
||||
dst.i = 0
|
||||
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
|
||||
dst.e = add(dst.k, abi.MapBucketCount*4)
|
||||
dst.e = add(dst.k, abi.SwissMapBucketCount*4)
|
||||
}
|
||||
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
|
||||
// Copy key.
|
||||
if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
|
||||
|
|
|
|||
|
|
@ -43,9 +43,9 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
|
|||
}
|
||||
}
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
|
||||
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -92,9 +92,9 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
|
|||
}
|
||||
}
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
|
||||
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -145,7 +145,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if isEmpty(b.tophash[i]) {
|
||||
if insertb == nil {
|
||||
insertb = b
|
||||
|
|
@ -185,7 +185,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
|
||||
// store new key at insert position
|
||||
|
|
@ -194,7 +194,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -246,7 +246,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if isEmpty(b.tophash[i]) {
|
||||
if insertb == nil {
|
||||
insertb = b
|
||||
|
|
@ -286,7 +286,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
|
||||
// store new key at insert position
|
||||
|
|
@ -295,7 +295,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -328,7 +328,7 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
|
|||
bOrig := b
|
||||
search:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
|
||||
if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -342,7 +342,7 @@ search:
|
|||
memclrHasPointers(k, 8)
|
||||
}
|
||||
}
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
|
||||
if t.Elem.Pointers() {
|
||||
memclrHasPointers(e, t.Elem.Size_)
|
||||
} else {
|
||||
|
|
@ -351,7 +351,7 @@ search:
|
|||
b.tophash[i] = emptyOne
|
||||
// If the bucket now ends in a bunch of emptyOne states,
|
||||
// change those to emptyRest states.
|
||||
if i == abi.MapBucketCount-1 {
|
||||
if i == abi.OldMapBucketCount-1 {
|
||||
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
|
||||
goto notLast
|
||||
}
|
||||
|
|
@ -370,7 +370,7 @@ search:
|
|||
c := b
|
||||
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
|
||||
}
|
||||
i = abi.MapBucketCount - 1
|
||||
i = abi.OldMapBucketCount - 1
|
||||
} else {
|
||||
i--
|
||||
}
|
||||
|
|
@ -418,7 +418,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
x := &xy[0]
|
||||
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
|
||||
x.k = add(unsafe.Pointer(x.b), dataOffset)
|
||||
x.e = add(x.k, abi.MapBucketCount*8)
|
||||
x.e = add(x.k, abi.OldMapBucketCount*8)
|
||||
|
||||
if !h.sameSizeGrow() {
|
||||
// Only calculate y pointers if we're growing bigger.
|
||||
|
|
@ -426,13 +426,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
y := &xy[1]
|
||||
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
|
||||
y.k = add(unsafe.Pointer(y.b), dataOffset)
|
||||
y.e = add(y.k, abi.MapBucketCount*8)
|
||||
y.e = add(y.k, abi.OldMapBucketCount*8)
|
||||
}
|
||||
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
k := add(unsafe.Pointer(b), dataOffset)
|
||||
e := add(k, abi.MapBucketCount*8)
|
||||
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
|
||||
e := add(k, abi.OldMapBucketCount*8)
|
||||
for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
|
||||
top := b.tophash[i]
|
||||
if isEmpty(top) {
|
||||
b.tophash[i] = evacuatedEmpty
|
||||
|
|
@ -454,13 +454,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
|
||||
dst := &xy[useY] // evacuation destination
|
||||
|
||||
if dst.i == abi.MapBucketCount {
|
||||
if dst.i == abi.OldMapBucketCount {
|
||||
dst.b = h.newoverflow(t, dst.b)
|
||||
dst.i = 0
|
||||
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
|
||||
dst.e = add(dst.k, abi.MapBucketCount*8)
|
||||
dst.e = add(dst.k, abi.OldMapBucketCount*8)
|
||||
}
|
||||
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
|
||||
// Copy key.
|
||||
if t.Key.Pointers() && writeBarrier.enabled {
|
||||
|
|
|
|||
|
|
@ -43,9 +43,9 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
|
|||
}
|
||||
}
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 8) {
|
||||
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*8+i*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -83,9 +83,9 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
|
|||
}
|
||||
}
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 8) {
|
||||
if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*8+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -125,7 +125,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if isEmpty(b.tophash[i]) {
|
||||
if insertb == nil {
|
||||
insertb = b
|
||||
|
|
@@ -165,7 +165,7 @@ bucketloop:
 		insertb = h.newoverflow(t, b)
 		inserti = 0 // not necessary, but avoids needlessly spilling inserti
 	}
-	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
+	insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
 
 	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
 	// store new key at insert position
|
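The changed line above masks inserti with the bucket count minus one even though inserti is already in range; because the bucket count is a power of two the mask leaves the value unchanged, but it gives the compiler an obvious upper bound so the array bounds check can be dropped, exactly as the comment says. A minimal sketch of the same idiom (the function and constant here are illustrative):

package main

import "fmt"

const bucketCnt = 8 // power of two, as the runtime requires

func store(tophash *[bucketCnt]uint8, i uintptr, top uint8) {
	// i is known by construction to be < bucketCnt, but the compiler may not
	// be able to prove it; masking with bucketCnt-1 bounds the index without
	// changing its value, so the bounds check can be elided.
	tophash[i&(bucketCnt-1)] = top
}

func main() {
	var th [bucketCnt]uint8
	store(&th, 5, 0xAB)
	fmt.Println(th)
}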
@ -174,7 +174,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*8+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -215,7 +215,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if isEmpty(b.tophash[i]) {
|
||||
if insertb == nil {
|
||||
insertb = b
|
||||
|
|
@ -255,7 +255,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
|
||||
// store new key at insert position
|
||||
|
|
@ -264,7 +264,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*8+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -297,7 +297,7 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
|
|||
bOrig := b
|
||||
search:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
|
||||
for i, k := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, k = i+1, add(k, 8) {
|
||||
if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -311,7 +311,7 @@ search:
|
|||
memclrHasPointers(k, 8)
|
||||
}
|
||||
}
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*8+i*uintptr(t.ValueSize))
|
||||
if t.Elem.Pointers() {
|
||||
memclrHasPointers(e, t.Elem.Size_)
|
||||
} else {
|
||||
|
|
@ -320,7 +320,7 @@ search:
|
|||
b.tophash[i] = emptyOne
|
||||
// If the bucket now ends in a bunch of emptyOne states,
|
||||
// change those to emptyRest states.
|
||||
if i == abi.MapBucketCount-1 {
|
||||
if i == abi.SwissMapBucketCount-1 {
|
||||
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
|
||||
goto notLast
|
||||
}
|
||||
|
|
@ -339,7 +339,7 @@ search:
|
|||
c := b
|
||||
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
|
||||
}
|
||||
i = abi.MapBucketCount - 1
|
||||
i = abi.SwissMapBucketCount - 1
|
||||
} else {
|
||||
i--
|
||||
}
|
||||
|
|
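The mapdelete_fast64 hunk above is the tail of the emptyRest back-propagation: once the deleted slot is marked emptyOne, the code walks backwards converting a trailing run of emptyOne marks into emptyRest, and because overflow buckets form a singly linked list, stepping back one bucket means re-walking the chain from bOrig until the predecessor of the current bucket is found. A simplified model of that backward sweep, with invented types and mark values:

package main

import "fmt"

const (
	emptyRest = 0 // no entries here or in any later slot/bucket
	emptyOne  = 1 // this slot is empty, later ones may not be
	used      = 2
)

type bucket struct {
	tophash  [8]uint8
	overflow *bucket
}

// markEmptyRest converts a trailing run of emptyOne marks, ending at bucket b
// slot i, into emptyRest, walking backwards through the chain rooted at bOrig.
func markEmptyRest(bOrig, b *bucket, i int) {
	for {
		b.tophash[i] = emptyRest
		if i == 0 {
			if b == bOrig {
				return // reached the start of the initial bucket
			}
			// Find the previous bucket by re-walking from the start,
			// since buckets carry no back pointer.
			c := b
			for b = bOrig; b.overflow != c; b = b.overflow {
			}
			i = len(b.tophash) - 1
		} else {
			i--
		}
		if b.tophash[i] != emptyOne {
			return
		}
	}
}

func main() {
	b2 := &bucket{}
	b1 := &bucket{overflow: b2}
	for i := range b1.tophash {
		b1.tophash[i] = used
	}
	b1.tophash[7] = emptyOne
	markEmptyRest(b1, b2, 0)
	fmt.Println(b1.tophash, b2.tophash) // trailing emptyOne in b1 became emptyRest
}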
@ -387,7 +387,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
x := &xy[0]
|
||||
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
|
||||
x.k = add(unsafe.Pointer(x.b), dataOffset)
|
||||
x.e = add(x.k, abi.MapBucketCount*8)
|
||||
x.e = add(x.k, abi.SwissMapBucketCount*8)
|
||||
|
||||
if !h.sameSizeGrow() {
|
||||
// Only calculate y pointers if we're growing bigger.
|
||||
|
|
@ -395,13 +395,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
y := &xy[1]
|
||||
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
|
||||
y.k = add(unsafe.Pointer(y.b), dataOffset)
|
||||
y.e = add(y.k, abi.MapBucketCount*8)
|
||||
y.e = add(y.k, abi.SwissMapBucketCount*8)
|
||||
}
|
||||
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
k := add(unsafe.Pointer(b), dataOffset)
|
||||
e := add(k, abi.MapBucketCount*8)
|
||||
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
|
||||
e := add(k, abi.SwissMapBucketCount*8)
|
||||
for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
|
||||
top := b.tophash[i]
|
||||
if isEmpty(top) {
|
||||
b.tophash[i] = evacuatedEmpty
|
||||
|
|
@ -423,13 +423,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
|
||||
dst := &xy[useY] // evacuation destination
|
||||
|
||||
if dst.i == abi.MapBucketCount {
|
||||
if dst.i == abi.SwissMapBucketCount {
|
||||
dst.b = h.newoverflow(t, dst.b)
|
||||
dst.i = 0
|
||||
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
|
||||
dst.e = add(dst.k, abi.MapBucketCount*8)
|
||||
dst.e = add(dst.k, abi.SwissMapBucketCount*8)
|
||||
}
|
||||
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
|
||||
// Copy key.
|
||||
if t.Key.Pointers() && writeBarrier.enabled {
|
||||
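evacuate_fast64 above splits every old bucket between two destinations, x (the same index) and y (the old index plus newbit); for a doubling grow the choice depends on the hash bit that becomes significant at the new size, while a same-size grow only ever uses x. A small sketch of that routing decision (helper names here are assumptions, not the runtime's):

package main

import "fmt"

// chooseDest reports which half ("x" or "y") an entry goes to when a table of
// oldSize buckets doubles: it keeps its old index, or moves to oldIndex+oldSize,
// depending on the hash bit that becomes significant.
func chooseDest(hash, oldIndex, oldSize uintptr) (newIndex uintptr, half string) {
	newbit := oldSize // a power of two
	if hash&newbit != 0 {
		return oldIndex + newbit, "y"
	}
	return oldIndex, "x"
}

func main() {
	// With 4 old buckets, hash bit 2 decides between bucket 1 and bucket 5.
	fmt.Println(chooseDest(0b0101, 1, 4)) // 5 y
	fmt.Println(chooseDest(0b0001, 1, 4)) // 1 x
}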
@ -29,7 +29,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
|
|||
b := (*bmap)(h.buckets)
|
||||
if key.len < 32 {
|
||||
// short key, doing lots of comparisons is ok
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || isEmpty(b.tophash[i]) {
|
||||
if b.tophash[i] == emptyRest {
|
||||
|
|
@ -38,14 +38,14 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
|
|||
continue
|
||||
}
|
||||
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
return unsafe.Pointer(&zeroVal[0])
|
||||
}
|
||||
// long key, try not to do more comparisons than necessary
|
||||
keymaybe := uintptr(abi.MapBucketCount)
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
keymaybe := uintptr(abi.OldMapBucketCount)
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || isEmpty(b.tophash[i]) {
|
||||
if b.tophash[i] == emptyRest {
|
||||
|
|
@ -54,7 +54,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
|
|||
continue
|
||||
}
|
||||
if k.str == key.str {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
}
|
||||
// check first 4 bytes
|
||||
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
|
||||
|
|
@ -64,16 +64,16 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
|
|||
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
|
||||
continue
|
||||
}
|
||||
if keymaybe != abi.MapBucketCount {
|
||||
if keymaybe != abi.OldMapBucketCount {
|
||||
// Two keys are potential matches. Use hash to distinguish them.
|
||||
goto dohash
|
||||
}
|
||||
keymaybe = i
|
||||
}
|
||||
if keymaybe != abi.MapBucketCount {
|
||||
if keymaybe != abi.OldMapBucketCount {
|
||||
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
|
||||
if memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
return unsafe.Pointer(&zeroVal[0])
|
||||
|
|
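The mapaccess1_faststr hunks above show the two string-key regimes: keys shorter than 32 bytes are compared outright, while longer keys are filtered by length and by their first and last four bytes, remembering at most one surviving candidate in keymaybe and falling back to the hashed path when more than one slot survives. A standalone sketch of that filter over plain strings; it assumes keys of at least four bytes and omits the pointer-equality shortcut:

package main

import "fmt"

const bucketCnt = 8

// findLong mimics the long-key filter: it returns the index of the single
// plausible match, or -1 when no slot survives the cheap checks or more than
// one does and a hash-based lookup would be used instead.
func findLong(keys [bucketCnt]string, key string) int {
	keymaybe := bucketCnt // sentinel: no candidate yet
	for i, k := range keys {
		if len(k) != len(key) {
			continue
		}
		// Cheap prefix/suffix probes stand in for the runtime's 4-byte loads.
		if k[:4] != key[:4] || k[len(k)-4:] != key[len(key)-4:] {
			continue
		}
		if keymaybe != bucketCnt {
			return -1 // two plausible slots: give up and hash
		}
		keymaybe = i
	}
	if keymaybe != bucketCnt && keys[keymaybe] == key {
		return keymaybe
	}
	return -1
}

func main() {
	var keys [bucketCnt]string
	keys[2] = "averylongstringkeyABCDEFGHIJKLMNOPQRSTUV"
	fmt.Println(findLong(keys, "averylongstringkeyABCDEFGHIJKLMNOPQRSTUV")) // 2
}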
@ -94,13 +94,13 @@ dohash:
|
|||
}
|
||||
top := tophash(hash)
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || b.tophash[i] != top {
|
||||
continue
|
||||
}
|
||||
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -133,7 +133,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
|
|||
b := (*bmap)(h.buckets)
|
||||
if key.len < 32 {
|
||||
// short key, doing lots of comparisons is ok
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || isEmpty(b.tophash[i]) {
|
||||
if b.tophash[i] == emptyRest {
|
||||
|
|
@ -142,14 +142,14 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
|
|||
continue
|
||||
}
|
||||
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
return unsafe.Pointer(&zeroVal[0]), false
|
||||
}
|
||||
// long key, try not to do more comparisons than necessary
|
||||
keymaybe := uintptr(abi.MapBucketCount)
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
keymaybe := uintptr(abi.OldMapBucketCount)
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || isEmpty(b.tophash[i]) {
|
||||
if b.tophash[i] == emptyRest {
|
||||
|
|
@ -158,7 +158,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
|
|||
continue
|
||||
}
|
||||
if k.str == key.str {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
// check first 4 bytes
|
||||
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
|
||||
|
|
@ -168,16 +168,16 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
|
|||
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
|
||||
continue
|
||||
}
|
||||
if keymaybe != abi.MapBucketCount {
|
||||
if keymaybe != abi.OldMapBucketCount {
|
||||
// Two keys are potential matches. Use hash to distinguish them.
|
||||
goto dohash
|
||||
}
|
||||
keymaybe = i
|
||||
}
|
||||
if keymaybe != abi.MapBucketCount {
|
||||
if keymaybe != abi.OldMapBucketCount {
|
||||
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
|
||||
if memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
return unsafe.Pointer(&zeroVal[0]), false
|
||||
|
|
@ -198,13 +198,13 @@ dohash:
|
|||
}
|
||||
top := tophash(hash)
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || b.tophash[i] != top {
|
||||
continue
|
||||
}
|
||||
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -257,7 +257,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if isEmpty(b.tophash[i]) && insertb == nil {
|
||||
insertb = b
|
||||
|
|
@ -304,7 +304,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = top // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = top // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
|
||||
// store new key at insert position
|
||||
|
|
@ -312,7 +312,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -347,7 +347,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
|
|||
top := tophash(hash)
|
||||
search:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || b.tophash[i] != top {
|
||||
continue
|
||||
|
|
@ -357,7 +357,7 @@ search:
|
|||
}
|
||||
// Clear key's pointer.
|
||||
k.str = nil
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
if t.Elem.Pointers() {
|
||||
memclrHasPointers(e, t.Elem.Size_)
|
||||
} else {
|
||||
|
|
@ -366,7 +366,7 @@ search:
|
|||
b.tophash[i] = emptyOne
|
||||
// If the bucket now ends in a bunch of emptyOne states,
|
||||
// change those to emptyRest states.
|
||||
if i == abi.MapBucketCount-1 {
|
||||
if i == abi.OldMapBucketCount-1 {
|
||||
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
|
||||
goto notLast
|
||||
}
|
||||
|
|
@ -385,7 +385,7 @@ search:
|
|||
c := b
|
||||
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
|
||||
}
|
||||
i = abi.MapBucketCount - 1
|
||||
i = abi.OldMapBucketCount - 1
|
||||
} else {
|
||||
i--
|
||||
}
|
||||
|
|
@ -433,7 +433,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
x := &xy[0]
|
||||
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
|
||||
x.k = add(unsafe.Pointer(x.b), dataOffset)
|
||||
x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize)
|
||||
x.e = add(x.k, abi.OldMapBucketCount*2*goarch.PtrSize)
|
||||
|
||||
if !h.sameSizeGrow() {
|
||||
// Only calculate y pointers if we're growing bigger.
|
||||
|
|
@ -441,13 +441,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
y := &xy[1]
|
||||
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
|
||||
y.k = add(unsafe.Pointer(y.b), dataOffset)
|
||||
y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize)
|
||||
y.e = add(y.k, abi.OldMapBucketCount*2*goarch.PtrSize)
|
||||
}
|
||||
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
k := add(unsafe.Pointer(b), dataOffset)
|
||||
e := add(k, abi.MapBucketCount*2*goarch.PtrSize)
|
||||
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
|
||||
e := add(k, abi.OldMapBucketCount*2*goarch.PtrSize)
|
||||
for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
|
||||
top := b.tophash[i]
|
||||
if isEmpty(top) {
|
||||
b.tophash[i] = evacuatedEmpty
|
||||
|
|
@ -469,13 +469,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
|
||||
dst := &xy[useY] // evacuation destination
|
||||
|
||||
if dst.i == abi.MapBucketCount {
|
||||
if dst.i == abi.OldMapBucketCount {
|
||||
dst.b = h.newoverflow(t, dst.b)
|
||||
dst.i = 0
|
||||
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
|
||||
dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize)
|
||||
dst.e = add(dst.k, abi.OldMapBucketCount*2*goarch.PtrSize)
|
||||
}
|
||||
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
|
||||
// Copy key.
|
||||
*(*string)(dst.k) = *(*string)(k)
|
||||
@ -29,7 +29,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
|
|||
b := (*bmap)(h.buckets)
|
||||
if key.len < 32 {
|
||||
// short key, doing lots of comparisons is ok
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || isEmpty(b.tophash[i]) {
|
||||
if b.tophash[i] == emptyRest {
|
||||
|
|
@ -38,14 +38,14 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
|
|||
continue
|
||||
}
|
||||
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
return unsafe.Pointer(&zeroVal[0])
|
||||
}
|
||||
// long key, try not to do more comparisons than necessary
|
||||
keymaybe := uintptr(abi.MapBucketCount)
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
keymaybe := uintptr(abi.SwissMapBucketCount)
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || isEmpty(b.tophash[i]) {
|
||||
if b.tophash[i] == emptyRest {
|
||||
|
|
@ -54,7 +54,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
|
|||
continue
|
||||
}
|
||||
if k.str == key.str {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
}
|
||||
// check first 4 bytes
|
||||
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
|
||||
|
|
@ -64,16 +64,16 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
|
|||
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
|
||||
continue
|
||||
}
|
||||
if keymaybe != abi.MapBucketCount {
|
||||
if keymaybe != abi.SwissMapBucketCount {
|
||||
// Two keys are potential matches. Use hash to distinguish them.
|
||||
goto dohash
|
||||
}
|
||||
keymaybe = i
|
||||
}
|
||||
if keymaybe != abi.MapBucketCount {
|
||||
if keymaybe != abi.SwissMapBucketCount {
|
||||
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
|
||||
if memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
return unsafe.Pointer(&zeroVal[0])
|
||||
|
|
@ -94,13 +94,13 @@ dohash:
|
|||
}
|
||||
top := tophash(hash)
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || b.tophash[i] != top {
|
||||
continue
|
||||
}
|
||||
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -124,7 +124,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
|
|||
b := (*bmap)(h.buckets)
|
||||
if key.len < 32 {
|
||||
// short key, doing lots of comparisons is ok
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || isEmpty(b.tophash[i]) {
|
||||
if b.tophash[i] == emptyRest {
|
||||
|
|
@ -133,14 +133,14 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
|
|||
continue
|
||||
}
|
||||
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
return unsafe.Pointer(&zeroVal[0]), false
|
||||
}
|
||||
// long key, try not to do more comparisons than necessary
|
||||
keymaybe := uintptr(abi.MapBucketCount)
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
keymaybe := uintptr(abi.SwissMapBucketCount)
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || isEmpty(b.tophash[i]) {
|
||||
if b.tophash[i] == emptyRest {
|
||||
|
|
@ -149,7 +149,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
|
|||
continue
|
||||
}
|
||||
if k.str == key.str {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
// check first 4 bytes
|
||||
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
|
||||
|
|
@ -159,16 +159,16 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
|
|||
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
|
||||
continue
|
||||
}
|
||||
if keymaybe != abi.MapBucketCount {
|
||||
if keymaybe != abi.SwissMapBucketCount {
|
||||
// Two keys are potential matches. Use hash to distinguish them.
|
||||
goto dohash
|
||||
}
|
||||
keymaybe = i
|
||||
}
|
||||
if keymaybe != abi.MapBucketCount {
|
||||
if keymaybe != abi.SwissMapBucketCount {
|
||||
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
|
||||
if memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
return unsafe.Pointer(&zeroVal[0]), false
|
||||
|
|
@ -189,13 +189,13 @@ dohash:
|
|||
}
|
||||
top := tophash(hash)
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || b.tophash[i] != top {
|
||||
continue
|
||||
}
|
||||
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
return add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -237,7 +237,7 @@ again:
|
|||
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if isEmpty(b.tophash[i]) && insertb == nil {
|
||||
insertb = b
|
||||
|
|
@ -284,7 +284,7 @@ bucketloop:
|
|||
insertb = h.newoverflow(t, b)
|
||||
inserti = 0 // not necessary, but avoids needlessly spilling inserti
|
||||
}
|
||||
insertb.tophash[inserti&(abi.MapBucketCount-1)] = top // mask inserti to avoid bounds checks
|
||||
insertb.tophash[inserti&(abi.SwissMapBucketCount-1)] = top // mask inserti to avoid bounds checks
|
||||
|
||||
insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
|
||||
// store new key at insert position
|
||||
|
|
@ -292,7 +292,7 @@ bucketloop:
|
|||
h.count++
|
||||
|
||||
done:
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
|
||||
elem := add(unsafe.Pointer(insertb), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
|
||||
if h.flags&hashWriting == 0 {
|
||||
fatal("concurrent map writes")
|
||||
}
|
||||
|
|
@ -327,7 +327,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
|
|||
top := tophash(hash)
|
||||
search:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
for i, kptr := uintptr(0), b.keys(); i < abi.SwissMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
|
||||
k := (*stringStruct)(kptr)
|
||||
if k.len != key.len || b.tophash[i] != top {
|
||||
continue
|
||||
|
|
@ -337,7 +337,7 @@ search:
|
|||
}
|
||||
// Clear key's pointer.
|
||||
k.str = nil
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
|
||||
if t.Elem.Pointers() {
|
||||
memclrHasPointers(e, t.Elem.Size_)
|
||||
} else {
|
||||
|
|
@ -346,7 +346,7 @@ search:
|
|||
b.tophash[i] = emptyOne
|
||||
// If the bucket now ends in a bunch of emptyOne states,
|
||||
// change those to emptyRest states.
|
||||
if i == abi.MapBucketCount-1 {
|
||||
if i == abi.SwissMapBucketCount-1 {
|
||||
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
|
||||
goto notLast
|
||||
}
|
||||
|
|
@ -365,7 +365,7 @@ search:
|
|||
c := b
|
||||
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
|
||||
}
|
||||
i = abi.MapBucketCount - 1
|
||||
i = abi.SwissMapBucketCount - 1
|
||||
} else {
|
||||
i--
|
||||
}
|
||||
|
|
@ -413,7 +413,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
x := &xy[0]
|
||||
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
|
||||
x.k = add(unsafe.Pointer(x.b), dataOffset)
|
||||
x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize)
|
||||
x.e = add(x.k, abi.SwissMapBucketCount*2*goarch.PtrSize)
|
||||
|
||||
if !h.sameSizeGrow() {
|
||||
// Only calculate y pointers if we're growing bigger.
|
||||
|
|
@ -421,13 +421,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
y := &xy[1]
|
||||
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
|
||||
y.k = add(unsafe.Pointer(y.b), dataOffset)
|
||||
y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize)
|
||||
y.e = add(y.k, abi.SwissMapBucketCount*2*goarch.PtrSize)
|
||||
}
|
||||
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
k := add(unsafe.Pointer(b), dataOffset)
|
||||
e := add(k, abi.MapBucketCount*2*goarch.PtrSize)
|
||||
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
|
||||
e := add(k, abi.SwissMapBucketCount*2*goarch.PtrSize)
|
||||
for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
|
||||
top := b.tophash[i]
|
||||
if isEmpty(top) {
|
||||
b.tophash[i] = evacuatedEmpty
|
||||
|
|
@ -449,13 +449,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
|
||||
dst := &xy[useY] // evacuation destination
|
||||
|
||||
if dst.i == abi.MapBucketCount {
|
||||
if dst.i == abi.SwissMapBucketCount {
|
||||
dst.b = h.newoverflow(t, dst.b)
|
||||
dst.i = 0
|
||||
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
|
||||
dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize)
|
||||
dst.e = add(dst.k, abi.SwissMapBucketCount*2*goarch.PtrSize)
|
||||
}
|
||||
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
|
||||
// Copy key.
|
||||
*(*string)(dst.k) = *(*string)(k)
|
||||
@@ -63,15 +63,17 @@ import (
 	"unsafe"
 )
 
+type maptype = abi.OldMapType
+
 const (
 	// Maximum number of key/elem pairs a bucket can hold.
-	bucketCntBits = abi.MapBucketCountBits
+	bucketCntBits = abi.OldMapBucketCountBits
 
 	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
 	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
 	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
 	loadFactorDen = 2
-	loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16
+	loadFactorNum = loadFactorDen * abi.OldMapBucketCount * 13 / 16
 
 	// data offset should be the size of the bmap struct, but needs to be
 	// aligned correctly. For amd64p32 this means 64-bit alignment
@@ -146,7 +148,7 @@ type bmap struct {
 	// tophash generally contains the top byte of the hash value
 	// for each key in this bucket. If tophash[0] < minTopHash,
 	// tophash[0] is a bucket evacuation state instead.
-	tophash [abi.MapBucketCount]uint8
+	tophash [abi.OldMapBucketCount]uint8
 	// Followed by bucketCnt keys and then bucketCnt elems.
 	// NOTE: packing all the keys together and then all the elems together makes the
 	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
@ -446,7 +448,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
|
|||
top := tophash(hash)
|
||||
bucketloop:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if b.tophash[i] == emptyRest {
|
||||
break bucketloop
|
||||
|
|
@ -458,7 +460,7 @@ bucketloop:
|
|||
k = *((*unsafe.Pointer)(k))
|
||||
}
|
||||
if t.Key.Equal(key, k) {
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
e = *((*unsafe.Pointer)(e))
|
||||
}
|
||||
|
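The element address computed in the hunk above follows from the bmap layout: the tophash header is followed by all eight keys and then all eight elements, so element i sits at dataOffset + bucketCount*keySize + i*elemSize. A tiny sketch of that arithmetic; the 8-byte dataOffset used in the example is an assumption for illustration:

package main

import "fmt"

const bucketCnt = 8

// elemOff returns the byte offset of the i-th element within a bucket whose
// layout is: header (dataOffset bytes), bucketCnt keys, then bucketCnt elems.
func elemOff(dataOffset, keySize, elemSize, i uintptr) uintptr {
	return dataOffset + bucketCnt*keySize + i*elemSize
}

func main() {
	// e.g. a map[int64]int32 bucket with an assumed 8-byte header:
	fmt.Println(elemOff(8, 8, 4, 3)) // 8 + 64 + 12 = 84
}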
|
@ -516,7 +518,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
|
|||
top := tophash(hash)
|
||||
bucketloop:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if b.tophash[i] == emptyRest {
|
||||
break bucketloop
|
||||
|
|
@ -528,7 +530,7 @@ bucketloop:
|
|||
k = *((*unsafe.Pointer)(k))
|
||||
}
|
||||
if t.Key.Equal(key, k) {
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
e = *((*unsafe.Pointer)(e))
|
||||
}
|
||||
|
|
@ -560,7 +562,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
|
|||
top := tophash(hash)
|
||||
bucketloop:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if b.tophash[i] == emptyRest {
|
||||
break bucketloop
|
||||
|
|
@ -572,7 +574,7 @@ bucketloop:
|
|||
k = *((*unsafe.Pointer)(k))
|
||||
}
|
||||
if t.Key.Equal(key, k) {
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
e = *((*unsafe.Pointer)(e))
|
||||
}
|
||||
|
|
@ -656,12 +658,12 @@ again:
|
|||
var elem unsafe.Pointer
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if isEmpty(b.tophash[i]) && inserti == nil {
|
||||
inserti = &b.tophash[i]
|
||||
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
|
||||
elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
}
|
||||
if b.tophash[i] == emptyRest {
|
||||
break bucketloop
|
||||
|
|
@ -679,7 +681,7 @@ bucketloop:
|
|||
if t.NeedKeyUpdate() {
|
||||
typedmemmove(t.Key, k, key)
|
||||
}
|
||||
elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
goto done
|
||||
}
|
||||
ovf := b.overflow(t)
|
||||
|
|
@ -703,7 +705,7 @@ bucketloop:
|
|||
newb := h.newoverflow(t, b)
|
||||
inserti = &newb.tophash[0]
|
||||
insertk = add(unsafe.Pointer(newb), dataOffset)
|
||||
elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
elem = add(insertk, abi.OldMapBucketCount*uintptr(t.KeySize))
|
||||
}
|
||||
|
||||
// store new key/elem at insert position
|
||||
|
|
@ -778,7 +780,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
|
|||
top := tophash(hash)
|
||||
search:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if b.tophash[i] == emptyRest {
|
||||
break search
|
||||
|
|
@ -799,7 +801,7 @@ search:
|
|||
} else if t.Key.Pointers() {
|
||||
memclrHasPointers(k, t.Key.Size_)
|
||||
}
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
*(*unsafe.Pointer)(e) = nil
|
||||
} else if t.Elem.Pointers() {
|
||||
|
|
@ -812,7 +814,7 @@ search:
|
|||
// change those to emptyRest states.
|
||||
// It would be nice to make this a separate function, but
|
||||
// for loops are not currently inlineable.
|
||||
if i == abi.MapBucketCount-1 {
|
||||
if i == abi.OldMapBucketCount-1 {
|
||||
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
|
||||
goto notLast
|
||||
}
|
||||
|
|
@ -831,7 +833,7 @@ search:
|
|||
c := b
|
||||
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
|
||||
}
|
||||
i = abi.MapBucketCount - 1
|
||||
i = abi.OldMapBucketCount - 1
|
||||
} else {
|
||||
i--
|
||||
}
|
||||
|
|
@ -908,7 +910,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
|
|||
// decide where to start
|
||||
r := uintptr(rand())
|
||||
it.startBucket = r & bucketMask(h.B)
|
||||
it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))
|
||||
it.offset = uint8(r >> h.B & (abi.OldMapBucketCount - 1))
|
||||
|
||||
// iterator state
|
||||
it.bucket = it.startBucket
|
||||
|
|
@ -983,8 +985,8 @@ next:
|
|||
}
|
||||
i = 0
|
||||
}
|
||||
for ; i < abi.MapBucketCount; i++ {
|
||||
offi := (i + it.offset) & (abi.MapBucketCount - 1)
|
||||
for ; i < abi.OldMapBucketCount; i++ {
|
||||
offi := (i + it.offset) & (abi.OldMapBucketCount - 1)
|
||||
if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
|
||||
// TODO: emptyRest is hard to use here, as we start iterating
|
||||
// in the middle of a bucket. It's feasible, just tricky.
|
||||
|
|
@ -994,7 +996,7 @@ next:
|
|||
if t.IndirectKey() {
|
||||
k = *((*unsafe.Pointer)(k))
|
||||
}
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
|
||||
if checkBucket != noCheck && !h.sameSizeGrow() {
|
||||
// Special case: iterator was started during a grow to a larger size
|
||||
// and the grow is not done yet. We're working on a bucket whose
|
||||
|
|
@ -1096,7 +1098,7 @@ func mapclear(t *maptype, h *hmap) {
|
|||
for i := uintptr(0); i <= mask; i++ {
|
||||
b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
b.tophash[i] = emptyRest
|
||||
}
|
||||
}
|
||||
|
|
@@ -1183,7 +1185,7 @@ func hashGrow(t *maptype, h *hmap) {
 
 // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
 func overLoadFactor(count int, B uint8) bool {
-	return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
+	return count > abi.OldMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
 }
 
 // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
|
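With a bucket size of 8, loadFactorDen = 2 and loadFactorNum = 2*8*13/16 = 13, so the overLoadFactor check above grows the table once the average load would exceed 13/2 = 6.5 entries per bucket (and the map already holds more than one bucket's worth). A worked check of that arithmetic, mirroring the function shown in the hunk with a simplified bucketShift helper:

package main

import "fmt"

const (
	bucketCnt     = 8
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * bucketCnt * 13 / 16 // 13
)

func bucketShift(B uint8) uintptr { return uintptr(1) << B }

// overLoadFactor mirrors the check in map.go: growth triggers when the
// average load would exceed 6.5 entries per bucket.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

func main() {
	fmt.Println(overLoadFactor(13, 1)) // false: 13 elements fit 2 buckets at 6.5 each
	fmt.Println(overLoadFactor(14, 1)) // true: 14 > 13, time to double to 4 buckets
}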
@ -1261,7 +1263,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
x := &xy[0]
|
||||
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
|
||||
x.k = add(unsafe.Pointer(x.b), dataOffset)
|
||||
x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
x.e = add(x.k, abi.OldMapBucketCount*uintptr(t.KeySize))
|
||||
|
||||
if !h.sameSizeGrow() {
|
||||
// Only calculate y pointers if we're growing bigger.
|
||||
|
|
@ -1269,13 +1271,13 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
y := &xy[1]
|
||||
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
|
||||
y.k = add(unsafe.Pointer(y.b), dataOffset)
|
||||
y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
y.e = add(y.k, abi.OldMapBucketCount*uintptr(t.KeySize))
|
||||
}
|
||||
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
k := add(unsafe.Pointer(b), dataOffset)
|
||||
e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
|
||||
e := add(k, abi.OldMapBucketCount*uintptr(t.KeySize))
|
||||
for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
|
||||
top := b.tophash[i]
|
||||
if isEmpty(top) {
|
||||
b.tophash[i] = evacuatedEmpty
|
||||
|
|
@ -1321,13 +1323,13 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
|
||||
dst := &xy[useY] // evacuation destination
|
||||
|
||||
if dst.i == abi.MapBucketCount {
|
||||
if dst.i == abi.OldMapBucketCount {
|
||||
dst.b = h.newoverflow(t, dst.b)
|
||||
dst.i = 0
|
||||
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
|
||||
dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
dst.e = add(dst.k, abi.OldMapBucketCount*uintptr(t.KeySize))
|
||||
}
|
||||
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
if t.IndirectKey() {
|
||||
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
|
||||
} else {
|
||||
|
|
@ -1408,18 +1410,18 @@ func reflect_makemap(t *maptype, cap int) *hmap {
|
|||
if t.Key.Equal == nil {
|
||||
throw("runtime.reflect_makemap: unsupported map key type")
|
||||
}
|
||||
if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
|
||||
t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
|
||||
if t.Key.Size_ > abi.OldMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
|
||||
t.Key.Size_ <= abi.OldMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
|
||||
throw("key size wrong")
|
||||
}
|
||||
if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
|
||||
t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
|
||||
if t.Elem.Size_ > abi.OldMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
|
||||
t.Elem.Size_ <= abi.OldMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
|
||||
throw("elem size wrong")
|
||||
}
|
||||
if t.Key.Align_ > abi.MapBucketCount {
|
||||
if t.Key.Align_ > abi.OldMapBucketCount {
|
||||
throw("key align too big")
|
||||
}
|
||||
if t.Elem.Align_ > abi.MapBucketCount {
|
||||
if t.Elem.Align_ > abi.OldMapBucketCount {
|
||||
throw("elem align too big")
|
||||
}
|
||||
if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
|
||||
|
|
@ -1428,7 +1430,7 @@ func reflect_makemap(t *maptype, cap int) *hmap {
|
|||
if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
|
||||
throw("elem size not a multiple of elem align")
|
||||
}
|
||||
if abi.MapBucketCount < 8 {
|
||||
if abi.OldMapBucketCount < 8 {
|
||||
throw("bucketsize too small for proper alignment")
|
||||
}
|
||||
if dataOffset%uintptr(t.Key.Align_) != 0 {
|
||||
|
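The reflect_makemap checks above encode the indirection rule: when a key or element type is larger than the ABI's maximum in-bucket size, the bucket stores a pointer to it and the recorded slot size must be exactly one pointer; otherwise the value is stored inline and the slot size must equal the type's size. A compact restatement of that rule; the 128-byte threshold is stated here as an assumption about the old-map ABI constant:

package main

import (
	"fmt"
	"unsafe"
)

const maxKeyBytes = 128 // assumed old-map threshold; the ABI constant governs the real value

// slotSize returns the number of bytes a key of the given type size occupies
// in a bucket, together with whether it is stored indirectly (as a pointer).
func slotSize(typeSize uintptr) (size uintptr, indirect bool) {
	if typeSize > maxKeyBytes {
		return unsafe.Sizeof(uintptr(0)), true // too big: store *T in the bucket
	}
	return typeSize, false // small enough: store T inline
}

func main() {
	fmt.Println(slotSize(16))  // 16 false
	fmt.Println(slotSize(200)) // 8 true (on a 64-bit platform)
}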
|
@ -1619,26 +1621,26 @@ func mapclone(m any) any {
|
|||
// moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows
|
||||
// and the pos at which the next key/value will be written; pos == bucketCnt means the next write needs to go in an overflow bucket.
|
||||
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
|
||||
for i := 0; i < abi.MapBucketCount; i++ {
|
||||
for i := 0; i < abi.OldMapBucketCount; i++ {
|
||||
if isEmpty(src.tophash[i]) {
|
||||
continue
|
||||
}
|
||||
|
||||
for ; pos < abi.MapBucketCount; pos++ {
|
||||
for ; pos < abi.OldMapBucketCount; pos++ {
|
||||
if isEmpty(dst.tophash[pos]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if pos == abi.MapBucketCount {
|
||||
if pos == abi.OldMapBucketCount {
|
||||
dst = h.newoverflow(t, dst)
|
||||
pos = 0
|
||||
}
|
||||
|
||||
srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
|
||||
srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
|
||||
srcEle := add(unsafe.Pointer(src), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
|
||||
dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
|
||||
dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
|
||||
dstEle := add(unsafe.Pointer(dst), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
|
||||
|
||||
dst.tophash[pos] = src.tophash[i]
|
||||
if t.IndirectKey() {
|
||||
|
|
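moveToBmap above packs the occupied slots of a source bucket into the destination chain front to back, allocating a fresh overflow bucket whenever the destination fills; mapclone can do this without rehashing because the clone keeps the same bucket count. A simplified model of that compaction with invented types:

package main

import "fmt"

const bucketCnt = 8

type bucket struct {
	used     [bucketCnt]bool
	keys     [bucketCnt]int
	overflow *bucket
}

// moveTo copies every occupied slot of src into dst starting at pos,
// allocating overflow buckets as needed. It returns the bucket and position
// where the next entry should be written.
func moveTo(dst *bucket, pos int, src *bucket) (*bucket, int) {
	for i := 0; i < bucketCnt; i++ {
		if !src.used[i] {
			continue
		}
		for ; pos < bucketCnt; pos++ {
			if !dst.used[pos] {
				break
			}
		}
		if pos == bucketCnt {
			dst.overflow = &bucket{}
			dst = dst.overflow
			pos = 0
		}
		dst.used[pos] = true
		dst.keys[pos] = src.keys[i]
		pos++
	}
	return dst, pos
}

func main() {
	src := &bucket{}
	for i := 0; i < 5; i++ {
		src.used[i], src.keys[i] = true, i*10
	}
	dst := &bucket{}
	dst, pos := moveTo(dst, 0, src)
	fmt.Println(dst.keys[:pos]) // [0 10 20 30 40]
}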
@ -1742,7 +1744,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
|
|||
// Process entries one at a time.
|
||||
for srcBmap != nil {
|
||||
// move from old bucket to new bucket
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
if isEmpty(srcBmap.tophash[i]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -1756,7 +1758,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
|
|||
srcK = *((*unsafe.Pointer)(srcK))
|
||||
}
|
||||
|
||||
srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
srcEle = *((*unsafe.Pointer)(srcEle))
|
||||
}
|
||||
|
|
@ -1782,7 +1784,7 @@ func keys(m any, p unsafe.Pointer) {
|
|||
}
|
||||
s := (*slice)(p)
|
||||
r := int(rand())
|
||||
offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
|
||||
offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
|
||||
if h.B == 0 {
|
||||
copyKeys(t, h, (*bmap)(h.buckets), s, offset)
|
||||
return
|
||||
|
|
@ -1811,8 +1813,8 @@ func keys(m any, p unsafe.Pointer) {
|
|||
|
||||
func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
|
||||
for b != nil {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
|
||||
if isEmpty(b.tophash[offi]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -1845,7 +1847,7 @@ func values(m any, p unsafe.Pointer) {
|
|||
}
|
||||
s := (*slice)(p)
|
||||
r := int(rand())
|
||||
offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
|
||||
offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
|
||||
if h.B == 0 {
|
||||
copyValues(t, h, (*bmap)(h.buckets), s, offset)
|
||||
return
|
||||
|
|
@ -1874,8 +1876,8 @@ func values(m any, p unsafe.Pointer) {
|
|||
|
||||
func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
|
||||
for b != nil {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
|
||||
for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
|
||||
offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
|
||||
if isEmpty(b.tophash[offi]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -1884,7 +1886,7 @@ func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
|
|||
fatal("concurrent map read and map write")
|
||||
}
|
||||
|
||||
ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
|
||||
ele := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
ele = *((*unsafe.Pointer)(ele))
|
||||
}
|
||||
@ -0,0 +1,188 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !goexperiment.swissmap
|
||||
|
||||
package runtime_test
|
||||
|
||||
import (
|
||||
"internal/abi"
|
||||
"runtime"
|
||||
"slices"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMapIterOrder(t *testing.T) {
|
||||
sizes := []int{3, 7, 9, 15}
|
||||
if abi.OldMapBucketCountBits >= 5 {
|
||||
// it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5.
|
||||
t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.OldMapBucketCountBits)
|
||||
}
|
||||
for _, n := range sizes {
|
||||
for i := 0; i < 1000; i++ {
|
||||
// Make m be {0: true, 1: true, ..., n-1: true}.
|
||||
m := make(map[int]bool)
|
||||
for i := 0; i < n; i++ {
|
||||
m[i] = true
|
||||
}
|
||||
// Check that iterating over the map produces at least two different orderings.
|
||||
ord := func() []int {
|
||||
var s []int
|
||||
for key := range m {
|
||||
s = append(s, key)
|
||||
}
|
||||
return s
|
||||
}
|
||||
first := ord()
|
||||
ok := false
|
||||
for try := 0; try < 100; try++ {
|
||||
if !slices.Equal(first, ord()) {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const bs = abi.OldMapBucketCount
|
||||
|
||||
// belowOverflow should be a pretty-full pair of buckets;
|
||||
// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets
|
||||
// that are 13/16 full each, which is the overflow boundary.
|
||||
// Adding one to that should ensure overflow to the next higher size.
|
||||
const (
|
||||
belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75%
|
||||
atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
|
||||
)
|
||||
|
||||
var mapBucketTests = [...]struct {
|
||||
n int // n is the number of map elements
|
||||
noescape int // number of expected buckets for non-escaping map
|
||||
escape int // number of expected buckets for escaping map
|
||||
}{
|
||||
{-(1 << 30), 1, 1},
|
||||
{-1, 1, 1},
|
||||
{0, 1, 1},
|
||||
{1, 1, 1},
|
||||
{bs, 1, 1},
|
||||
{bs + 1, 2, 2},
|
||||
{belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75%
|
||||
{atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4
|
||||
|
||||
{2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
|
||||
{2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8
|
||||
|
||||
{4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75%
|
||||
{4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
|
||||
}
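The thresholds in the table above fall straight out of the 13/16 load factor: with bs = 8, belowOverflow = 12 fills two buckets to 75%, atOverflow = 13 is exactly the load two buckets may carry, and one more element forces a doubling to four buckets. A quick numeric check:

package main

import "fmt"

func main() {
	const bs = 8
	belowOverflow := bs * 3 / 2        // 12
	atOverflow := belowOverflow + bs/8 // 13
	// Two buckets hold at most 2 * 8 * 13/16 = 13 elements before growth,
	// so atOverflow just fits and atOverflow+1 doubles the table.
	fmt.Println(belowOverflow, atOverflow, 2*bs*13/16) // 12 13 13
}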
|
||||
|
||||
func TestMapBuckets(t *testing.T) {
|
||||
// Test that maps of different sizes have the right number of buckets.
|
||||
// Non-escaping maps with small buckets (like map[int]int) never
|
||||
// have a nil bucket pointer due to starting with preallocated buckets
|
||||
// on the stack. Escaping maps start with a non-nil bucket pointer if
|
||||
// hint size is above bucketCnt and thereby have more than one bucket.
|
||||
// These tests depend on bucketCnt and loadFactor* in map.go.
|
||||
t.Run("mapliteral", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := map[int]int{}
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(map[int]int{})
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("nohint", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int)
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("makemap", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int, tt.n)
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int, tt.n))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("makemap64", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int, int64(tt.n))
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int, tt.n))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
@@ -63,15 +63,17 @@ import (
 	"unsafe"
 )
 
+type maptype = abi.SwissMapType
+
 const (
 	// Maximum number of key/elem pairs a bucket can hold.
-	bucketCntBits = abi.MapBucketCountBits
+	bucketCntBits = abi.SwissMapBucketCountBits
 
 	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
 	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
 	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
 	loadFactorDen = 2
-	loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16
+	loadFactorNum = loadFactorDen * abi.SwissMapBucketCount * 13 / 16
 
 	// data offset should be the size of the bmap struct, but needs to be
 	// aligned correctly. For amd64p32 this means 64-bit alignment
@ -146,7 +148,7 @@ type bmap struct {
|
|||
// tophash generally contains the top byte of the hash value
|
||||
// for each key in this bucket. If tophash[0] < minTopHash,
|
||||
// tophash[0] is a bucket evacuation state instead.
|
||||
tophash [abi.MapBucketCount]uint8
|
||||
tophash [abi.SwissMapBucketCount]uint8
|
||||
// Followed by bucketCnt keys and then bucketCnt elems.
|
||||
// NOTE: packing all the keys together and then all the elems together makes the
|
||||
// code a bit more complicated than alternating key/elem/key/elem/... but it allows
|
||||
|
|
@ -425,7 +427,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
|
|||
top := tophash(hash)
|
||||
bucketloop:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if b.tophash[i] == emptyRest {
|
||||
break bucketloop
|
||||
|
|
@ -437,7 +439,7 @@ bucketloop:
|
|||
k = *((*unsafe.Pointer)(k))
|
||||
}
|
||||
if t.Key.Equal(key, k) {
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
e = *((*unsafe.Pointer)(e))
|
||||
}
|
||||
|
|
@ -486,7 +488,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
|
|||
top := tophash(hash)
|
||||
bucketloop:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if b.tophash[i] == emptyRest {
|
||||
break bucketloop
|
||||
|
|
@ -498,7 +500,7 @@ bucketloop:
|
|||
k = *((*unsafe.Pointer)(k))
|
||||
}
|
||||
if t.Key.Equal(key, k) {
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
e = *((*unsafe.Pointer)(e))
|
||||
}
|
||||
|
|
@ -530,7 +532,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
|
|||
top := tophash(hash)
|
||||
bucketloop:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if b.tophash[i] == emptyRest {
|
||||
break bucketloop
|
||||
|
|
@ -542,7 +544,7 @@ bucketloop:
|
|||
k = *((*unsafe.Pointer)(k))
|
||||
}
|
||||
if t.Key.Equal(key, k) {
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
e = *((*unsafe.Pointer)(e))
|
||||
}
|
||||
|
|
@ -612,12 +614,12 @@ again:
|
|||
var elem unsafe.Pointer
|
||||
bucketloop:
|
||||
for {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if isEmpty(b.tophash[i]) && inserti == nil {
|
||||
inserti = &b.tophash[i]
|
||||
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
|
||||
elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
elem = add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
}
|
||||
if b.tophash[i] == emptyRest {
|
||||
break bucketloop
|
||||
|
|
@ -635,7 +637,7 @@ bucketloop:
|
|||
if t.NeedKeyUpdate() {
|
||||
typedmemmove(t.Key, k, key)
|
||||
}
|
||||
elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
elem = add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
goto done
|
||||
}
|
||||
ovf := b.overflow(t)
|
||||
|
|
@ -659,7 +661,7 @@ bucketloop:
|
|||
newb := h.newoverflow(t, b)
|
||||
inserti = &newb.tophash[0]
|
||||
insertk = add(unsafe.Pointer(newb), dataOffset)
|
||||
elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
elem = add(insertk, abi.SwissMapBucketCount*uintptr(t.KeySize))
|
||||
}
|
||||
|
||||
// store new key/elem at insert position
|
||||
|
|
@ -725,7 +727,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
|
|||
top := tophash(hash)
|
||||
search:
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if b.tophash[i] != top {
|
||||
if b.tophash[i] == emptyRest {
|
||||
break search
|
||||
|
|
@ -746,7 +748,7 @@ search:
|
|||
} else if t.Key.Pointers() {
|
||||
memclrHasPointers(k, t.Key.Size_)
|
||||
}
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
*(*unsafe.Pointer)(e) = nil
|
||||
} else if t.Elem.Pointers() {
|
||||
|
|
@ -759,7 +761,7 @@ search:
|
|||
// change those to emptyRest states.
|
||||
// It would be nice to make this a separate function, but
|
||||
// for loops are not currently inlineable.
|
||||
if i == abi.MapBucketCount-1 {
|
||||
if i == abi.SwissMapBucketCount-1 {
|
||||
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
|
||||
goto notLast
|
||||
}
|
||||
|
|
@ -778,7 +780,7 @@ search:
|
|||
c := b
|
||||
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
|
||||
}
|
||||
i = abi.MapBucketCount - 1
|
||||
i = abi.SwissMapBucketCount - 1
|
||||
} else {
|
||||
i--
|
||||
}
|
||||
|
|
@ -839,7 +841,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
|
|||
// decide where to start
|
||||
r := uintptr(rand())
|
||||
it.startBucket = r & bucketMask(h.B)
|
||||
it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))
|
||||
it.offset = uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
|
||||
|
||||
// iterator state
|
||||
it.bucket = it.startBucket
|
||||
|
|
@ -900,8 +902,8 @@ next:
|
|||
}
|
||||
i = 0
|
||||
}
|
||||
for ; i < abi.MapBucketCount; i++ {
|
||||
offi := (i + it.offset) & (abi.MapBucketCount - 1)
|
||||
for ; i < abi.SwissMapBucketCount; i++ {
|
||||
offi := (i + it.offset) & (abi.SwissMapBucketCount - 1)
|
||||
if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
|
||||
// TODO: emptyRest is hard to use here, as we start iterating
|
||||
// in the middle of a bucket. It's feasible, just tricky.
|
||||
|
|
@ -911,7 +913,7 @@ next:
|
|||
if t.IndirectKey() {
|
||||
k = *((*unsafe.Pointer)(k))
|
||||
}
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
|
||||
e := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
|
||||
if checkBucket != noCheck && !h.sameSizeGrow() {
|
||||
// Special case: iterator was started during a grow to a larger size
|
||||
// and the grow is not done yet. We're working on a bucket whose
|
||||
|
|
@ -1002,7 +1004,7 @@ func mapclear(t *maptype, h *hmap) {
|
|||
for i := uintptr(0); i <= mask; i++ {
|
||||
b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
b.tophash[i] = emptyRest
|
||||
}
|
||||
}
|
||||
|
|
@ -1089,7 +1091,7 @@ func hashGrow(t *maptype, h *hmap) {
|
|||
|
||||
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
|
||||
func overLoadFactor(count int, B uint8) bool {
|
||||
return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
|
||||
return count > abi.SwissMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
|
||||
}
|
||||
|
||||
// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
|
||||
|
|
@ -1167,7 +1169,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
x := &xy[0]
|
||||
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
|
||||
x.k = add(unsafe.Pointer(x.b), dataOffset)
|
||||
x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
x.e = add(x.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
|
||||
|
||||
if !h.sameSizeGrow() {
|
||||
// Only calculate y pointers if we're growing bigger.
|
||||
|
|
@ -1175,13 +1177,13 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
y := &xy[1]
|
||||
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
|
||||
y.k = add(unsafe.Pointer(y.b), dataOffset)
|
||||
y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
y.e = add(y.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
|
||||
}
|
||||
|
||||
for ; b != nil; b = b.overflow(t) {
|
||||
k := add(unsafe.Pointer(b), dataOffset)
|
||||
e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
|
||||
e := add(k, abi.SwissMapBucketCount*uintptr(t.KeySize))
|
||||
for i := 0; i < abi.SwissMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
|
||||
top := b.tophash[i]
|
||||
if isEmpty(top) {
|
||||
b.tophash[i] = evacuatedEmpty
|
||||
|
|
@ -1227,13 +1229,13 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
|
|||
b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
|
||||
dst := &xy[useY] // evacuation destination
|
||||
|
||||
if dst.i == abi.MapBucketCount {
|
||||
if dst.i == abi.SwissMapBucketCount {
|
||||
dst.b = h.newoverflow(t, dst.b)
|
||||
dst.i = 0
|
||||
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
|
||||
dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
|
||||
dst.e = add(dst.k, abi.SwissMapBucketCount*uintptr(t.KeySize))
|
||||
}
|
||||
dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
dst.b.tophash[dst.i&(abi.SwissMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
|
||||
if t.IndirectKey() {
|
||||
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
|
||||
} else {
|
||||
|
|
@ -1301,18 +1303,18 @@ func reflect_makemap(t *maptype, cap int) *hmap {
|
|||
if t.Key.Equal == nil {
|
||||
throw("runtime.reflect_makemap: unsupported map key type")
|
||||
}
|
||||
if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
|
||||
t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
|
||||
if t.Key.Size_ > abi.SwissMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
|
||||
t.Key.Size_ <= abi.SwissMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
|
||||
throw("key size wrong")
|
||||
}
|
||||
if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
|
||||
t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
|
||||
if t.Elem.Size_ > abi.SwissMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
|
||||
t.Elem.Size_ <= abi.SwissMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
|
||||
throw("elem size wrong")
|
||||
}
|
||||
if t.Key.Align_ > abi.MapBucketCount {
|
||||
if t.Key.Align_ > abi.SwissMapBucketCount {
|
||||
throw("key align too big")
|
||||
}
|
||||
if t.Elem.Align_ > abi.MapBucketCount {
|
||||
if t.Elem.Align_ > abi.SwissMapBucketCount {
|
||||
throw("elem align too big")
|
||||
}
|
||||
if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
|
||||
|
|
@ -1321,7 +1323,7 @@ func reflect_makemap(t *maptype, cap int) *hmap {
|
|||
if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
|
||||
throw("elem size not a multiple of elem align")
|
||||
}
|
||||
if abi.MapBucketCount < 8 {
|
||||
if abi.SwissMapBucketCount < 8 {
|
||||
throw("bucketsize too small for proper alignment")
|
||||
}
|
||||
if dataOffset%uintptr(t.Key.Align_) != 0 {
|
||||
|
|
@ -1444,26 +1446,26 @@ func mapclone(m any) any {
|
|||
// moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows
|
||||
// and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket.
|
||||
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
|
||||
for i := 0; i < abi.MapBucketCount; i++ {
|
||||
for i := 0; i < abi.SwissMapBucketCount; i++ {
|
||||
if isEmpty(src.tophash[i]) {
|
||||
continue
|
||||
}
|
||||
|
||||
for ; pos < abi.MapBucketCount; pos++ {
|
||||
for ; pos < abi.SwissMapBucketCount; pos++ {
|
||||
if isEmpty(dst.tophash[pos]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if pos == abi.MapBucketCount {
|
||||
if pos == abi.SwissMapBucketCount {
|
||||
dst = h.newoverflow(t, dst)
|
||||
pos = 0
|
||||
}
|
||||
|
||||
srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
|
||||
srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
|
||||
srcEle := add(unsafe.Pointer(src), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
|
||||
dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
|
||||
dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
|
||||
dstEle := add(unsafe.Pointer(dst), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
|
||||
|
||||
dst.tophash[pos] = src.tophash[i]
|
||||
if t.IndirectKey() {
|
||||
|
|
@ -1567,7 +1569,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
|
|||
// Process entries one at a time.
|
||||
for srcBmap != nil {
|
||||
// move from oldBlucket to new bucket
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
if isEmpty(srcBmap.tophash[i]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -1581,7 +1583,7 @@ func mapclone2(t *maptype, src *hmap) *hmap {
|
|||
srcK = *((*unsafe.Pointer)(srcK))
|
||||
}
|
||||
|
||||
srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
srcEle = *((*unsafe.Pointer)(srcEle))
|
||||
}
|
||||
|
|
@ -1607,7 +1609,7 @@ func keys(m any, p unsafe.Pointer) {
|
|||
}
|
||||
s := (*slice)(p)
|
||||
r := int(rand())
|
||||
offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
|
||||
offset := uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
|
||||
if h.B == 0 {
|
||||
copyKeys(t, h, (*bmap)(h.buckets), s, offset)
|
||||
return
|
||||
|
|
@ -1636,8 +1638,8 @@ func keys(m any, p unsafe.Pointer) {
|
|||
|
||||
func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
|
||||
for b != nil {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
offi := (i + uintptr(offset)) & (abi.SwissMapBucketCount - 1)
|
||||
if isEmpty(b.tophash[offi]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -1670,7 +1672,7 @@ func values(m any, p unsafe.Pointer) {
|
|||
}
|
||||
s := (*slice)(p)
|
||||
r := int(rand())
|
||||
offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
|
||||
offset := uint8(r >> h.B & (abi.SwissMapBucketCount - 1))
|
||||
if h.B == 0 {
|
||||
copyValues(t, h, (*bmap)(h.buckets), s, offset)
|
||||
return
|
||||
|
|
@ -1699,8 +1701,8 @@ func values(m any, p unsafe.Pointer) {
|
|||
|
||||
func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
|
||||
for b != nil {
|
||||
for i := uintptr(0); i < abi.MapBucketCount; i++ {
|
||||
offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
|
||||
for i := uintptr(0); i < abi.SwissMapBucketCount; i++ {
|
||||
offi := (i + uintptr(offset)) & (abi.SwissMapBucketCount - 1)
|
||||
if isEmpty(b.tophash[offi]) {
|
||||
continue
|
||||
}
|
||||
|
|
@ -1709,7 +1711,7 @@ func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
|
|||
fatal("concurrent map read and map write")
|
||||
}
|
||||
|
||||
ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
|
||||
ele := add(unsafe.Pointer(b), dataOffset+abi.SwissMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
|
||||
if t.IndirectElem() {
|
||||
ele = *((*unsafe.Pointer)(ele))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,188 @@
|
|||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build goexperiment.swissmap
|
||||
|
||||
package runtime_test
|
||||
|
||||
import (
|
||||
"internal/abi"
|
||||
"runtime"
|
||||
"slices"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMapIterOrder(t *testing.T) {
|
||||
sizes := []int{3, 7, 9, 15}
|
||||
if abi.SwissMapBucketCountBits >= 5 {
|
||||
// it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5.
|
||||
t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.SwissMapBucketCountBits)
|
||||
}
|
||||
for _, n := range sizes {
|
||||
for i := 0; i < 1000; i++ {
|
||||
// Make m be {0: true, 1: true, ..., n-1: true}.
|
||||
m := make(map[int]bool)
|
||||
for i := 0; i < n; i++ {
|
||||
m[i] = true
|
||||
}
|
||||
// Check that iterating over the map produces at least two different orderings.
|
||||
ord := func() []int {
|
||||
var s []int
|
||||
for key := range m {
|
||||
s = append(s, key)
|
||||
}
|
||||
return s
|
||||
}
|
||||
first := ord()
|
||||
ok := false
|
||||
for try := 0; try < 100; try++ {
|
||||
if !slices.Equal(first, ord()) {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const bs = abi.SwissMapBucketCount
|
||||
|
||||
// belowOverflow should be a pretty-full pair of buckets;
|
||||
// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets
|
||||
// that are 13/16 full each, which is the overflow boundary.
|
||||
// Adding one to that should ensure overflow to the next higher size.
|
||||
const (
|
||||
belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75%
|
||||
atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
|
||||
)
|
||||
|
||||
var mapBucketTests = [...]struct {
|
||||
n int // n is the number of map elements
|
||||
noescape int // number of expected buckets for non-escaping map
|
||||
escape int // number of expected buckets for escaping map
|
||||
}{
|
||||
{-(1 << 30), 1, 1},
|
||||
{-1, 1, 1},
|
||||
{0, 1, 1},
|
||||
{1, 1, 1},
|
||||
{bs, 1, 1},
|
||||
{bs + 1, 2, 2},
|
||||
{belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75%
|
||||
{atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4
|
||||
|
||||
{2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
|
||||
{2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8
|
||||
|
||||
{4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75%
|
||||
{4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
|
||||
}
|
||||
|
||||
func TestMapBuckets(t *testing.T) {
|
||||
// Test that maps of different sizes have the right number of buckets.
|
||||
// Non-escaping maps with small buckets (like map[int]int) never
|
||||
// have a nil bucket pointer due to starting with preallocated buckets
|
||||
// on the stack. Escaping maps start with a non-nil bucket pointer if
|
||||
// hint size is above bucketCnt and thereby have more than one bucket.
|
||||
// These tests depend on bucketCnt and loadFactor* in map.go.
|
||||
t.Run("mapliteral", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := map[int]int{}
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(map[int]int{})
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("nohint", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int)
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("makemap", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int, tt.n)
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int, tt.n))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("makemap64", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int, int64(tt.n))
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int, tt.n))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
@ -6,7 +6,6 @@ package runtime_test
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"internal/abi"
|
||||
"internal/goarch"
|
||||
"internal/testenv"
|
||||
"math"
|
||||
|
|
@ -509,43 +508,6 @@ func TestMapNanGrowIterator(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMapIterOrder(t *testing.T) {
|
||||
sizes := []int{3, 7, 9, 15}
|
||||
if abi.MapBucketCountBits >= 5 {
|
||||
// it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5.
|
||||
t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.MapBucketCountBits)
|
||||
}
|
||||
for _, n := range sizes {
|
||||
for i := 0; i < 1000; i++ {
|
||||
// Make m be {0: true, 1: true, ..., n-1: true}.
|
||||
m := make(map[int]bool)
|
||||
for i := 0; i < n; i++ {
|
||||
m[i] = true
|
||||
}
|
||||
// Check that iterating over the map produces at least two different orderings.
|
||||
ord := func() []int {
|
||||
var s []int
|
||||
for key := range m {
|
||||
s = append(s, key)
|
||||
}
|
||||
return s
|
||||
}
|
||||
first := ord()
|
||||
ok := false
|
||||
for try := 0; try < 100; try++ {
|
||||
if !slices.Equal(first, ord()) {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Issue 8410
|
||||
func TestMapSparseIterOrder(t *testing.T) {
|
||||
// Run several rounds to increase the probability
|
||||
|
|
@ -682,144 +644,6 @@ func TestIgnoreBogusMapHint(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
const bs = abi.MapBucketCount
|
||||
|
||||
// belowOverflow should be a pretty-full pair of buckets;
|
||||
// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets
|
||||
// that are 13/16 full each, which is the overflow boundary.
|
||||
// Adding one to that should ensure overflow to the next higher size.
|
||||
const (
|
||||
belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75%
|
||||
atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
|
||||
)
|
||||
|
||||
var mapBucketTests = [...]struct {
|
||||
n int // n is the number of map elements
|
||||
noescape int // number of expected buckets for non-escaping map
|
||||
escape int // number of expected buckets for escaping map
|
||||
}{
|
||||
{-(1 << 30), 1, 1},
|
||||
{-1, 1, 1},
|
||||
{0, 1, 1},
|
||||
{1, 1, 1},
|
||||
{bs, 1, 1},
|
||||
{bs + 1, 2, 2},
|
||||
{belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75%
|
||||
{atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4
|
||||
|
||||
{2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
|
||||
{2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8
|
||||
|
||||
{4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75%
|
||||
{4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
|
||||
}
|
||||
|
||||
func TestMapBuckets(t *testing.T) {
|
||||
// Test that maps of different sizes have the right number of buckets.
|
||||
// Non-escaping maps with small buckets (like map[int]int) never
|
||||
// have a nil bucket pointer due to starting with preallocated buckets
|
||||
// on the stack. Escaping maps start with a non-nil bucket pointer if
|
||||
// hint size is above bucketCnt and thereby have more than one bucket.
|
||||
// These tests depend on bucketCnt and loadFactor* in map.go.
|
||||
t.Run("mapliteral", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := map[int]int{}
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(map[int]int{})
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("nohint", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int)
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("makemap", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int, tt.n)
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int, tt.n))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("makemap64", func(t *testing.T) {
|
||||
for _, tt := range mapBucketTests {
|
||||
localMap := make(map[int]int, int64(tt.n))
|
||||
if runtime.MapBucketsPointerIsNil(localMap) {
|
||||
t.Errorf("no escape: buckets pointer is nil for non-escaping map")
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
localMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
|
||||
t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
|
||||
}
|
||||
escapingMap := runtime.Escape(make(map[int]int, tt.n))
|
||||
if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
|
||||
t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
|
||||
}
|
||||
for i := 0; i < tt.n; i++ {
|
||||
escapingMap[i] = i
|
||||
}
|
||||
if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
|
||||
t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func benchmarkMapPop(b *testing.B, n int) {
|
||||
m := map[int]int{}
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
|
|
|||
|
|
@ -141,6 +141,7 @@ class SliceTypePrinter:
|
|||
yield ('[{0}]'.format(idx), item)
|
||||
|
||||
|
||||
# TODO(go.dev/issue/54766): Support swisstable maps.
|
||||
class MapTypePrinter:
|
||||
"""Pretty print map[K]V types.
|
||||
|
||||
|
|
|
|||
|
|
@ -119,8 +119,8 @@ import "fmt"
|
|||
import "runtime"
|
||||
var gslice []string
|
||||
func main() {
|
||||
mapvar := make(map[string]string, ` + strconv.FormatInt(abi.MapBucketCount+9, 10) + `)
|
||||
slicemap := make(map[string][]string,` + strconv.FormatInt(abi.MapBucketCount+3, 10) + `)
|
||||
mapvar := make(map[string]string, ` + strconv.FormatInt(abi.OldMapBucketCount+9, 10) + `)
|
||||
slicemap := make(map[string][]string,` + strconv.FormatInt(abi.OldMapBucketCount+3, 10) + `)
|
||||
chanint := make(chan int, 10)
|
||||
chanstr := make(chan string, 10)
|
||||
chanint <- 99
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ package runtime
|
|||
|
||||
import (
|
||||
"internal/abi"
|
||||
"internal/goexperiment"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
|
|
@ -235,8 +236,6 @@ type uncommontype = abi.UncommonType
|
|||
|
||||
type interfacetype = abi.InterfaceType
|
||||
|
||||
type maptype = abi.MapType
|
||||
|
||||
type arraytype = abi.ArrayType
|
||||
|
||||
type chantype = abi.ChanType
|
||||
|
|
@ -439,8 +438,13 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
|
|||
}
|
||||
return true
|
||||
case abi.Map:
|
||||
mt := (*maptype)(unsafe.Pointer(t))
|
||||
mv := (*maptype)(unsafe.Pointer(v))
|
||||
if goexperiment.SwissMap {
|
||||
mt := (*abi.SwissMapType)(unsafe.Pointer(t))
|
||||
mv := (*abi.SwissMapType)(unsafe.Pointer(v))
|
||||
return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
|
||||
}
|
||||
mt := (*abi.OldMapType)(unsafe.Pointer(t))
|
||||
mv := (*abi.OldMapType)(unsafe.Pointer(v))
|
||||
return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
|
||||
case abi.Pointer:
|
||||
pt := (*ptrtype)(unsafe.Pointer(t))
|
||||
|
|
|
|||
Loading…
Reference in New Issue