// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/elem pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index).  To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times).  When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
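//
// A sketch (for illustration only; the real logic is in mapaccess1 and
// friends below) of how a lookup uses the hash bits:
//
//	hash := t.Hasher(key, uintptr(h.hash0))
//	b := (*bmap)(add(h.buckets, (hash&bucketMask(h.B))*uintptr(t.BucketSize)))
//	top := tophash(hash) // high-order byte, compared against b.tophash
//	// scan b.tophash for top; on a match, compare the full key.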

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and elems)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/elem pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
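//
// loadFactor is 6.5 in the table above. Since the runtime avoids floating
// point here, it is encoded below as the integer ratio
// loadFactorNum/loadFactorDen == 13/2 (with 8-entry buckets).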

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/math"
	"unsafe"
)

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.MapBucketCountBits

	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorDen = 2
	loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map.  Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows storing a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [abi.MapBucketCount]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
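
// For illustration, with bucketCnt == 8 the code below locates entry i of a
// bucket b at these offsets (this is a layout sketch, not extra runtime logic):
//
//	key i:  add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
//	elem i: add(unsafe.Pointer(b), dataOffset+8*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
//
// with the overflow pointer stored in the last word of the bucket
// (see overflow and setoverflow below).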

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
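
// For example, on a 64-bit system a hash of 0x02ff_ffff_ffff_ffff has top
// byte 0x02, which collides with the reserved marks above, so tophash
// returns 0x02 + minTopHash == 0x07; lookups compute the same shifted
// value, so comparisons still work.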

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and rand() & 7 == 0 with probability 1/8.
	if uint32(rand())&mask == 0 {
		h.noverflow++
	}
}

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if !t.Bucket.Pointers() {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
//
// makemap_small should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makemap_small
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = uint32(rand())
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
//
// makemap should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/frugal
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makemap
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = uint32(rand())

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
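
// A worked example of the sizing loop above: make(map[int]int, 100) reaches
// makemap with hint == 100. overLoadFactor(100, 4) is false (100 <= 13*(16/2)
// == 104), so the loop stops at B == 4 and the map starts with 16 buckets,
// holding up to 104 entries before its first growth.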

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be allocated;
// otherwise dirtyalloc will be cleared and reused as the backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz, !t.Bucket.Pointers())
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.Bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.Pointers() {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
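
// A worked example of the preallocation above: for b == 5, base == 32 and
// bucketShift(1) == 2 overflow buckets are added, so nbuckets is 34 before
// any round-up by roundupsize. The two extra buckets sit at the end of the
// array and are handed out one at a time by newoverflow via
// h.extra.nextOverflow.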

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
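// For key types without a specialized fast variant, the compiler lowers
// v := m[k] to a call of mapaccess1 (and v, ok := m[k] to mapaccess2 below)
// and copies the element out of the returned slot, which is why returning
// &zeroVal[0] for a missing key is safe.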
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapaccessK returns both key and elem. Used by the map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
//
// mapassign should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
					elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.Key.Equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.NeedKeyUpdate() {
				typedmemmove(t.Key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize))
	}

	// store new key/elem at insert position
	if t.IndirectKey() {
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.Key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.IndirectElem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
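
// Note that mapassign only returns a pointer to the element slot; the
// compiler emits the store of the value through that pointer, so m[k] = v
// becomes, roughly, *mapassign(t, h, &k) = v (again modulo the specialized
// fast variants for common key types).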

// mapdelete should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapdelete
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if err := mapKeyError(t, key); err != nil {
			panic(err) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			k2 := k
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.Key.Equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.IndirectKey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.Key.Pointers() {
				memclrHasPointers(k, t.Key.Size_)
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			if t.IndirectElem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			// It would be nice to make this a separate function, but
			// for loops are not currently inlineable.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
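
// An illustration of the emptyRest maintenance above: if a deletion leaves a
// bucket's tophash as
//
//	[h0 h1 h2 emptyOne emptyOne emptyOne emptyOne emptyOne]
//
// and no later cell (including overflow buckets) is occupied, the backward
// scan rewrites the trailing emptyOne cells to emptyRest, letting future
// lookups stop at the first emptyRest instead of scanning the whole chain.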

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
//
// mapiterinit should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/goccy/go-json
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapiterinit
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
		throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
	}
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if !t.Bucket.Pointers() {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(rand())
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
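
// The random start chosen above is why map iteration order varies between
// range loops. For example, with h.B == 2 the iterator begins at bucket
// r&3 and cell (r>>2)&7 for a fresh random r, so two iterations of the
// same unmodified map will usually visit entries in different orders.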

// mapiternext should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/ugorji/go/codec
//   - gonum.org/v1/gonum
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapiternext
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < abi.MapBucketCount; i++ {
		offi := (i + it.offset) & (abi.MapBucketCount - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// TODO: emptyRest is hard to use here, as we start iterating
			// in the middle of a bucket. It's feasible, just tricky.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. Or at least, it wasn't
			// evacuated when we started the bucket. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow).
			if t.ReflexiveKey() || t.Key.Equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := t.Hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs).  We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.ReflexiveKey() || t.Key.Equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.IndirectElem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map.
// It is called by the compiler.
//
// mapclear should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/frugal
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapclear
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	h.flags ^= hashWriting

	// Mark buckets empty, so existing iterators can be terminated, see issue #59411.
	markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
		for i := uintptr(0); i <= mask; i++ {
			b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
			for ; b != nil; b = b.overflow(t) {
				for i := uintptr(0); i < abi.MapBucketCount; i++ {
					b.tophash[i] = emptyRest
				}
			}
		}
	}
	markBucketsEmpty(h.buckets, bucketMask(h.B))
	if oldBuckets := h.oldbuckets; oldBuckets != nil {
		markBucketsEmpty(oldBuckets, h.oldbucketmask())
	}

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See issue 25237.
	h.hash0 = uint32(rand())

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly alloced.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
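
// For example, with B == 5 (32 buckets) overLoadFactor reports true once
// count exceeds loadFactorNum*(32/loadFactorDen) == 13*16 == 208, i.e. an
// average load above 6.5 entries per 8-entry bucket.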

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}
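
// For example, a map with B == 5 is considered to have too many overflow
// buckets once noverflow reaches 32; for B > 15 the threshold is capped at
// 1<<15, matching the approximate counting done by incrnoverflow.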

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

//go:linkname sameSizeGrowForIssue69110Test
func sameSizeGrowForIssue69110Test(h *hmap) bool {
	return h.sameSizeGrow()
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}
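
// Evacuating the target's old bucket plus at most one more per write keeps
// growing O(1) per map operation while still guaranteeing forward progress:
// h.nevacuate only advances, and once it reaches the number of old buckets,
// advanceEvacuationMark releases the old array.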

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*uintptr(t.KeySize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*uintptr(t.KeySize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*uintptr(t.KeySize))
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.IndirectKey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*uintptr(t.KeySize))
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.IndirectKey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.Key, dst.k, k) // copy key
				}
				if t.IndirectElem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.Elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.KeySize))
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
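
// A worked example of the x/y split above: growing from B == 5 to B == 6,
// newbit == 32, so old bucket 3 splits into new bucket 3 (x, hash&32 == 0)
// and new bucket 35 (y, hash&32 != 0); the one extra hash bit decides each
// key's destination.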

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

// Reflect stubs. Called from ../reflect/asm_*.s

// reflect_makemap is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/goccy/go-json
//   - github.com/RomiChan/protobuf
//   - github.com/segmentio/encoding
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflect's math.
	if t.Key.Equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
		throw("key size wrong")
	}
	if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
		throw("elem size wrong")
	}
	if t.Key.Align_ > abi.MapBucketCount {
		throw("key align too big")
	}
	if t.Elem.Align_ > abi.MapBucketCount {
		throw("elem align too big")
	}
	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if abi.MapBucketCount < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.Key.Align_) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.Elem.Align_) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

// reflect_mapaccess is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
	elem, ok := mapaccess2_faststr(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

// reflect_mapassign is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
//
//go:linkname reflect_mapassign reflect.mapassign0
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
	p := mapassign_faststr(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
	mapdelete_faststr(t, h, key)
}

// reflect_mapiterinit is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/modern-go/reflect2
//   - gitee.com/quant1x/gox
//   - github.com/v2pro/plz
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
	mapiterinit(t, h, it)
}

// reflect_mapiternext is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/goccy/go-json
//   - github.com/v2pro/plz
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

// reflect_mapiterkey is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/goccy/go-json
//   - gonum.org/v1/gonum
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

// reflect_mapiterelem is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/goccy/go-json
//   - gonum.org/v1/gonum
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

// reflect_maplen is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/goccy/go-json
//   - github.com/wI2L/jettison
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_mapclear reflect.mapclear
func reflect_mapclear(t *maptype, h *hmap) {
	mapclear(t, h)
}

//go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

// mapinitnoop is a no-op function known to the Go linker; if a given global
// map (of the right size) is determined to be dead, the linker will
// rewrite the relocation (from the package init func) from the outlined
// map init function to this symbol. Defined in assembly so as to avoid
// complications with instrumentation (coverage, etc).
func mapinitnoop()

// mapclone for implementing maps.Clone
//
//go:linkname mapclone maps.clone
func mapclone(m any) any {
	e := efaceOf(&m)
	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
	return m
}

// moveToBmap moves a bucket from src to dst. It returns the destination bucket
// (which may be a new overflow bucket if dst filled up) and the position at
// which the next key/elem will be written; pos == bucketCnt means the next
// write must go to a new overflow bucket.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
	for i := 0; i < abi.MapBucketCount; i++ {
		if isEmpty(src.tophash[i]) {
			continue
		}

		for ; pos < abi.MapBucketCount; pos++ {
			if isEmpty(dst.tophash[pos]) {
				break
			}
		}

		if pos == abi.MapBucketCount {
			dst = h.newoverflow(t, dst)
			pos = 0
		}

		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
		srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
		dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))

		dst.tophash[pos] = src.tophash[i]
		if t.IndirectKey() {
			srcK = *(*unsafe.Pointer)(srcK)
			if t.NeedKeyUpdate() {
				kStore := newobject(t.Key)
				typedmemmove(t.Key, kStore, srcK)
				srcK = kStore
			}
			// Note: if NeedKeyUpdate is false, then the memory
			// used to store the key is immutable, so we can share
			// it between the original map and its clone.
			*(*unsafe.Pointer)(dstK) = srcK
		} else {
			typedmemmove(t.Key, dstK, srcK)
		}
		if t.IndirectElem() {
			srcEle = *(*unsafe.Pointer)(srcEle)
			eStore := newobject(t.Elem)
			typedmemmove(t.Elem, eStore, srcEle)
			*(*unsafe.Pointer)(dstEle) = eStore
		} else {
			typedmemmove(t.Elem, dstEle, srcEle)
		}
		pos++
		h.count++
	}
	return dst, pos
}

func mapclone2(t *maptype, src *hmap) *hmap {
	hint := src.count
	if overLoadFactor(hint, src.B) {
		// Note: in rare cases (e.g. during a same-sized grow) the map
		// can be overloaded. Make sure we don't allocate a destination
		// bucket array larger than the source bucket array.
		// This will cause the cloned map to be overloaded also,
		// but that's better than crashing. See issue 69110.
		hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
	}
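	// For example, with abi.MapBucketCount == 8 (so loadFactorNum == 13,
	// loadFactorDen == 2) and src.B == 2, the clamp yields
	// hint = 13 * (4/2) = 26, the largest count for which
	// overLoadFactor(hint, 2) is still false; makemap below then
	// picks dst.B == 2 as well.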
	dst := makemap(t, hint, nil)
	dst.hash0 = src.hash0
	dst.nevacuate = 0
	// flags need not be copied here: like any freshly made map, the clone starts with no flags set.

	if src.count == 0 {
		return dst
	}

	if src.flags&hashWriting != 0 {
		fatal("concurrent map clone and map write")
	}

	if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
		// Quick copy for small maps.
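		// Sharing an indirect key is safe only while its backing memory
		// stays immutable (!NeedKeyUpdate), and indirect elems are mutable
		// and must never be shared, so those cases take the per-entry
		// paths below instead.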
		dst.buckets = newobject(t.Bucket)
		dst.count = src.count
		typedmemmove(t.Bucket, dst.buckets, src.buckets)
		return dst
	}

	if dst.B == 0 {
		dst.buckets = newobject(t.Bucket)
	}
	dstArraySize := int(bucketShift(dst.B))
	srcArraySize := int(bucketShift(src.B))
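	// Clone the current bucket array. dst.B <= src.B here (the hint never
	// exceeds what src.B can hold), so every source bucket whose index is
	// congruent to i modulo dstArraySize drains into destination bucket i,
	// overflow chains included.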
	for i := 0; i < dstArraySize; i++ {
		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
		pos := 0
		for j := 0; j < srcArraySize; j += dstArraySize {
			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
		}
	}

	if src.oldbuckets == nil {
		return dst
	}

	oldB := src.B
	srcOldbuckets := src.oldbuckets
	if !src.sameSizeGrow() {
		oldB--
	}
	oldSrcArraySize := int(bucketShift(oldB))

	for i := 0; i < oldSrcArraySize; i++ {
		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
		if evacuated(srcBmap) {
			continue
		}

		if oldB >= dst.B { // dst has at most as many bucket bits, so this whole old bucket maps into a single dst bucket
			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
			for dstBmap.overflow(t) != nil {
				dstBmap = dstBmap.overflow(t)
			}
			pos := 0
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
			continue
		}

		// oldB < dst.B, so a single source bucket may go to multiple destination buckets.
		// Process entries one at a time.
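		// For example, with oldB == 1 and dst.B == 2, the entries of old
		// bucket 1 (hash low bit 1) split between dst buckets 1 and 3
		// depending on the next hash bit, so each entry is rehashed via
		// mapassign below.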
		for srcBmap != nil {
			// Move each entry from the old bucket to its new bucket.
			for i := uintptr(0); i < abi.MapBucketCount; i++ {
				if isEmpty(srcBmap.tophash[i]) {
					continue
				}

				if src.flags&hashWriting != 0 {
					fatal("concurrent map clone and map write")
				}

				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
				if t.IndirectKey() {
					srcK = *((*unsafe.Pointer)(srcK))
				}

				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					srcEle = *((*unsafe.Pointer)(srcEle))
				}
				dstEle := mapassign(t, dst, srcK)
				typedmemmove(t.Elem, dstEle, srcEle)
			}
			srcBmap = srcBmap.overflow(t)
		}
	}
	return dst
}

// keys for implementing maps.keys
//
//go:linkname keys maps.keys
func keys(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)

	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
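	// Randomize both the starting bucket and the intra-bucket offset, as
	// mapiterinit does, so callers cannot come to rely on any particular
	// key order.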
	r := int(rand())
	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
	if h.B == 0 {
		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyKeys(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyKeys(t, h, b, s, offset)
		}
	}
}
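
// The caller supplies the destination slice, preallocated with capacity
// for every key; a sketch of the expected calling convention (copyKeys
// treats any shortfall of capacity as evidence of a concurrent writer):
//
//	r := make([]K, 0, len(m))
//	keys(m, unsafe.Pointer(&r))
//
// values and copyValues below follow the same contract for elems.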

func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}
			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}
			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
			s.len++
		}
		b = b.overflow(t)
	}
}

// values for implementing maps.values
//
//go:linkname values maps.values
func values(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)
	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(rand())
	offset := uint8(r >> h.B & (abi.MapBucketCount - 1))
	if h.B == 0 {
		copyValues(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyValues(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyValues(t, h, b, s, offset)
		}
	}
}

func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}

			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}

			ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
			if t.IndirectElem() {
				ele = *((*unsafe.Pointer)(ele))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
			s.len++
		}
		b = b.overflow(t)
	}
}