// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

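// mapaccess1_fast32 is the lookup fast path for maps with 32-bit keys
// (e.g. map[uint32]V). It returns a pointer to the element for key, or a
// pointer to the zero value if the key is not present. It never returns nil.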
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
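	// Search the bucket and its overflow chain. Keys are stored as raw
	// uint32 values, so they are compared directly; tophash is only
	// consulted to skip empty slots.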
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

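// mapaccess2_fast32 is like mapaccess1_fast32, but also reports whether
// the key is present in the map.
//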
// mapaccess2_fast32 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast32
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
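	// Search the bucket and its overflow chain, as in mapaccess1_fast32.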
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

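// mapassign_fast32 is the assignment fast path for maps with 32-bit keys.
// It returns a pointer to the element slot for key, inserting the key if
// it is not already present; the caller (normally compiler-generated code
// for m[k] = v) then stores the element value through that pointer.
//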
// mapassign_fast32 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

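	// Walk the bucket and its overflow chain. Remember the first empty
	// slot in case the key is not found, and stop early at emptyRest,
	// which marks the end of the used portion of the chain.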
bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

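// mapassign_fast32ptr is the counterpart of mapassign_fast32 for maps
// whose keys are 32-bit pointers (pointer keys on 32-bit platforms). The
// key is stored through an unsafe.Pointer so the store is visible to the
// garbage collector as a pointer write.
//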
// mapassign_fast32ptr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32ptr
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

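	// Same scan as in mapassign_fast32: remember the first empty slot and
	// stop at emptyRest.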
bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

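// mapdelete_fast32 is the deletion fast path for maps with 32-bit keys.
// Deleting a key that is not present is a no-op.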
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			// This can only happen if pointers are 32 bit
			// wide as 64 bit pointers do not fit into a 32 bit key.
			if goarch.PtrSize == 4 && t.Key.Pointers() {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
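			// Walk backwards through the chain, converting the trailing
			// run of emptyOne slots into emptyRest.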
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

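// growWork_fast32 does a unit of incremental grow work: it evacuates the
// old bucket that maps to the bucket about to be used and, if the map is
// still growing, one additional old bucket to make progress on the grow.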
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

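// evacuate_fast32 moves the entries of old bucket oldbucket (and its
// overflow chain) into the new bucket array. When the table is doubling,
// each entry goes to one of two destinations (x or y) depending on the
// newly significant bit of its hash; for a same-size grow everything
// stays in x.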
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*4)
		}

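		// Copy every live entry in the old bucket chain to its destination,
		// recording the evacuation state of each slot in its old tophash.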
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*4)
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*4)
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}