// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

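// mapaccess1_fast64 is like mapaccess1, but specialized for maps whose keys
// are 8 bytes wide, e.g. an access like v := m[k] on a map[uint64]T.
// It returns a pointer to the element for key, or a pointer to the zero
// value if key is not present in the map.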
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
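		// The map is growing: the key may still live in its old bucket if that
		// bucket has not been evacuated yet, so prefer the old bucket in that case.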
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
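	// Scan the bucket and its overflow chain. Keys are stored as 8-byte slots
	// immediately after the tophash array; the elements follow all the keys.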
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapassign_fast64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

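	// h.buckets is nil when the map was created without any buckets
	// (for example by makemap_small); allocate the initial bucket lazily here.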
	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

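	// Walk the bucket chain: stop early at emptyRest, remember the first free
	// slot in case the key is absent, and jump to done if the key already exists.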
bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

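// done is reached both when key was found in place and when a new slot was
// claimed; insertb and inserti identify that slot.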
done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

// mapassign_fast64ptr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64ptr
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
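	// This mirrors mapassign_fast64, except that the 8-byte key is a single
	// pointer, so the key loads and stores below are pointer-typed.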
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

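// mapdelete_fast64 is like mapdelete, but specialized for maps whose keys are
// 8 bytes wide. Deleting a key that is not present is a no-op.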
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.Key.Pointers() {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
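			// Walk backwards from the deleted slot, converting trailing emptyOne
			// marks to emptyRest, possibly crossing into earlier buckets in the chain.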
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

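// growWork_fast64 does a little incremental evacuation work on behalf of a
// write to bucket, so that an in-progress grow eventually completes.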
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

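// evacuate_fast64 moves all entries of old bucket oldbucket (and its overflow
// chain) into the new bucket array and marks the old bucket as evacuated.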
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*8)
		}

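		// Copy every live entry of the old bucket chain to its destination,
		// choosing x or y (for a size-doubling grow) based on the new bit of
		// the key's hash.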
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*8)
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*8)
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.Key.Pointers() && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.Key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}