// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

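// mapaccess1_faststr is the fast path for reads of maps with string keys. It
// returns a pointer to h[ky]; when the key is absent it returns a pointer to
// the zero object rather than nil, so callers can always load through the
// result. Callers copy the value out immediately, since the pointer may be
// invalidated by later map writes.
//
// As a rough illustration only (hypothetical pseudo-code, not the compiler's
// actual lowering), a read such as
//
//	v := m["k"]
//
// on a map[string]V compiles to approximately
//
//	p := mapaccess1_faststr(mapTypeOfM, hmapOfM, "k") // placeholder names
//	v := *(*V)(p)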
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
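	// key views ky's header as a stringStruct: a (str unsafe.Pointer, len int)
	// pair, the same representation used for the keys stored in the buckets.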
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(abi.MapBucketCount)
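		// keymaybe records the single candidate slot found so far;
		// abi.MapBucketCount acts as the "none" sentinel, since valid slot
		// indices run from 0 to abi.MapBucketCount-1.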
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
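			// Both 4-byte loads above are in bounds: this branch handles keys
			// with key.len >= 32, and k.len == key.len was checked earlier.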
			if keymaybe != abi.MapBucketCount {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != abi.MapBucketCount {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

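// mapaccess2_faststr is the comma-ok counterpart of mapaccess1_faststr: it
// returns a pointer to h[ky] (or to the zero object) together with a boolean
// reporting whether the key was present, as in v, ok := m[s].
//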
// mapaccess2_faststr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_faststr
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(abi.MapBucketCount)
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != abi.MapBucketCount {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != abi.MapBucketCount {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

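// mapassign_faststr returns a pointer to the element slot for key s in map h,
// creating the slot if the key is not already present; the caller stores the
// element value through the returned pointer.
//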
// mapassign_faststr should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cloudwego/frugal
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname mapassign_faststr
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

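	// insertb/inserti track the first empty slot seen during the scan below,
	// so the new key can land there if no existing entry matches.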
	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < abi.MapBucketCount; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			// Overwrite existing key, so it can be garbage collected.
			// The size is already guaranteed to be set correctly.
			k.str = key.str
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(abi.MapBucketCount-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

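// mapdelete_faststr removes the entry for key ky from map h, if present. It is
// the string-key specialization used for delete(m, s).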
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
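	// bOrig remembers the head of this bucket's overflow chain so the
	// emptyOne -> emptyRest cleanup below can walk backwards from any
	// bucket in the chain.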
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
			if t.Elem.Pointers() {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == abi.MapBucketCount-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = abi.MapBucketCount - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = uint32(rand())
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

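// growWork_faststr makes progress on an in-flight grow: it evacuates the old
// bucket that feeds the bucket about to be written, plus at most one more, so
// that evacuation keeps pace with writes and the grow eventually finishes.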
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

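// evacuate_faststr copies the entries of old bucket oldbucket into the new
// bucket array. When the table is doubling, each entry moves to one of two
// destinations (x, same index, or y, index plus newbit) depending on one bit
// of its hash; for a same-size grow everything moves to x.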
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, abi.MapBucketCount*2*goarch.PtrSize)
			for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == abi.MapBucketCount {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize)
				}
				dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*goarch.PtrSize)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}