// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const itabInitSize = 512

var (
	itabLock      mutex                               // lock for accessing itab table
	itabTable     = &itabTableInit                    // pointer to current table
	itabTableInit = itabTableType{size: itabInitSize} // starter table
)

// Note: change the formula in the mallocgc call in itabAdd if you change these fields.
type itabTableType struct {
	size    uintptr             // length of entries array. Always a power of 2.
	count   uintptr             // current number of filled entries.
	entries [itabInitSize]*itab // really [size] large
}

func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
	// compiler has provided some good hash codes for us.
	return uintptr(inter.Type.Hash ^ typ.Hash)
}

// getitab should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname getitab
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
	if len(inter.Methods) == 0 {
		throw("internal error - misuse of itab")
	}

	// easy case
	if typ.TFlag&abi.TFlagUncommon == 0 {
		if canfail {
			return nil
		}
		name := toRType(&inter.Type).nameOff(inter.Methods[0].Name)
		panic(&TypeAssertionError{nil, typ, &inter.Type, name.Name()})
	}

	var m *itab

	// First, look in the existing table to see if we can find the itab we need.
	// This is by far the most common case, so do it without locks.
	// Use atomic to ensure we see any previous writes done by the thread
	// that updates the itabTable field (with atomic.Storep in itabAdd).
	t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable)))
	if m = t.find(inter, typ); m != nil {
		goto finish
	}

	// Not found.  Grab the lock and try again.
	lock(&itabLock)
	if m = itabTable.find(inter, typ); m != nil {
		unlock(&itabLock)
		goto finish
	}

	// Entry doesn't exist yet. Make a new entry & add it.
	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
	m.Inter = inter
	m.Type = typ
	// The hash is used in type switches. However, compiler statically generates itab's
	// for all interface/type pairs used in switches (which are added to itabTable
	// in itabsinit). The dynamically-generated itab's never participate in type switches,
	// and thus the hash is irrelevant.
	// Note: m.Hash is _not_ the hash used for the runtime itabTable hash table.
	m.Hash = 0
	itabInit(m, true)
	itabAdd(m)
	unlock(&itabLock)
finish:
	if m.Fun[0] != 0 {
		return m
	}
	if canfail {
		return nil
	}
	// this can only happen if the conversion
	// was already done once using the , ok form
	// and we have a cached negative result.
	// The cached result doesn't record which
	// interface function was missing, so initialize
	// the itab again to get the missing function name.
	panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: itabInit(m, false)})
}

// find finds the given interface/type pair in t.
// Returns nil if the given interface/type pair isn't present.
func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab {
	// Implemented using quadratic probing.
	// Probe sequence is h(i) = h0 + i*(i+1)/2 mod 2^k.
	// We're guaranteed to hit all table entries using this probe sequence.
	mask := t.size - 1
	h := itabHashFunc(inter, typ) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		// Use atomic read here so if we see m != nil, we also see
		// the initializations of the fields of m.
		// m := *p
		m := (*itab)(atomic.Loadp(unsafe.Pointer(p)))
		if m == nil {
			return nil
		}
		if m.Inter == inter && m.Type == typ {
			return m
		}
		h += i
		h &= mask
	}
}
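
// Why the probe sequence above visits every slot: with a table of size 2^k,
// the offsets h(i) = h0 + i*(i+1)/2 are all distinct mod 2^k (triangular-number
// probing). A minimal sketch checking that property for a hypothetical size-8
// table:
//
//	seen := make(map[uintptr]bool)
//	h := uintptr(0)
//	for i := uintptr(1); i <= 8; i++ {
//		seen[h&7] = true // visit slot
//		h += i           // next triangular-number step
//	}
//	// len(seen) == 8: every slot was visited exactly once.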

// itabAdd adds the given itab to the itab hash table.
// itabLock must be held.
func itabAdd(m *itab) {
	// Bugs can lead to calling this while mallocing is set,
	// typically because this is called while panicking.
	// Crash reliably, rather than only when we need to grow
	// the hash table.
	if getg().m.mallocing != 0 {
		throw("malloc deadlock")
	}

	t := itabTable
	if t.count >= 3*(t.size/4) { // 75% load factor
		// Grow hash table.
		// t2 = new(itabTableType) + some additional entries
		// We lie and tell malloc we want pointer-free memory because
		// all the pointed-to values are not in the heap.
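		// The size asked of mallocgc is two pointer-sized words for the
		// size and count fields plus 2*t.size entry slots for the doubled table.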
		t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
		t2.size = t.size * 2

		// Copy over entries.
		// Note: while copying, other threads may look for an itab and
		// fail to find it. That's ok, they will then try to get the itab lock
		// and as a consequence wait until this copying is complete.
		iterate_itabs(t2.add)
		if t2.count != t.count {
			throw("mismatched count during itab table copy")
		}
		// Publish new hash table. Use an atomic write: see comment in getitab.
		atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2))
		// Adopt the new table as our own.
		t = itabTable
		// Note: the old table can be GC'ed here.
	}
	t.add(m)
}

// add adds the given itab to itab table t.
// itabLock must be held.
func (t *itabTableType) add(m *itab) {
	// See comment in find about the probe sequence.
	// Insert new itab in the first empty spot in the probe sequence.
	mask := t.size - 1
	h := itabHashFunc(m.Inter, m.Type) & mask
	for i := uintptr(1); ; i++ {
		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		m2 := *p
		if m2 == m {
			// A given itab may be used in more than one module
			// and thanks to the way global symbol resolution works, the
			// pointed-to itab may already have been inserted into the
			// global 'hash'.
			return
		}
		if m2 == nil {
			// Use atomic write here so if a reader sees m, it also
			// sees the correctly initialized fields of m.
			// NoWB is ok because m is not in heap memory.
			// *p = m
			atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m))
			t.count++
			return
		}
		h += i
		h &= mask
	}
}

// itabInit fills in the m.Fun array with all the code pointers for
// the m.Inter/m.Type pair. If the type does not implement the interface,
// it sets m.Fun[0] to 0 and returns the name of an interface function that is missing.
// If !firstTime, itabInit will not write anything to m.Fun (see issue 65962).
// It is ok to call this multiple times on the same m, even concurrently
// (although it will only be called once with firstTime==true).
func itabInit(m *itab, firstTime bool) string {
	inter := m.Inter
	typ := m.Type
	x := typ.Uncommon()

	// both inter and typ have their methods sorted by name,
	// and interface names are unique,
	// so we can iterate over both in lock step;
	// the loop is O(ni+nt) not O(ni*nt).
	ni := len(inter.Methods)
	nt := int(x.Mcount)
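	// View the nt methods stored at x+Moff as a slice of abi.Method.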
	xmhdr := (*[1 << 16]abi.Method)(add(unsafe.Pointer(x), uintptr(x.Moff)))[:nt:nt]
	j := 0
	methods := (*[1 << 16]unsafe.Pointer)(unsafe.Pointer(&m.Fun[0]))[:ni:ni]
	var fun0 unsafe.Pointer
imethods:
	for k := 0; k < ni; k++ {
		i := &inter.Methods[k]
		itype := toRType(&inter.Type).typeOff(i.Typ)
		name := toRType(&inter.Type).nameOff(i.Name)
		iname := name.Name()
		ipkg := pkgPath(name)
		if ipkg == "" {
			ipkg = inter.PkgPath.Name()
		}
		for ; j < nt; j++ {
			t := &xmhdr[j]
			rtyp := toRType(typ)
			tname := rtyp.nameOff(t.Name)
			if rtyp.typeOff(t.Mtyp) == itype && tname.Name() == iname {
				pkgPath := pkgPath(tname)
				if pkgPath == "" {
					pkgPath = rtyp.nameOff(x.PkgPath).Name()
				}
				if tname.IsExported() || pkgPath == ipkg {
					ifn := rtyp.textOff(t.Ifn)
					if k == 0 {
						fun0 = ifn // we'll set m.Fun[0] at the end
					} else if firstTime {
						methods[k] = ifn
					}
					continue imethods
				}
			}
		}
		// didn't find method
		// Leaves m.Fun[0] set to 0.
		return iname
	}
	if firstTime {
		m.Fun[0] = uintptr(fun0)
	}
	return ""
}
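
// The scan above relies on both method lists being sorted by name. A minimal
// sketch of the same lock-step merge over two sorted name lists (hypothetical
// string slices, not the runtime's real representation):
//
//	j := 0
//	for _, want := range ifaceNames { // sorted ascending
//		for j < len(typNames) && typNames[j] < want {
//			j++ // skip concrete methods the interface doesn't ask for
//		}
//		if j == len(typNames) || typNames[j] != want {
//			return want // missing method
//		}
//	}
//	return "" // every interface method was found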

func itabsinit() {
	lockInit(&itabLock, lockRankItab)
	lock(&itabLock)
	for _, md := range activeModules() {
		for _, i := range md.itablinks {
			itabAdd(i)
		}
	}
	unlock(&itabLock)
}

// panicdottypeE is called when doing an e.(T) conversion and the conversion fails.
// have = the dynamic type we have.
// want = the static type we're trying to convert to.
// iface = the static type we're converting from.
func panicdottypeE(have, want, iface *_type) {
	panic(&TypeAssertionError{iface, have, want, ""})
}

// panicdottypeI is called when doing an i.(T) conversion and the conversion fails.
// Same args as panicdottypeE, but "have" is the dynamic itab we have.
func panicdottypeI(have *itab, want, iface *_type) {
	var t *_type
	if have != nil {
		t = have.Type
	}
	panicdottypeE(t, want, iface)
}

// panicnildottype is called when doing an i.(T) conversion and the interface i is nil.
// want = the static type we're trying to convert to.
func panicnildottype(want *_type) {
	panic(&TypeAssertionError{nil, nil, want, ""})
	// TODO: Add the static type we're converting from as well.
	// It might generate a better error message.
	// Just to match other nil conversion errors, we don't for now.
}

// The specialized convTx routines need a type descriptor to use when calling mallocgc.
// We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness.
// However, when debugging, it'd be nice to have some indication in mallocgc where the types came from,
// so we use named types here.
// We then construct interface values of these types,
// and then extract the type word to use as needed.
type (
	uint16InterfacePtr uint16
	uint32InterfacePtr uint32
	uint64InterfacePtr uint64
	stringInterfacePtr string
	sliceInterfacePtr  []byte
)

var (
	uint16Eface any = uint16InterfacePtr(0)
	uint32Eface any = uint32InterfacePtr(0)
	uint64Eface any = uint64InterfacePtr(0)
	stringEface any = stringInterfacePtr("")
	sliceEface  any = sliceInterfacePtr(nil)

	uint16Type *_type = efaceOf(&uint16Eface)._type
	uint32Type *_type = efaceOf(&uint32Eface)._type
	uint64Type *_type = efaceOf(&uint64Eface)._type
	stringType *_type = efaceOf(&stringEface)._type
	sliceType  *_type = efaceOf(&sliceEface)._type
)

// The conv and assert functions below do very similar things.
// The convXXX functions are guaranteed by the compiler to succeed.
// The assertXXX functions may fail (either panicking or returning false,
// depending on whether they are 1-result or 2-result).
// The convXXX functions succeed on a nil input, whereas the assertXXX
// functions fail on a nil input.
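//
// For example (illustrative only; the compiler picks the exact runtime calls):
//
//	var x uint64 = 5
//	var e any = x // conversion: a convT-style call such as convT64
//	s, ok := e.(interface{ String() string }) // assertion: the ",ok" form cannot panic
//	_, _ = s, ok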

// convT converts a value of type t, which is pointed to by v, to a pointer that can
// be used as the second word of an interface value.
func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
	if raceenabled {
		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convT))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}
	x := mallocgc(t.Size_, t, true)
	typedmemmove(t, x, v)
	return x
}
func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
	// TODO: maybe take size instead of type?
	if raceenabled {
		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convTnoptr))
	}
	if msanenabled {
		msanread(v, t.Size_)
	}
	if asanenabled {
		asanread(v, t.Size_)
	}

	x := mallocgc(t.Size_, t, false)
	memmove(x, v, t.Size_)
	return x
}

func convT16(val uint16) (x unsafe.Pointer) {
	if val < uint16(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
		if goarch.BigEndian {
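			// A uint16 stored in a big-endian uint64 occupies the last
			// two bytes, hence the offset of 6 (convT32 uses 4 likewise).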
			x = add(x, 6)
		}
	} else {
		x = mallocgc(2, uint16Type, false)
		*(*uint16)(x) = val
	}
	return
}

func convT32(val uint32) (x unsafe.Pointer) {
	if val < uint32(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
		if goarch.BigEndian {
			x = add(x, 4)
		}
	} else {
		x = mallocgc(4, uint32Type, false)
		*(*uint32)(x) = val
	}
	return
}

// convT64 should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname convT64
func convT64(val uint64) (x unsafe.Pointer) {
	if val < uint64(len(staticuint64s)) {
		x = unsafe.Pointer(&staticuint64s[val])
	} else {
		x = mallocgc(8, uint64Type, false)
		*(*uint64)(x) = val
	}
	return
}

// convTstring should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname convTstring
func convTstring(val string) (x unsafe.Pointer) {
	if val == "" {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		x = mallocgc(unsafe.Sizeof(val), stringType, true)
		*(*string)(x) = val
	}
	return
}

// convTslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname convTslice
func convTslice(val []byte) (x unsafe.Pointer) {
	// Note: this must work for any element type, not just byte.
	if (*slice)(unsafe.Pointer(&val)).array == nil {
		x = unsafe.Pointer(&zeroVal[0])
	} else {
		x = mallocgc(unsafe.Sizeof(val), sliceType, true)
		*(*[]byte)(x) = val
	}
	return
}

func assertE2I(inter *interfacetype, t *_type) *itab {
	if t == nil {
		// explicit conversions require a non-nil interface value.
		panic(&TypeAssertionError{nil, nil, &inter.Type, ""})
	}
	return getitab(inter, t, false)
}

func assertE2I2(inter *interfacetype, t *_type) *itab {
	if t == nil {
		return nil
	}
	return getitab(inter, t, true)
}

// typeAssert builds an itab for the concrete type t and the
// interface type s.Inter. If the conversion is not possible it
// panics if s.CanFail is false and returns nil if s.CanFail is true.
func typeAssert(s *abi.TypeAssert, t *_type) *itab {
	var tab *itab
	if t == nil {
		if !s.CanFail {
			panic(&TypeAssertionError{nil, nil, &s.Inter.Type, ""})
		}
	} else {
		tab = getitab(s.Inter, t, s.CanFail)
	}

	if !abi.UseInterfaceSwitchCache(GOARCH) {
		return tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1000 times.
		return tab
	}
	// Load the current cache.
	oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache.
		return tab
	}

	// Make a new cache.
	newC := buildTypeAssertCache(oldC, t, tab)

	// Update cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return tab
}

func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2
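	// E.g. n == 3 gives newN == 6, and 1<<sys.Len64(5) rounds that up to 8.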

	// Allocate the new table.
	newSize := unsafe.Sizeof(abi.TypeAssertCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.TypeAssertCacheEntry{})
	newC := (*abi.TypeAssertCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, tab)

	return newC
}
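
// A lookup in this cache mirrors addEntry above: hash, then probe linearly
// until a matching Typ or an empty (zero) slot. A minimal sketch of that
// probe, assuming entries is the cache's flattened entry table:
//
//	h := int(typ.Hash) & int(c.Mask)
//	for {
//		e := &entries[h]
//		if e.Typ == uintptr(unsafe.Pointer(typ)) {
//			return (*itab)(unsafe.Pointer(e.Itab)) // hit
//		}
//		if e.Typ == 0 {
//			return nil // miss; fall back to the runtime
//		}
//		h = (h + 1) & int(c.Mask)
//	}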

// Empty type assert cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately).
var emptyTypeAssertCache = abi.TypeAssertCache{Mask: 0}

// interfaceSwitch compares t against the list of cases in s.
// If t matches case i, interfaceSwitch returns the case index i and
// an itab for the pair <t, s.Cases[i]>.
// If there is no match, it returns N, nil, where N is the number
// of cases.
func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) {
	cases := unsafe.Slice(&s.Cases[0], s.NCases)

	// Results if we don't find a match.
	case_ := len(cases)
	var tab *itab

	// Look through each case in order.
	for i, c := range cases {
		tab = getitab(c, t, true)
		if tab != nil {
			case_ = i
			break
		}
	}

	if !abi.UseInterfaceSwitchCache(GOARCH) {
		return case_, tab
	}

	// Maybe update the cache, so the next time the generated code
	// doesn't need to call into the runtime.
	if cheaprand()&1023 != 0 {
		// Only bother updating the cache ~1 in 1000 times.
		// This ensures we don't waste memory on switches, or
		// switch arguments, that only happen a few times.
		return case_, tab
	}
	// Load the current cache.
	oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache)))

	if cheaprand()&uint32(oldC.Mask) != 0 {
		// As cache gets larger, choose to update it less often
		// so we can amortize the cost of building a new cache
		// (that cost is linear in oldC.Mask).
		return case_, tab
	}

	// Make a new cache.
	newC := buildInterfaceSwitchCache(oldC, t, case_, tab)

	// Update cache. Use compare-and-swap so if multiple threads
	// are fighting to update the cache, at least one of their
	// updates will stick.
	atomic_casPointer((*unsafe.Pointer)(unsafe.Pointer(&s.Cache)), unsafe.Pointer(oldC), unsafe.Pointer(newC))

	return case_, tab
}

// buildInterfaceSwitchCache constructs an interface switch cache
// containing all the entries from oldC plus the new entry
// (typ,case_,tab).
func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache {
	oldEntries := unsafe.Slice(&oldC.Entries[0], oldC.Mask+1)

	// Count the number of entries we need.
	n := 1
	for _, e := range oldEntries {
		if e.Typ != 0 {
			n++
		}
	}

	// Figure out how big a table we need.
	// We need at least one more slot than the number of entries
	// so that we are guaranteed an empty slot (for termination).
	newN := n * 2                         // make it at most 50% full
	newN = 1 << sys.Len64(uint64(newN-1)) // round up to a power of 2

	// Allocate the new table.
	newSize := unsafe.Sizeof(abi.InterfaceSwitchCache{}) + uintptr(newN-1)*unsafe.Sizeof(abi.InterfaceSwitchCacheEntry{})
	newC := (*abi.InterfaceSwitchCache)(mallocgc(newSize, nil, true))
	newC.Mask = uintptr(newN - 1)
	newEntries := unsafe.Slice(&newC.Entries[0], newN)

	// Fill the new table.
	addEntry := func(typ *_type, case_ int, tab *itab) {
		h := int(typ.Hash) & (newN - 1)
		for {
			if newEntries[h].Typ == 0 {
				newEntries[h].Typ = uintptr(unsafe.Pointer(typ))
				newEntries[h].Case = case_
				newEntries[h].Itab = uintptr(unsafe.Pointer(tab))
				return
			}
			h = (h + 1) & (newN - 1)
		}
	}
	for _, e := range oldEntries {
		if e.Typ != 0 {
			addEntry((*_type)(unsafe.Pointer(e.Typ)), e.Case, (*itab)(unsafe.Pointer(e.Itab)))
		}
	}
	addEntry(typ, case_, tab)

	return newC
}
// Empty interface switch cache. Contains one entry with a nil Typ (which
// causes a cache lookup to fail immediately).
var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{Mask: 0}

// reflect_ifaceE2I is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
//
//go:linkname reflect_ifaceE2I reflect.ifaceE2I
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}

//go:linkname reflectlite_ifaceE2I internal/reflectlite.ifaceE2I
func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
	*dst = iface{assertE2I(inter, e._type), e.data}
}

func iterate_itabs(fn func(*itab)) {
	// Note: only runs during stop the world or with itabLock held,
	// so no other locks/atomics needed.
	t := itabTable
	for i := uintptr(0); i < t.size; i++ {
		m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
		if m != nil {
			fn(m)
		}
	}
}

// staticuint64s is used to avoid allocating in convTx for small integer values.
var staticuint64s = [...]uint64{
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
}

// The linker redirects a reference to a method that it determined
// unreachable to a reference to this function, so it will throw if
// ever called.
func unreachableMethod() {
	throw("unreachable method called. linker bug?")
}