1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package reflect implements run-time reflection, allowing a program to
6// manipulate objects with arbitrary types. The typical use is to take a value
7// with static type interface{} and extract its dynamic type information by
8// calling TypeOf, which returns a Type.
9//
10// A call to ValueOf returns a Value representing the run-time data.
11// Zero takes a Type and returns a Value representing a zero value
12// for that type.
13//
14// See "The Laws of Reflection" for an introduction to reflection in Go:
15// https://golang.org/doc/articles/laws_of_reflection.html
16package reflect
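
// A minimal caller-side sketch of the TypeOf/ValueOf/Zero pattern described in
// the package comment above (hedged example; assumes an importing package, so
// names carry the reflect. qualifier):
//
//	t := reflect.TypeOf(3.14)  // Type describing float64
//	v := reflect.ValueOf(3.14) // Value holding the float64 3.14
//	z := reflect.Zero(t)       // Value holding float64's zero value, 0
//	_ = v.Float() + z.Float()  // 3.14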
17
18import (
19	"internal/abi"
20	"internal/goarch"
21	"strconv"
22	"sync"
23	"unicode"
24	"unicode/utf8"
25	"unsafe"
26)
27
28// Type is the representation of a Go type.
29//
30// Not all methods apply to all kinds of types. Restrictions,
31// if any, are noted in the documentation for each method.
32// Use the Kind method to find out the kind of type before
33// calling kind-specific methods. Calling a method
34// inappropriate to the kind of type causes a run-time panic.
35//
36// Type values are comparable, such as with the == operator,
37// so they can be used as map keys.
38// Two Type values are equal if they represent identical types.
39type Type interface {
40	// Methods applicable to all types.
41
42	// Align returns the alignment in bytes of a value of
43	// this type when allocated in memory.
44	Align() int
45
46	// FieldAlign returns the alignment in bytes of a value of
47	// this type when used as a field in a struct.
48	FieldAlign() int
49
50	// Method returns the i'th method in the type's method set.
51	// It panics if i is not in the range [0, NumMethod()).
52	//
53	// For a non-interface type T or *T, the returned Method's Type and Func
54	// fields describe a function whose first argument is the receiver,
55	// and only exported methods are accessible.
56	//
57	// For an interface type, the returned Method's Type field gives the
58	// method signature, without a receiver, and the Func field is nil.
59	//
60	// Methods are sorted in lexicographic order.
61	Method(int) Method
62
63	// MethodByName returns the method with that name in the type's
64	// method set and a boolean indicating if the method was found.
65	//
66	// For a non-interface type T or *T, the returned Method's Type and Func
67	// fields describe a function whose first argument is the receiver.
68	//
69	// For an interface type, the returned Method's Type field gives the
70	// method signature, without a receiver, and the Func field is nil.
71	MethodByName(string) (Method, bool)
72
73	// NumMethod returns the number of methods accessible using Method.
74	//
75	// For a non-interface type, it returns the number of exported methods.
76	//
77	// For an interface type, it returns the number of exported and unexported methods.
78	NumMethod() int
79
80	// Name returns the type's name within its package for a defined type.
81	// For other (non-defined) types it returns the empty string.
82	Name() string
83
84	// PkgPath returns a defined type's package path, that is, the import path
85	// that uniquely identifies the package, such as "encoding/base64".
86	// If the type was predeclared (string, error) or not defined (*T, struct{},
87	// []int, or A where A is an alias for a non-defined type), the package path
88	// will be the empty string.
89	PkgPath() string
90
91	// Size returns the number of bytes needed to store
92	// a value of the given type; it is analogous to unsafe.Sizeof.
93	Size() uintptr
94
95	// String returns a string representation of the type.
96	// The string representation may use shortened package names
97	// (e.g., base64 instead of "encoding/base64") and is not
98	// guaranteed to be unique among types. To test for type identity,
99	// compare the Types directly.
100	String() string
101
102	// Kind returns the specific kind of this type.
103	Kind() Kind
104
105	// Implements reports whether the type implements the interface type u.
106	Implements(u Type) bool
107
108	// AssignableTo reports whether a value of the type is assignable to type u.
109	AssignableTo(u Type) bool
110
111	// ConvertibleTo reports whether a value of the type is convertible to type u.
112	// Even if ConvertibleTo returns true, the conversion may still panic.
113	// For example, a slice of type []T is convertible to *[N]T,
114	// but the conversion will panic if its length is less than N.
115	ConvertibleTo(u Type) bool
116
117	// Comparable reports whether values of this type are comparable.
118	// Even if Comparable returns true, the comparison may still panic.
119	// For example, values of interface type are comparable,
120	// but the comparison will panic if their dynamic type is not comparable.
121	Comparable() bool
122
123	// Methods applicable only to some types, depending on Kind.
124	// The methods allowed for each kind are:
125	//
126	//	Int*, Uint*, Float*, Complex*: Bits
127	//	Array: Elem, Len
128	//	Chan: ChanDir, Elem
129	//	Func: In, NumIn, Out, NumOut, IsVariadic.
130	//	Map: Key, Elem
131	//	Pointer: Elem
132	//	Slice: Elem
133	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
134
135	// Bits returns the size of the type in bits.
136	// It panics if the type's Kind is not one of the
137	// sized or unsized Int, Uint, Float, or Complex kinds.
138	Bits() int
139
140	// ChanDir returns a channel type's direction.
141	// It panics if the type's Kind is not Chan.
142	ChanDir() ChanDir
143
144	// IsVariadic reports whether a function type's final input parameter
145	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
146	// implicit actual type []T.
147	//
148	// For concreteness, if t represents func(x int, y ... float64), then
149	//
150	//	t.NumIn() == 2
151	//	t.In(0) is the reflect.Type for "int"
152	//	t.In(1) is the reflect.Type for "[]float64"
153	//	t.IsVariadic() == true
154	//
155	// IsVariadic panics if the type's Kind is not Func.
156	IsVariadic() bool
157
158	// Elem returns a type's element type.
159	// It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
160	Elem() Type
161
162	// Field returns a struct type's i'th field.
163	// It panics if the type's Kind is not Struct.
164	// It panics if i is not in the range [0, NumField()).
165	Field(i int) StructField
166
167	// FieldByIndex returns the nested field corresponding
168	// to the index sequence. It is equivalent to calling Field
169	// successively for each index i.
170	// It panics if the type's Kind is not Struct.
171	FieldByIndex(index []int) StructField
172
173	// FieldByName returns the struct field with the given name
174	// and a boolean indicating if the field was found.
175	// If the returned field is promoted from an embedded struct,
176	// then Offset in the returned StructField is the offset in
177	// the embedded struct.
178	FieldByName(name string) (StructField, bool)
179
180	// FieldByNameFunc returns the struct field with a name
181	// that satisfies the match function and a boolean indicating if
182	// the field was found.
183	//
184	// FieldByNameFunc considers the fields in the struct itself
185	// and then the fields in any embedded structs, in breadth first order,
186	// stopping at the shallowest nesting depth containing one or more
187	// fields satisfying the match function. If multiple fields at that depth
188	// satisfy the match function, they cancel each other
189	// and FieldByNameFunc returns no match.
190	// This behavior mirrors Go's handling of name lookup in
191	// structs containing embedded fields.
192	//
193	// If the returned field is promoted from an embedded struct,
194	// then Offset in the returned StructField is the offset in
195	// the embedded struct.
196	FieldByNameFunc(match func(string) bool) (StructField, bool)
197
198	// In returns the type of a function type's i'th input parameter.
199	// It panics if the type's Kind is not Func.
200	// It panics if i is not in the range [0, NumIn()).
201	In(i int) Type
202
203	// Key returns a map type's key type.
204	// It panics if the type's Kind is not Map.
205	Key() Type
206
207	// Len returns an array type's length.
208	// It panics if the type's Kind is not Array.
209	Len() int
210
211	// NumField returns a struct type's field count.
212	// It panics if the type's Kind is not Struct.
213	NumField() int
214
215	// NumIn returns a function type's input parameter count.
216	// It panics if the type's Kind is not Func.
217	NumIn() int
218
219	// NumOut returns a function type's output parameter count.
220	// It panics if the type's Kind is not Func.
221	NumOut() int
222
223	// Out returns the type of a function type's i'th output parameter.
224	// It panics if the type's Kind is not Func.
225	// It panics if i is not in the range [0, NumOut()).
226	Out(i int) Type
227
228	// OverflowComplex reports whether the complex128 x cannot be represented by type t.
229	// It panics if t's Kind is not Complex64 or Complex128.
230	OverflowComplex(x complex128) bool
231
232	// OverflowFloat reports whether the float64 x cannot be represented by type t.
233	// It panics if t's Kind is not Float32 or Float64.
234	OverflowFloat(x float64) bool
235
236	// OverflowInt reports whether the int64 x cannot be represented by type t.
237	// It panics if t's Kind is not Int, Int8, Int16, Int32, or Int64.
238	OverflowInt(x int64) bool
239
240	// OverflowUint reports whether the uint64 x cannot be represented by type t.
241	// It panics if t's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
242	OverflowUint(x uint64) bool
243
244	// CanSeq reports whether a [Value] with this type can be iterated over using [Value.Seq].
245	CanSeq() bool
246
247	// CanSeq2 reports whether a [Value] with this type can be iterated over using [Value.Seq2].
248	CanSeq2() bool
249
250	common() *abi.Type
251	uncommon() *uncommonType
252}
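
// As the Type documentation above notes, kind-specific methods panic when used
// on the wrong kind, so callers normally check Kind first. A small caller-side
// sketch (hedged example):
//
//	t := reflect.TypeOf([]string{})
//	if t.Kind() == reflect.Slice {
//		_ = t.Elem() // the Type for string; t.Field(0) here would panic
//	}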
253
254// BUG(rsc): FieldByName and related functions consider struct field names to be equal
255// if the names are equal, even if they are unexported names originating
256// in different packages. The practical effect of this is that the result of
257// t.FieldByName("x") is not well defined if the struct type t contains
258// multiple fields named x (embedded from different packages).
259// FieldByName may return one of the fields named x or may report that there are none.
260// See https://golang.org/issue/4876 for more details.
261
262/*
263 * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
266 */
267
268// A Kind represents the specific kind of type that a [Type] represents.
269// The zero Kind is not a valid kind.
270type Kind uint
271
272const (
273	Invalid Kind = iota
274	Bool
275	Int
276	Int8
277	Int16
278	Int32
279	Int64
280	Uint
281	Uint8
282	Uint16
283	Uint32
284	Uint64
285	Uintptr
286	Float32
287	Float64
288	Complex64
289	Complex128
290	Array
291	Chan
292	Func
293	Interface
294	Map
295	Pointer
296	Slice
297	String
298	Struct
299	UnsafePointer
300)
301
302// Ptr is the old name for the [Pointer] kind.
303const Ptr = Pointer
304
305// uncommonType is present only for defined types or types with methods
306// (if T is a defined type, the uncommonTypes for T and *T have methods).
307// Using a pointer to this struct reduces the overall size required
308// to describe a non-defined type with no methods.
309type uncommonType = abi.UncommonType
310
311// Embed this type to get common/uncommon
312type common struct {
313	abi.Type
314}
315
316// rtype is the common implementation of most values.
317// It is embedded in other struct types.
318type rtype struct {
319	t abi.Type
320}
321
322func (t *rtype) common() *abi.Type {
323	return &t.t
324}
325
326func (t *rtype) uncommon() *abi.UncommonType {
327	return t.t.Uncommon()
328}
329
330type aNameOff = abi.NameOff
331type aTypeOff = abi.TypeOff
332type aTextOff = abi.TextOff
333
334// ChanDir represents a channel type's direction.
335type ChanDir int
336
337const (
338	RecvDir ChanDir             = 1 << iota // <-chan
339	SendDir                                 // chan<-
340	BothDir = RecvDir | SendDir             // chan
341)
342
343// arrayType represents a fixed array type.
344type arrayType = abi.ArrayType
345
346// chanType represents a channel type.
347type chanType = abi.ChanType
348
349// funcType represents a function type.
350//
351// A *rtype for each in and out parameter is stored in an array that
352// directly follows the funcType (and possibly its uncommonType). So
353// a function type with one method, one input, and one output is:
354//
355//	struct {
356//		funcType
357//		uncommonType
358//		[2]*rtype    // [0] is in, [1] is out
359//	}
360type funcType = abi.FuncType
361
362// interfaceType represents an interface type.
363type interfaceType struct {
364	abi.InterfaceType // can embed directly because not a public type.
365}
366
367func (t *interfaceType) nameOff(off aNameOff) abi.Name {
368	return toRType(&t.Type).nameOff(off)
369}
370
371func nameOffFor(t *abi.Type, off aNameOff) abi.Name {
372	return toRType(t).nameOff(off)
373}
374
375func typeOffFor(t *abi.Type, off aTypeOff) *abi.Type {
376	return toRType(t).typeOff(off)
377}
378
379func (t *interfaceType) typeOff(off aTypeOff) *abi.Type {
380	return toRType(&t.Type).typeOff(off)
381}
382
383func (t *interfaceType) common() *abi.Type {
384	return &t.Type
385}
386
387func (t *interfaceType) uncommon() *abi.UncommonType {
388	return t.Uncommon()
389}
390
391// mapType represents a map type.
392type mapType struct {
393	abi.MapType
394}
395
396// ptrType represents a pointer type.
397type ptrType struct {
398	abi.PtrType
399}
400
401// sliceType represents a slice type.
402type sliceType struct {
403	abi.SliceType
404}
405
406// Struct field
407type structField = abi.StructField
408
409// structType represents a struct type.
410type structType struct {
411	abi.StructType
412}
413
414func pkgPath(n abi.Name) string {
415	if n.Bytes == nil || *n.DataChecked(0, "name flag field")&(1<<2) == 0 {
416		return ""
417	}
418	i, l := n.ReadVarint(1)
419	off := 1 + i + l
420	if n.HasTag() {
421		i2, l2 := n.ReadVarint(off)
422		off += i2 + l2
423	}
424	var nameOff int32
425	// Note that this field may not be aligned in memory,
426	// so we cannot use a direct int32 assignment here.
427	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.DataChecked(off, "name offset field")))[:])
428	pkgPathName := abi.Name{Bytes: (*byte)(resolveTypeOff(unsafe.Pointer(n.Bytes), nameOff))}
429	return pkgPathName.Name()
430}
431
432func newName(n, tag string, exported, embedded bool) abi.Name {
433	return abi.NewName(n, tag, exported, embedded)
434}
435
436/*
437 * The compiler knows the exact layout of all the data structures above.
438 * The compiler does not know about the data structures and methods below.
439 */
440
441// Method represents a single method.
442type Method struct {
443	// Name is the method name.
444	Name string
445
446	// PkgPath is the package path that qualifies a lower case (unexported)
447	// method name. It is empty for upper case (exported) method names.
448	// The combination of PkgPath and Name uniquely identifies a method
449	// in a method set.
450	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
451	PkgPath string
452
453	Type  Type  // method type
454	Func  Value // func with receiver as first argument
455	Index int   // index for Type.Method
456}
457
458// IsExported reports whether the method is exported.
459func (m Method) IsExported() bool {
460	return m.PkgPath == ""
461}
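
// A caller-side sketch of walking a method set with NumMethod, Method, and
// Method.IsExported; T stands for a hypothetical named type with methods:
//
//	t := reflect.TypeOf(T{})
//	for i := 0; i < t.NumMethod(); i++ {
//		m := t.Method(i)
//		_ = m.Name         // method names come back in sorted order
//		_ = m.IsExported() // always true here: only exported methods are listed
//	}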
462
463// String returns the name of k.
464func (k Kind) String() string {
465	if uint(k) < uint(len(kindNames)) {
466		return kindNames[uint(k)]
467	}
468	return "kind" + strconv.Itoa(int(k))
469}
470
471var kindNames = []string{
472	Invalid:       "invalid",
473	Bool:          "bool",
474	Int:           "int",
475	Int8:          "int8",
476	Int16:         "int16",
477	Int32:         "int32",
478	Int64:         "int64",
479	Uint:          "uint",
480	Uint8:         "uint8",
481	Uint16:        "uint16",
482	Uint32:        "uint32",
483	Uint64:        "uint64",
484	Uintptr:       "uintptr",
485	Float32:       "float32",
486	Float64:       "float64",
487	Complex64:     "complex64",
488	Complex128:    "complex128",
489	Array:         "array",
490	Chan:          "chan",
491	Func:          "func",
492	Interface:     "interface",
493	Map:           "map",
494	Pointer:       "ptr",
495	Slice:         "slice",
496	String:        "string",
497	Struct:        "struct",
498	UnsafePointer: "unsafe.Pointer",
499}
500
501// resolveNameOff resolves a name offset from a base pointer.
502// The (*rtype).nameOff method is a convenience wrapper for this function.
503// Implemented in the runtime package.
504//
505//go:noescape
506func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
507
508// resolveTypeOff resolves an *rtype offset from a base type.
509// The (*rtype).typeOff method is a convenience wrapper for this function.
510// Implemented in the runtime package.
511//
512//go:noescape
513func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
514
515// resolveTextOff resolves a function pointer offset from a base type.
516// The (*rtype).textOff method is a convenience wrapper for this function.
517// Implemented in the runtime package.
518//
519//go:noescape
520func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
521
522// addReflectOff adds a pointer to the reflection lookup map in the runtime.
523// It returns a new ID that can be used as a typeOff or textOff, and will
524// be resolved correctly. Implemented in the runtime package.
525//
526// addReflectOff should be an internal detail,
527// but widely used packages access it using linkname.
528// Notable members of the hall of shame include:
529//   - github.com/goplus/reflectx
530//
531// Do not remove or change the type signature.
532// See go.dev/issue/67401.
533//
534//go:linkname addReflectOff
535//go:noescape
536func addReflectOff(ptr unsafe.Pointer) int32
537
538// resolveReflectName adds a name to the reflection lookup map in the runtime.
539// It returns a new nameOff that can be used to refer to the pointer.
540func resolveReflectName(n abi.Name) aNameOff {
541	return aNameOff(addReflectOff(unsafe.Pointer(n.Bytes)))
542}
543
544// resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
545// It returns a new typeOff that can be used to refer to the pointer.
546func resolveReflectType(t *abi.Type) aTypeOff {
547	return aTypeOff(addReflectOff(unsafe.Pointer(t)))
548}
549
550// resolveReflectText adds a function pointer to the reflection lookup map in
551// the runtime. It returns a new textOff that can be used to refer to the
552// pointer.
553func resolveReflectText(ptr unsafe.Pointer) aTextOff {
554	return aTextOff(addReflectOff(ptr))
555}
556
557func (t *rtype) nameOff(off aNameOff) abi.Name {
558	return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
559}
560
561func (t *rtype) typeOff(off aTypeOff) *abi.Type {
562	return (*abi.Type)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
563}
564
565func (t *rtype) textOff(off aTextOff) unsafe.Pointer {
566	return resolveTextOff(unsafe.Pointer(t), int32(off))
567}
568
569func textOffFor(t *abi.Type, off aTextOff) unsafe.Pointer {
570	return toRType(t).textOff(off)
571}
572
573func (t *rtype) String() string {
574	s := t.nameOff(t.t.Str).Name()
575	if t.t.TFlag&abi.TFlagExtraStar != 0 {
576		return s[1:]
577	}
578	return s
579}
580
581func (t *rtype) Size() uintptr { return t.t.Size() }
582
583func (t *rtype) Bits() int {
584	if t == nil {
585		panic("reflect: Bits of nil Type")
586	}
587	k := t.Kind()
588	if k < Int || k > Complex128 {
589		panic("reflect: Bits of non-arithmetic Type " + t.String())
590	}
591	return int(t.t.Size_) * 8
592}
593
594func (t *rtype) Align() int { return t.t.Align() }
595
596func (t *rtype) FieldAlign() int { return t.t.FieldAlign() }
597
598func (t *rtype) Kind() Kind { return Kind(t.t.Kind()) }
599
600func (t *rtype) exportedMethods() []abi.Method {
601	ut := t.uncommon()
602	if ut == nil {
603		return nil
604	}
605	return ut.ExportedMethods()
606}
607
608func (t *rtype) NumMethod() int {
609	if t.Kind() == Interface {
610		tt := (*interfaceType)(unsafe.Pointer(t))
611		return tt.NumMethod()
612	}
613	return len(t.exportedMethods())
614}
615
616func (t *rtype) Method(i int) (m Method) {
617	if t.Kind() == Interface {
618		tt := (*interfaceType)(unsafe.Pointer(t))
619		return tt.Method(i)
620	}
621	methods := t.exportedMethods()
622	if i < 0 || i >= len(methods) {
623		panic("reflect: Method index out of range")
624	}
625	p := methods[i]
626	pname := t.nameOff(p.Name)
627	m.Name = pname.Name()
628	fl := flag(Func)
629	mtyp := t.typeOff(p.Mtyp)
630	ft := (*funcType)(unsafe.Pointer(mtyp))
631	in := make([]Type, 0, 1+ft.NumIn())
632	in = append(in, t)
633	for _, arg := range ft.InSlice() {
634		in = append(in, toRType(arg))
635	}
636	out := make([]Type, 0, ft.NumOut())
637	for _, ret := range ft.OutSlice() {
638		out = append(out, toRType(ret))
639	}
640	mt := FuncOf(in, out, ft.IsVariadic())
641	m.Type = mt
642	tfn := t.textOff(p.Tfn)
643	fn := unsafe.Pointer(&tfn)
644	m.Func = Value{&mt.(*rtype).t, fn, fl}
645
646	m.Index = i
647	return m
648}
649
650func (t *rtype) MethodByName(name string) (m Method, ok bool) {
651	if t.Kind() == Interface {
652		tt := (*interfaceType)(unsafe.Pointer(t))
653		return tt.MethodByName(name)
654	}
655	ut := t.uncommon()
656	if ut == nil {
657		return Method{}, false
658	}
659
660	methods := ut.ExportedMethods()
661
	// We are looking for the first index i where the method name becomes >= name.
	// This is a copy of sort.Search, with f(h) replaced by (t.nameOff(methods[h].Name).Name() >= name).
664	i, j := 0, len(methods)
665	for i < j {
666		h := int(uint(i+j) >> 1) // avoid overflow when computing h
667		// i ≤ h < j
668		if !(t.nameOff(methods[h].Name).Name() >= name) {
669			i = h + 1 // preserves f(i-1) == false
670		} else {
671			j = h // preserves f(j) == true
672		}
673	}
674	// i == j, f(i-1) == false, and f(j) (= f(i)) == true  =>  answer is i.
675	if i < len(methods) && name == t.nameOff(methods[i].Name).Name() {
676		return t.Method(i), true
677	}
678
679	return Method{}, false
680}
681
682func (t *rtype) PkgPath() string {
683	if t.t.TFlag&abi.TFlagNamed == 0 {
684		return ""
685	}
686	ut := t.uncommon()
687	if ut == nil {
688		return ""
689	}
690	return t.nameOff(ut.PkgPath).Name()
691}
692
693func pkgPathFor(t *abi.Type) string {
694	return toRType(t).PkgPath()
695}
696
697func (t *rtype) Name() string {
698	if !t.t.HasName() {
699		return ""
700	}
701	s := t.String()
702	i := len(s) - 1
703	sqBrackets := 0
704	for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
705		switch s[i] {
706		case ']':
707			sqBrackets++
708		case '[':
709			sqBrackets--
710		}
711		i--
712	}
713	return s[i+1:]
714}
715
716func nameFor(t *abi.Type) string {
717	return toRType(t).Name()
718}
719
720func (t *rtype) ChanDir() ChanDir {
721	if t.Kind() != Chan {
722		panic("reflect: ChanDir of non-chan type " + t.String())
723	}
724	tt := (*abi.ChanType)(unsafe.Pointer(t))
725	return ChanDir(tt.Dir)
726}
727
728func toRType(t *abi.Type) *rtype {
729	return (*rtype)(unsafe.Pointer(t))
730}
731
732func elem(t *abi.Type) *abi.Type {
733	et := t.Elem()
734	if et != nil {
735		return et
736	}
737	panic("reflect: Elem of invalid type " + stringFor(t))
738}
739
740func (t *rtype) Elem() Type {
741	return toType(elem(t.common()))
742}
743
744func (t *rtype) Field(i int) StructField {
745	if t.Kind() != Struct {
746		panic("reflect: Field of non-struct type " + t.String())
747	}
748	tt := (*structType)(unsafe.Pointer(t))
749	return tt.Field(i)
750}
751
752func (t *rtype) FieldByIndex(index []int) StructField {
753	if t.Kind() != Struct {
754		panic("reflect: FieldByIndex of non-struct type " + t.String())
755	}
756	tt := (*structType)(unsafe.Pointer(t))
757	return tt.FieldByIndex(index)
758}
759
760func (t *rtype) FieldByName(name string) (StructField, bool) {
761	if t.Kind() != Struct {
762		panic("reflect: FieldByName of non-struct type " + t.String())
763	}
764	tt := (*structType)(unsafe.Pointer(t))
765	return tt.FieldByName(name)
766}
767
768func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
769	if t.Kind() != Struct {
770		panic("reflect: FieldByNameFunc of non-struct type " + t.String())
771	}
772	tt := (*structType)(unsafe.Pointer(t))
773	return tt.FieldByNameFunc(match)
774}
775
776func (t *rtype) Key() Type {
777	if t.Kind() != Map {
778		panic("reflect: Key of non-map type " + t.String())
779	}
780	tt := (*mapType)(unsafe.Pointer(t))
781	return toType(tt.Key)
782}
783
784func (t *rtype) Len() int {
785	if t.Kind() != Array {
786		panic("reflect: Len of non-array type " + t.String())
787	}
788	tt := (*arrayType)(unsafe.Pointer(t))
789	return int(tt.Len)
790}
791
792func (t *rtype) NumField() int {
793	if t.Kind() != Struct {
794		panic("reflect: NumField of non-struct type " + t.String())
795	}
796	tt := (*structType)(unsafe.Pointer(t))
797	return len(tt.Fields)
798}
799
800func (t *rtype) In(i int) Type {
801	if t.Kind() != Func {
802		panic("reflect: In of non-func type " + t.String())
803	}
804	tt := (*abi.FuncType)(unsafe.Pointer(t))
805	return toType(tt.InSlice()[i])
806}
807
808func (t *rtype) NumIn() int {
809	if t.Kind() != Func {
810		panic("reflect: NumIn of non-func type " + t.String())
811	}
812	tt := (*abi.FuncType)(unsafe.Pointer(t))
813	return tt.NumIn()
814}
815
816func (t *rtype) NumOut() int {
817	if t.Kind() != Func {
818		panic("reflect: NumOut of non-func type " + t.String())
819	}
820	tt := (*abi.FuncType)(unsafe.Pointer(t))
821	return tt.NumOut()
822}
823
824func (t *rtype) Out(i int) Type {
825	if t.Kind() != Func {
826		panic("reflect: Out of non-func type " + t.String())
827	}
828	tt := (*abi.FuncType)(unsafe.Pointer(t))
829	return toType(tt.OutSlice()[i])
830}
831
832func (t *rtype) IsVariadic() bool {
833	if t.Kind() != Func {
834		panic("reflect: IsVariadic of non-func type " + t.String())
835	}
836	tt := (*abi.FuncType)(unsafe.Pointer(t))
837	return tt.IsVariadic()
838}
839
840func (t *rtype) OverflowComplex(x complex128) bool {
841	k := t.Kind()
842	switch k {
843	case Complex64:
844		return overflowFloat32(real(x)) || overflowFloat32(imag(x))
845	case Complex128:
846		return false
847	}
848	panic("reflect: OverflowComplex of non-complex type " + t.String())
849}
850
851func (t *rtype) OverflowFloat(x float64) bool {
852	k := t.Kind()
853	switch k {
854	case Float32:
855		return overflowFloat32(x)
856	case Float64:
857		return false
858	}
859	panic("reflect: OverflowFloat of non-float type " + t.String())
860}
861
862func (t *rtype) OverflowInt(x int64) bool {
863	k := t.Kind()
864	switch k {
865	case Int, Int8, Int16, Int32, Int64:
866		bitSize := t.Size() * 8
867		trunc := (x << (64 - bitSize)) >> (64 - bitSize)
868		return x != trunc
869	}
870	panic("reflect: OverflowInt of non-int type " + t.String())
871}
872
873func (t *rtype) OverflowUint(x uint64) bool {
874	k := t.Kind()
875	switch k {
876	case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
877		bitSize := t.Size() * 8
878		trunc := (x << (64 - bitSize)) >> (64 - bitSize)
879		return x != trunc
880	}
881	panic("reflect: OverflowUint of non-uint type " + t.String())
882}
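
// A caller-side sketch of the Overflow helpers above (hedged example): a value
// that cannot be represented by the target kind reports true.
//
//	t8 := reflect.TypeOf(int8(0))
//	_ = t8.OverflowInt(127) // false: 127 fits in int8
//	_ = t8.OverflowInt(128) // true: 128 does not fit in int8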
883
884func (t *rtype) CanSeq() bool {
885	switch t.Kind() {
886	case Int8, Int16, Int32, Int64, Int, Uint8, Uint16, Uint32, Uint64, Uint, Uintptr, Array, Slice, Chan, String, Map:
887		return true
888	case Func:
889		return canRangeFunc(&t.t)
890	case Pointer:
891		return t.Elem().Kind() == Array
892	}
893	return false
894}
895
896func canRangeFunc(t *abi.Type) bool {
897	if t.Kind() != abi.Func {
898		return false
899	}
900	f := t.FuncType()
901	if f.InCount != 1 || f.OutCount != 0 {
902		return false
903	}
904	y := f.In(0)
905	if y.Kind() != abi.Func {
906		return false
907	}
908	yield := y.FuncType()
909	return yield.InCount == 1 && yield.OutCount == 1 && yield.Out(0).Kind() == abi.Bool
910}
911
912func (t *rtype) CanSeq2() bool {
913	switch t.Kind() {
914	case Array, Slice, String, Map:
915		return true
916	case Func:
917		return canRangeFunc2(&t.t)
918	case Pointer:
919		return t.Elem().Kind() == Array
920	}
921	return false
922}
923
924func canRangeFunc2(t *abi.Type) bool {
925	if t.Kind() != abi.Func {
926		return false
927	}
928	f := t.FuncType()
929	if f.InCount != 1 || f.OutCount != 0 {
930		return false
931	}
932	y := f.In(0)
933	if y.Kind() != abi.Func {
934		return false
935	}
936	yield := y.FuncType()
937	return yield.InCount == 2 && yield.OutCount == 1 && yield.Out(0).Kind() == abi.Bool
938}
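
// For the Func cases above, the accepted shapes are the range-over-func forms:
// a single yield parameter that itself returns bool. A caller-side sketch
// (hedged example):
//
//	seq := func(yield func(int) bool) {}          // iter.Seq[int] shape
//	seq2 := func(yield func(int, string) bool) {} // iter.Seq2[int, string] shape
//	_ = reflect.TypeOf(seq).CanSeq()   // true
//	_ = reflect.TypeOf(seq2).CanSeq2() // true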
939
940// add returns p+x.
941//
942// The whySafe string is ignored, so that the function still inlines
943// as efficiently as p+x, but all call sites should use the string to
944// record why the addition is safe, which is to say why the addition
945// does not cause x to advance to the very end of p's allocation
946// and therefore point incorrectly at the next block in memory.
947//
948// add should be an internal detail (and is trivially copyable),
949// but widely used packages access it using linkname.
950// Notable members of the hall of shame include:
951//   - github.com/pinpoint-apm/pinpoint-go-agent
952//   - github.com/vmware/govmomi
953//
954// Do not remove or change the type signature.
955// See go.dev/issue/67401.
956//
957//go:linkname add
958func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
959	return unsafe.Pointer(uintptr(p) + x)
960}
961
962func (d ChanDir) String() string {
963	switch d {
964	case SendDir:
965		return "chan<-"
966	case RecvDir:
967		return "<-chan"
968	case BothDir:
969		return "chan"
970	}
971	return "ChanDir" + strconv.Itoa(int(d))
972}
973
974// Method returns the i'th method in the type's method set.
975func (t *interfaceType) Method(i int) (m Method) {
976	if i < 0 || i >= len(t.Methods) {
977		return
978	}
979	p := &t.Methods[i]
980	pname := t.nameOff(p.Name)
981	m.Name = pname.Name()
982	if !pname.IsExported() {
983		m.PkgPath = pkgPath(pname)
984		if m.PkgPath == "" {
985			m.PkgPath = t.PkgPath.Name()
986		}
987	}
988	m.Type = toType(t.typeOff(p.Typ))
989	m.Index = i
990	return
991}
992
993// NumMethod returns the number of interface methods in the type's method set.
994func (t *interfaceType) NumMethod() int { return len(t.Methods) }
995
// MethodByName returns the method with the given name in the type's method set
// and a boolean indicating whether the method was found.
997func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
998	if t == nil {
999		return
1000	}
1001	var p *abi.Imethod
1002	for i := range t.Methods {
1003		p = &t.Methods[i]
1004		if t.nameOff(p.Name).Name() == name {
1005			return t.Method(i), true
1006		}
1007	}
1008	return
1009}
1010
1011// A StructField describes a single field in a struct.
1012type StructField struct {
1013	// Name is the field name.
1014	Name string
1015
1016	// PkgPath is the package path that qualifies a lower case (unexported)
1017	// field name. It is empty for upper case (exported) field names.
1018	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
1019	PkgPath string
1020
1021	Type      Type      // field type
1022	Tag       StructTag // field tag string
1023	Offset    uintptr   // offset within struct, in bytes
1024	Index     []int     // index sequence for Type.FieldByIndex
1025	Anonymous bool      // is an embedded field
1026}
1027
1028// IsExported reports whether the field is exported.
1029func (f StructField) IsExported() bool {
1030	return f.PkgPath == ""
1031}
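
// A caller-side sketch of walking a struct's fields with NumField, Field, and
// StructField.IsExported; the user type is hypothetical:
//
//	type user struct {
//		Name string
//		age  int
//	}
//	t := reflect.TypeOf(user{})
//	for i := 0; i < t.NumField(); i++ {
//		f := t.Field(i)
//		_ = f.Name         // "Name", then "age"
//		_ = f.IsExported() // true, then false (age is unexported)
//	}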
1032
1033// A StructTag is the tag string in a struct field.
1034//
1035// By convention, tag strings are a concatenation of
1036// optionally space-separated key:"value" pairs.
1037// Each key is a non-empty string consisting of non-control
1038// characters other than space (U+0020 ' '), quote (U+0022 '"'),
1039// and colon (U+003A ':').  Each value is quoted using U+0022 '"'
1040// characters and Go string literal syntax.
1041type StructTag string
1042
1043// Get returns the value associated with key in the tag string.
1044// If there is no such key in the tag, Get returns the empty string.
1045// If the tag does not have the conventional format, the value
1046// returned by Get is unspecified. To determine whether a tag is
1047// explicitly set to the empty string, use [StructTag.Lookup].
1048func (tag StructTag) Get(key string) string {
1049	v, _ := tag.Lookup(key)
1050	return v
1051}
1052
1053// Lookup returns the value associated with key in the tag string.
1054// If the key is present in the tag the value (which may be empty)
1055// is returned. Otherwise the returned value will be the empty string.
1056// The ok return value reports whether the value was explicitly set in
1057// the tag string. If the tag does not have the conventional format,
1058// the value returned by Lookup is unspecified.
1059func (tag StructTag) Lookup(key string) (value string, ok bool) {
1060	// When modifying this code, also update the validateStructTag code
1061	// in cmd/vet/structtag.go.
1062
1063	for tag != "" {
1064		// Skip leading space.
1065		i := 0
1066		for i < len(tag) && tag[i] == ' ' {
1067			i++
1068		}
1069		tag = tag[i:]
1070		if tag == "" {
1071			break
1072		}
1073
1074		// Scan to colon. A space, a quote or a control character is a syntax error.
1075		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
1076		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
1077		// as it is simpler to inspect the tag's bytes than the tag's runes.
1078		i = 0
1079		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
1080			i++
1081		}
1082		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
1083			break
1084		}
1085		name := string(tag[:i])
1086		tag = tag[i+1:]
1087
1088		// Scan quoted string to find value.
1089		i = 1
1090		for i < len(tag) && tag[i] != '"' {
1091			if tag[i] == '\\' {
1092				i++
1093			}
1094			i++
1095		}
1096		if i >= len(tag) {
1097			break
1098		}
1099		qvalue := string(tag[:i+1])
1100		tag = tag[i+1:]
1101
1102		if key == name {
1103			value, err := strconv.Unquote(qvalue)
1104			if err != nil {
1105				break
1106			}
1107			return value, true
1108		}
1109	}
1110	return "", false
1111}
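
// A caller-side sketch of the tag convention documented on StructTag, using Get
// and Lookup on a hypothetical struct field:
//
//	type user struct {
//		Name string `json:"name,omitempty" db:"user_name"`
//	}
//	f, _ := reflect.TypeOf(user{}).FieldByName("Name")
//	_ = f.Tag.Get("json")       // "name,omitempty"
//	v, ok := f.Tag.Lookup("db") // "user_name", true
//	_, ok = f.Tag.Lookup("xml") // "", false
//	_, _ = v, ok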
1112
1113// Field returns the i'th struct field.
1114func (t *structType) Field(i int) (f StructField) {
1115	if i < 0 || i >= len(t.Fields) {
1116		panic("reflect: Field index out of bounds")
1117	}
1118	p := &t.Fields[i]
1119	f.Type = toType(p.Typ)
1120	f.Name = p.Name.Name()
1121	f.Anonymous = p.Embedded()
1122	if !p.Name.IsExported() {
1123		f.PkgPath = t.PkgPath.Name()
1124	}
1125	if tag := p.Name.Tag(); tag != "" {
1126		f.Tag = StructTag(tag)
1127	}
1128	f.Offset = p.Offset
1129
1130	// NOTE(rsc): This is the only allocation in the interface
1131	// presented by a reflect.Type. It would be nice to avoid,
1132	// at least in the common cases, but we need to make sure
1133	// that misbehaving clients of reflect cannot affect other
1134	// uses of reflect. One possibility is CL 5371098, but we
1135	// postponed that ugliness until there is a demonstrated
1136	// need for the performance. This is issue 2320.
1137	f.Index = []int{i}
1138	return
1139}
1140
1141// TODO(gri): Should there be an error/bool indicator if the index
1142// is wrong for FieldByIndex?
1143
1144// FieldByIndex returns the nested field corresponding to index.
1145func (t *structType) FieldByIndex(index []int) (f StructField) {
1146	f.Type = toType(&t.Type)
1147	for i, x := range index {
1148		if i > 0 {
1149			ft := f.Type
1150			if ft.Kind() == Pointer && ft.Elem().Kind() == Struct {
1151				ft = ft.Elem()
1152			}
1153			f.Type = ft
1154		}
1155		f = f.Type.Field(x)
1156	}
1157	return
1158}
1159
1160// A fieldScan represents an item on the fieldByNameFunc scan work list.
1161type fieldScan struct {
1162	typ   *structType
1163	index []int
1164}
1165
1166// FieldByNameFunc returns the struct field with a name that satisfies the
1167// match function and a boolean to indicate if the field was found.
1168func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
1169	// This uses the same condition that the Go language does: there must be a unique instance
1170	// of the match at a given depth level. If there are multiple instances of a match at the
1171	// same depth, they annihilate each other and inhibit any possible match at a lower level.
1172	// The algorithm is breadth first search, one depth level at a time.
1173
1174	// The current and next slices are work queues:
1175	// current lists the fields to visit on this depth level,
1176	// and next lists the fields on the next lower level.
1177	current := []fieldScan{}
1178	next := []fieldScan{{typ: t}}
1179
1180	// nextCount records the number of times an embedded type has been
1181	// encountered and considered for queueing in the 'next' slice.
1182	// We only queue the first one, but we increment the count on each.
1183	// If a struct type T can be reached more than once at a given depth level,
1184	// then it annihilates itself and need not be considered at all when we
1185	// process that next depth level.
1186	var nextCount map[*structType]int
1187
1188	// visited records the structs that have been considered already.
1189	// Embedded pointer fields can create cycles in the graph of
1190	// reachable embedded types; visited avoids following those cycles.
1191	// It also avoids duplicated effort: if we didn't find the field in an
1192	// embedded type T at level 2, we won't find it in one at level 4 either.
1193	visited := map[*structType]bool{}
1194
1195	for len(next) > 0 {
1196		current, next = next, current[:0]
1197		count := nextCount
1198		nextCount = nil
1199
1200		// Process all the fields at this depth, now listed in 'current'.
1201		// The loop queues embedded fields found in 'next', for processing during the next
1202		// iteration. The multiplicity of the 'current' field counts is recorded
1203		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
1204		for _, scan := range current {
1205			t := scan.typ
1206			if visited[t] {
1207				// We've looked through this type before, at a higher level.
1208				// That higher level would shadow the lower level we're now at,
1209				// so this one can't be useful to us. Ignore it.
1210				continue
1211			}
1212			visited[t] = true
1213			for i := range t.Fields {
1214				f := &t.Fields[i]
1215				// Find name and (for embedded field) type for field f.
1216				fname := f.Name.Name()
1217				var ntyp *abi.Type
1218				if f.Embedded() {
1219					// Embedded field of type T or *T.
1220					ntyp = f.Typ
1221					if ntyp.Kind() == abi.Pointer {
1222						ntyp = ntyp.Elem()
1223					}
1224				}
1225
1226				// Does it match?
1227				if match(fname) {
1228					// Potential match
1229					if count[t] > 1 || ok {
1230						// Name appeared multiple times at this level: annihilate.
1231						return StructField{}, false
1232					}
1233					result = t.Field(i)
1234					result.Index = nil
1235					result.Index = append(result.Index, scan.index...)
1236					result.Index = append(result.Index, i)
1237					ok = true
1238					continue
1239				}
1240
1241				// Queue embedded struct fields for processing with next level,
1242				// but only if we haven't seen a match yet at this level and only
1243				// if the embedded types haven't already been queued.
1244				if ok || ntyp == nil || ntyp.Kind() != abi.Struct {
1245					continue
1246				}
1247				styp := (*structType)(unsafe.Pointer(ntyp))
1248				if nextCount[styp] > 0 {
1249					nextCount[styp] = 2 // exact multiple doesn't matter
1250					continue
1251				}
1252				if nextCount == nil {
1253					nextCount = map[*structType]int{}
1254				}
1255				nextCount[styp] = 1
1256				if count[t] > 1 {
1257					nextCount[styp] = 2 // exact multiple doesn't matter
1258				}
1259				var index []int
1260				index = append(index, scan.index...)
1261				index = append(index, i)
1262				next = append(next, fieldScan{styp, index})
1263			}
1264		}
1265		if ok {
1266			break
1267		}
1268	}
1269	return
1270}
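
// A caller-side sketch of the annihilation rule implemented above: two embedded
// structs that both provide a field X at the same depth cancel each other, so
// the lookup reports no match (the types here are hypothetical):
//
//	type A struct{ X int }
//	type B struct{ X int }
//	type C struct{ A; B }
//	_, ok := reflect.TypeOf(C{}).FieldByName("X") // ok == false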
1271
1272// FieldByName returns the struct field with the given name
1273// and a boolean to indicate if the field was found.
1274func (t *structType) FieldByName(name string) (f StructField, present bool) {
1275	// Quick check for top-level name, or struct without embedded fields.
1276	hasEmbeds := false
1277	if name != "" {
1278		for i := range t.Fields {
1279			tf := &t.Fields[i]
1280			if tf.Name.Name() == name {
1281				return t.Field(i), true
1282			}
1283			if tf.Embedded() {
1284				hasEmbeds = true
1285			}
1286		}
1287	}
1288	if !hasEmbeds {
1289		return
1290	}
1291	return t.FieldByNameFunc(func(s string) bool { return s == name })
1292}
1293
1294// TypeOf returns the reflection [Type] that represents the dynamic type of i.
1295// If i is a nil interface value, TypeOf returns nil.
1296func TypeOf(i any) Type {
1297	return toType(abi.TypeOf(i))
1298}
1299
1300// rtypeOf directly extracts the *rtype of the provided value.
1301func rtypeOf(i any) *abi.Type {
1302	return abi.TypeOf(i)
1303}
1304
1305// ptrMap is the cache for PointerTo.
1306var ptrMap sync.Map // map[*rtype]*ptrType
1307
1308// PtrTo returns the pointer type with element t.
1309// For example, if t represents type Foo, PtrTo(t) represents *Foo.
1310//
1311// PtrTo is the old spelling of [PointerTo].
1312// The two functions behave identically.
1313//
1314// Deprecated: Superseded by [PointerTo].
1315func PtrTo(t Type) Type { return PointerTo(t) }
1316
1317// PointerTo returns the pointer type with element t.
1318// For example, if t represents type Foo, PointerTo(t) represents *Foo.
1319func PointerTo(t Type) Type {
1320	return toRType(t.(*rtype).ptrTo())
1321}
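
// A caller-side sketch of PointerTo (hedged example):
//
//	ti := reflect.TypeOf(0)     // int
//	pt := reflect.PointerTo(ti) // *int
//	_ = pt.Kind()               // reflect.Pointer
//	_ = pt.Elem() == ti         // true: Type values are comparable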
1322
1323func (t *rtype) ptrTo() *abi.Type {
1324	at := &t.t
1325	if at.PtrToThis != 0 {
1326		return t.typeOff(at.PtrToThis)
1327	}
1328
1329	// Check the cache.
1330	if pi, ok := ptrMap.Load(t); ok {
1331		return &pi.(*ptrType).Type
1332	}
1333
1334	// Look in known types.
1335	s := "*" + t.String()
1336	for _, tt := range typesByString(s) {
1337		p := (*ptrType)(unsafe.Pointer(tt))
1338		if p.Elem != &t.t {
1339			continue
1340		}
1341		pi, _ := ptrMap.LoadOrStore(t, p)
1342		return &pi.(*ptrType).Type
1343	}
1344
1345	// Create a new ptrType starting with the description
1346	// of an *unsafe.Pointer.
1347	var iptr any = (*unsafe.Pointer)(nil)
1348	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1349	pp := *prototype
1350
1351	pp.Str = resolveReflectName(newName(s, "", false, false))
1352	pp.PtrToThis = 0
1353
1354	// For the type structures linked into the binary, the
1355	// compiler provides a good hash of the string.
1356	// Create a good hash for the new string by using
1357	// the FNV-1 hash's mixing function to combine the
1358	// old hash and the new "*".
1359	pp.Hash = fnv1(t.t.Hash, '*')
1360
1361	pp.Elem = at
1362
1363	pi, _ := ptrMap.LoadOrStore(t, &pp)
1364	return &pi.(*ptrType).Type
1365}
1366
1367func ptrTo(t *abi.Type) *abi.Type {
1368	return toRType(t).ptrTo()
1369}
1370
1371// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
1372func fnv1(x uint32, list ...byte) uint32 {
1373	for _, b := range list {
1374		x = x*16777619 ^ uint32(b)
1375	}
1376	return x
1377}
1378
1379func (t *rtype) Implements(u Type) bool {
1380	if u == nil {
1381		panic("reflect: nil type passed to Type.Implements")
1382	}
1383	if u.Kind() != Interface {
1384		panic("reflect: non-interface type passed to Type.Implements")
1385	}
1386	return implements(u.common(), t.common())
1387}
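
// A caller-side sketch of Implements: obtain the interface Type through a
// pointer to the interface, then test a concrete type against it
// (*strconv.NumError is used only as a convenient standard-library example):
//
//	errType := reflect.TypeOf((*error)(nil)).Elem()              // the error interface
//	_ = reflect.TypeOf(&strconv.NumError{}).Implements(errType)  // true
//	_ = reflect.TypeOf(42).Implements(errType)                   // false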
1388
1389func (t *rtype) AssignableTo(u Type) bool {
1390	if u == nil {
1391		panic("reflect: nil type passed to Type.AssignableTo")
1392	}
1393	uu := u.common()
1394	return directlyAssignable(uu, t.common()) || implements(uu, t.common())
1395}
1396
1397func (t *rtype) ConvertibleTo(u Type) bool {
1398	if u == nil {
1399		panic("reflect: nil type passed to Type.ConvertibleTo")
1400	}
1401	return convertOp(u.common(), t.common()) != nil
1402}
1403
1404func (t *rtype) Comparable() bool {
1405	return t.t.Equal != nil
1406}
1407
1408// implements reports whether the type V implements the interface type T.
1409func implements(T, V *abi.Type) bool {
1410	if T.Kind() != abi.Interface {
1411		return false
1412	}
1413	t := (*interfaceType)(unsafe.Pointer(T))
1414	if len(t.Methods) == 0 {
1415		return true
1416	}
1417
1418	// The same algorithm applies in both cases, but the
1419	// method tables for an interface type and a concrete type
1420	// are different, so the code is duplicated.
1421	// In both cases the algorithm is a linear scan over the two
1422	// lists - T's methods and V's methods - simultaneously.
1423	// Since method tables are stored in a unique sorted order
1424	// (alphabetical, with no duplicate method names), the scan
1425	// through V's methods must hit a match for each of T's
1426	// methods along the way, or else V does not implement T.
1427	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
1429	// See also ../runtime/iface.go.
1430	if V.Kind() == abi.Interface {
1431		v := (*interfaceType)(unsafe.Pointer(V))
1432		i := 0
1433		for j := 0; j < len(v.Methods); j++ {
1434			tm := &t.Methods[i]
1435			tmName := t.nameOff(tm.Name)
1436			vm := &v.Methods[j]
1437			vmName := nameOffFor(V, vm.Name)
1438			if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Typ) == t.typeOff(tm.Typ) {
1439				if !tmName.IsExported() {
1440					tmPkgPath := pkgPath(tmName)
1441					if tmPkgPath == "" {
1442						tmPkgPath = t.PkgPath.Name()
1443					}
1444					vmPkgPath := pkgPath(vmName)
1445					if vmPkgPath == "" {
1446						vmPkgPath = v.PkgPath.Name()
1447					}
1448					if tmPkgPath != vmPkgPath {
1449						continue
1450					}
1451				}
1452				if i++; i >= len(t.Methods) {
1453					return true
1454				}
1455			}
1456		}
1457		return false
1458	}
1459
1460	v := V.Uncommon()
1461	if v == nil {
1462		return false
1463	}
1464	i := 0
1465	vmethods := v.Methods()
1466	for j := 0; j < int(v.Mcount); j++ {
1467		tm := &t.Methods[i]
1468		tmName := t.nameOff(tm.Name)
1469		vm := vmethods[j]
1470		vmName := nameOffFor(V, vm.Name)
1471		if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Mtyp) == t.typeOff(tm.Typ) {
1472			if !tmName.IsExported() {
1473				tmPkgPath := pkgPath(tmName)
1474				if tmPkgPath == "" {
1475					tmPkgPath = t.PkgPath.Name()
1476				}
1477				vmPkgPath := pkgPath(vmName)
1478				if vmPkgPath == "" {
1479					vmPkgPath = nameOffFor(V, v.PkgPath).Name()
1480				}
1481				if tmPkgPath != vmPkgPath {
1482					continue
1483				}
1484			}
1485			if i++; i >= len(t.Methods) {
1486				return true
1487			}
1488		}
1489	}
1490	return false
1491}
1492
1493// specialChannelAssignability reports whether a value x of channel type V
1494// can be directly assigned (using memmove) to another channel type T.
1495// https://golang.org/doc/go_spec.html#Assignability
1496// T and V must be both of Chan kind.
1497func specialChannelAssignability(T, V *abi.Type) bool {
1498	// Special case:
1499	// x is a bidirectional channel value, T is a channel type,
1500	// x's type V and T have identical element types,
1501	// and at least one of V or T is not a defined type.
1502	return V.ChanDir() == abi.BothDir && (nameFor(T) == "" || nameFor(V) == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
1503}
1504
1505// directlyAssignable reports whether a value x of type V can be directly
1506// assigned (using memmove) to a value of type T.
1507// https://golang.org/doc/go_spec.html#Assignability
1508// Ignoring the interface rules (implemented elsewhere)
1509// and the ideal constant rules (no ideal constants at run time).
1510func directlyAssignable(T, V *abi.Type) bool {
1511	// x's type V is identical to T?
1512	if T == V {
1513		return true
1514	}
1515
1516	// Otherwise at least one of T and V must not be defined
1517	// and they must have the same kind.
1518	if T.HasName() && V.HasName() || T.Kind() != V.Kind() {
1519		return false
1520	}
1521
1522	if T.Kind() == abi.Chan && specialChannelAssignability(T, V) {
1523		return true
1524	}
1525
	// x's type V and T must have identical underlying types.
1527	return haveIdenticalUnderlyingType(T, V, true)
1528}
1529
1530func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool {
1531	if cmpTags {
1532		return T == V
1533	}
1534
1535	if nameFor(T) != nameFor(V) || T.Kind() != V.Kind() || pkgPathFor(T) != pkgPathFor(V) {
1536		return false
1537	}
1538
1539	return haveIdenticalUnderlyingType(T, V, false)
1540}
1541
1542func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool {
1543	if T == V {
1544		return true
1545	}
1546
1547	kind := Kind(T.Kind())
1548	if kind != Kind(V.Kind()) {
1549		return false
1550	}
1551
1552	// Non-composite types of equal kind have same underlying type
1553	// (the predefined instance of the type).
1554	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
1555		return true
1556	}
1557
1558	// Composite types.
1559	switch kind {
1560	case Array:
1561		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1562
1563	case Chan:
1564		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1565
1566	case Func:
1567		t := (*funcType)(unsafe.Pointer(T))
1568		v := (*funcType)(unsafe.Pointer(V))
1569		if t.OutCount != v.OutCount || t.InCount != v.InCount {
1570			return false
1571		}
1572		for i := 0; i < t.NumIn(); i++ {
1573			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
1574				return false
1575			}
1576		}
1577		for i := 0; i < t.NumOut(); i++ {
1578			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
1579				return false
1580			}
1581		}
1582		return true
1583
1584	case Interface:
1585		t := (*interfaceType)(unsafe.Pointer(T))
1586		v := (*interfaceType)(unsafe.Pointer(V))
1587		if len(t.Methods) == 0 && len(v.Methods) == 0 {
1588			return true
1589		}
1590		// Might have the same methods but still
1591		// need a run time conversion.
1592		return false
1593
1594	case Map:
1595		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1596
1597	case Pointer, Slice:
1598		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1599
1600	case Struct:
1601		t := (*structType)(unsafe.Pointer(T))
1602		v := (*structType)(unsafe.Pointer(V))
1603		if len(t.Fields) != len(v.Fields) {
1604			return false
1605		}
1606		if t.PkgPath.Name() != v.PkgPath.Name() {
1607			return false
1608		}
1609		for i := range t.Fields {
1610			tf := &t.Fields[i]
1611			vf := &v.Fields[i]
1612			if tf.Name.Name() != vf.Name.Name() {
1613				return false
1614			}
1615			if !haveIdenticalType(tf.Typ, vf.Typ, cmpTags) {
1616				return false
1617			}
1618			if cmpTags && tf.Name.Tag() != vf.Name.Tag() {
1619				return false
1620			}
1621			if tf.Offset != vf.Offset {
1622				return false
1623			}
1624			if tf.Embedded() != vf.Embedded() {
1625				return false
1626			}
1627		}
1628		return true
1629	}
1630
1631	return false
1632}
1633
1634// typelinks is implemented in package runtime.
1635// It returns a slice of the sections in each module,
1636// and a slice of *rtype offsets in each module.
1637//
1638// The types in each module are sorted by string. That is, the first
1639// two linked types of the first module are:
1640//
1641//	d0 := sections[0]
1642//	t1 := (*rtype)(add(d0, offset[0][0]))
1643//	t2 := (*rtype)(add(d0, offset[0][1]))
1644//
1645// and
1646//
1647//	t1.String() < t2.String()
1648//
1649// Note that strings are not unique identifiers for types:
1650// there can be more than one with a given string.
1651// Only types we might want to look up are included:
1652// pointers, channels, maps, slices, and arrays.
1653func typelinks() (sections []unsafe.Pointer, offset [][]int32)
1654
1655// rtypeOff should be an internal detail,
1656// but widely used packages access it using linkname.
1657// Notable members of the hall of shame include:
1658//   - github.com/goccy/go-json
1659//
1660// Do not remove or change the type signature.
1661// See go.dev/issue/67401.
1662//
1663//go:linkname rtypeOff
1664func rtypeOff(section unsafe.Pointer, off int32) *abi.Type {
1665	return (*abi.Type)(add(section, uintptr(off), "sizeof(rtype) > 0"))
1666}
1667
1668// typesByString returns the subslice of typelinks() whose elements have
1669// the given string representation.
1670// It may be empty (no known types with that string) or may have
1671// multiple elements (multiple types with that string).
1672//
1673// typesByString should be an internal detail,
1674// but widely used packages access it using linkname.
1675// Notable members of the hall of shame include:
1676//   - github.com/aristanetworks/goarista
1677//   - fortio.org/log
1678//
1679// Do not remove or change the type signature.
1680// See go.dev/issue/67401.
1681//
1682//go:linkname typesByString
1683func typesByString(s string) []*abi.Type {
1684	sections, offset := typelinks()
1685	var ret []*abi.Type
1686
1687	for offsI, offs := range offset {
1688		section := sections[offsI]
1689
1690		// We are looking for the first index i where the string becomes >= s.
1691		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
1692		i, j := 0, len(offs)
1693		for i < j {
1694			h := int(uint(i+j) >> 1) // avoid overflow when computing h
1695			// i ≤ h < j
1696			if !(stringFor(rtypeOff(section, offs[h])) >= s) {
1697				i = h + 1 // preserves f(i-1) == false
1698			} else {
1699				j = h // preserves f(j) == true
1700			}
1701		}
1702		// i == j, f(i-1) == false, and f(j) (= f(i)) == true  =>  answer is i.
1703
1704		// Having found the first, linear scan forward to find the last.
1705		// We could do a second binary search, but the caller is going
1706		// to do a linear scan anyway.
1707		for j := i; j < len(offs); j++ {
1708			typ := rtypeOff(section, offs[j])
1709			if stringFor(typ) != s {
1710				break
1711			}
1712			ret = append(ret, typ)
1713		}
1714	}
1715	return ret
1716}
1717
1718// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
1719var lookupCache sync.Map // map[cacheKey]*rtype
1720
1721// A cacheKey is the key for use in the lookupCache.
1722// Four values describe any of the types we are looking for:
1723// type kind, one or two subtypes, and an extra integer.
1724type cacheKey struct {
1725	kind  Kind
1726	t1    *abi.Type
1727	t2    *abi.Type
1728	extra uintptr
1729}
1730
1731// The funcLookupCache caches FuncOf lookups.
1732// FuncOf does not share the common lookupCache since cacheKey is not
1733// sufficient to represent functions unambiguously.
1734var funcLookupCache struct {
1735	sync.Mutex // Guards stores (but not loads) on m.
1736
1737	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
1738	// Elements of m are append-only and thus safe for concurrent reading.
1739	m sync.Map
1740}
1741
1742// ChanOf returns the channel type with the given direction and element type.
1743// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1744//
1745// The gc runtime imposes a limit of 64 kB on channel element types.
1746// If t's size is equal to or exceeds this limit, ChanOf panics.
1747func ChanOf(dir ChanDir, t Type) Type {
1748	typ := t.common()
1749
1750	// Look in cache.
1751	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1752	if ch, ok := lookupCache.Load(ckey); ok {
1753		return ch.(*rtype)
1754	}
1755
1756	// This restriction is imposed by the gc compiler and the runtime.
1757	if typ.Size_ >= 1<<16 {
1758		panic("reflect.ChanOf: element size too large")
1759	}
1760
1761	// Look in known types.
1762	var s string
1763	switch dir {
1764	default:
1765		panic("reflect.ChanOf: invalid dir")
1766	case SendDir:
1767		s = "chan<- " + stringFor(typ)
1768	case RecvDir:
1769		s = "<-chan " + stringFor(typ)
1770	case BothDir:
1771		typeStr := stringFor(typ)
1772		if typeStr[0] == '<' {
1773			// typ is recv chan, need parentheses as "<-" associates with leftmost
1774			// chan possible, see:
1775			// * https://golang.org/ref/spec#Channel_types
1776			// * https://github.com/golang/go/issues/39897
1777			s = "chan (" + typeStr + ")"
1778		} else {
1779			s = "chan " + typeStr
1780		}
1781	}
1782	for _, tt := range typesByString(s) {
1783		ch := (*chanType)(unsafe.Pointer(tt))
1784		if ch.Elem == typ && ch.Dir == abi.ChanDir(dir) {
1785			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
1786			return ti.(Type)
1787		}
1788	}
1789
1790	// Make a channel type.
1791	var ichan any = (chan unsafe.Pointer)(nil)
1792	prototype := *(**chanType)(unsafe.Pointer(&ichan))
1793	ch := *prototype
1794	ch.TFlag = abi.TFlagRegularMemory
1795	ch.Dir = abi.ChanDir(dir)
1796	ch.Str = resolveReflectName(newName(s, "", false, false))
1797	ch.Hash = fnv1(typ.Hash, 'c', byte(dir))
1798	ch.Elem = typ
1799
1800	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&ch.Type))
1801	return ti.(Type)
1802}
1803
1804// MapOf returns the map type with the given key and element types.
1805// For example, if k represents int and e represents string,
1806// MapOf(k, e) represents map[int]string.
1807//
1808// If the key type is not a valid map key type (that is, if it does
1809// not implement Go's == operator), MapOf panics.
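//
// An illustrative use from client code (a sketch only; assumes the usual
// "reflect" and "fmt" imports):
//
//	mt := reflect.MapOf(reflect.TypeOf(0), reflect.TypeOf(""))
//	fmt.Println(mt) // map[int]string
//
//	m := reflect.MakeMap(mt)
//	m.SetMapIndex(reflect.ValueOf(1), reflect.ValueOf("one"))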
1810func MapOf(key, elem Type) Type {
1811	ktyp := key.common()
1812	etyp := elem.common()
1813
1814	if ktyp.Equal == nil {
1815		panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
1816	}
1817
1818	// Look in cache.
1819	ckey := cacheKey{Map, ktyp, etyp, 0}
1820	if mt, ok := lookupCache.Load(ckey); ok {
1821		return mt.(Type)
1822	}
1823
1824	// Look in known types.
1825	s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
1826	for _, tt := range typesByString(s) {
1827		mt := (*mapType)(unsafe.Pointer(tt))
1828		if mt.Key == ktyp && mt.Elem == etyp {
1829			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
1830			return ti.(Type)
1831		}
1832	}
1833
1834	// Make a map type.
1835	// Note: flag values must match those used in the TMAP case
1836	// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
1837	var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1838	mt := **(**mapType)(unsafe.Pointer(&imap))
1839	mt.Str = resolveReflectName(newName(s, "", false, false))
1840	mt.TFlag = 0
1841	mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
1842	mt.Key = ktyp
1843	mt.Elem = etyp
1844	mt.Bucket = bucketOf(ktyp, etyp)
1845	mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
1846		return typehash(ktyp, p, seed)
1847	}
1848	mt.Flags = 0
1849	if ktyp.Size_ > abi.MapMaxKeyBytes {
1850		mt.KeySize = uint8(goarch.PtrSize)
1851		mt.Flags |= 1 // indirect key
1852	} else {
1853		mt.KeySize = uint8(ktyp.Size_)
1854	}
1855	if etyp.Size_ > abi.MapMaxElemBytes {
1856		mt.ValueSize = uint8(goarch.PtrSize)
1857		mt.Flags |= 2 // indirect value
1858	} else {
		mt.ValueSize = uint8(etyp.Size_)
	}
	mt.BucketSize = uint16(mt.Bucket.Size_)
1862	if isReflexive(ktyp) {
1863		mt.Flags |= 4
1864	}
1865	if needKeyUpdate(ktyp) {
1866		mt.Flags |= 8
1867	}
1868	if hashMightPanic(ktyp) {
1869		mt.Flags |= 16
1870	}
1871	mt.PtrToThis = 0
1872
1873	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
1874	return ti.(Type)
1875}
1876
// funcTypes[n] caches the struct type used by initFuncTypes to allocate a
// funcType followed by n *rtype pointers (the in and out parameter types).
var funcTypes []Type
var funcTypesMutex sync.Mutex

// initFuncTypes returns funcTypes[n], creating and caching the entry on
// first use. It holds funcTypesMutex for the duration of the call.
func initFuncTypes(n int) Type {
1881	funcTypesMutex.Lock()
1882	defer funcTypesMutex.Unlock()
1883	if n >= len(funcTypes) {
1884		newFuncTypes := make([]Type, n+1)
1885		copy(newFuncTypes, funcTypes)
1886		funcTypes = newFuncTypes
1887	}
1888	if funcTypes[n] != nil {
1889		return funcTypes[n]
1890	}
1891
1892	funcTypes[n] = StructOf([]StructField{
1893		{
1894			Name: "FuncType",
1895			Type: TypeOf(funcType{}),
1896		},
1897		{
1898			Name: "Args",
1899			Type: ArrayOf(n, TypeOf(&rtype{})),
1900		},
1901	})
1902	return funcTypes[n]
1903}
1904
1905// FuncOf returns the function type with the given argument and result types.
// For example, if k represents int and e represents string,
1907// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1908//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if variadic is true and in[len(in)-1] does not represent a slice.
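//
// An illustrative use from client code (a sketch only; assumes the usual
// "reflect" and "fmt" imports):
//
//	in := []reflect.Type{reflect.TypeOf(0)}
//	out := []reflect.Type{reflect.TypeOf("")}
//	ft := reflect.FuncOf(in, out, false)
//	fmt.Println(ft) // func(int) string
//
// The resulting Type can be passed to MakeFunc to build a callable Value.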
1912func FuncOf(in, out []Type, variadic bool) Type {
1913	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1914		panic("reflect.FuncOf: last arg of variadic func must be slice")
1915	}
1916
1917	// Make a func type.
1918	var ifunc any = (func())(nil)
1919	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1920	n := len(in) + len(out)
1921
1922	if n > 128 {
1923		panic("reflect.FuncOf: too many arguments")
1924	}
1925
1926	o := New(initFuncTypes(n)).Elem()
1927	ft := (*funcType)(unsafe.Pointer(o.Field(0).Addr().Pointer()))
1928	args := unsafe.Slice((**rtype)(unsafe.Pointer(o.Field(1).Addr().Pointer())), n)[0:0:n]
1929	*ft = *prototype
1930
1931	// Build a hash and minimally populate ft.
1932	var hash uint32
1933	for _, in := range in {
1934		t := in.(*rtype)
1935		args = append(args, t)
1936		hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
1937	}
1938	if variadic {
1939		hash = fnv1(hash, 'v')
1940	}
1941	hash = fnv1(hash, '.')
1942	for _, out := range out {
1943		t := out.(*rtype)
1944		args = append(args, t)
1945		hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
1946	}
1947
1948	ft.TFlag = 0
1949	ft.Hash = hash
1950	ft.InCount = uint16(len(in))
1951	ft.OutCount = uint16(len(out))
1952	if variadic {
1953		ft.OutCount |= 1 << 15
1954	}
1955
1956	// Look in cache.
1957	if ts, ok := funcLookupCache.m.Load(hash); ok {
1958		for _, t := range ts.([]*abi.Type) {
1959			if haveIdenticalUnderlyingType(&ft.Type, t, true) {
1960				return toRType(t)
1961			}
1962		}
1963	}
1964
1965	// Not in cache, lock and retry.
1966	funcLookupCache.Lock()
1967	defer funcLookupCache.Unlock()
1968	if ts, ok := funcLookupCache.m.Load(hash); ok {
1969		for _, t := range ts.([]*abi.Type) {
1970			if haveIdenticalUnderlyingType(&ft.Type, t, true) {
1971				return toRType(t)
1972			}
1973		}
1974	}
1975
1976	addToCache := func(tt *abi.Type) Type {
1977		var rts []*abi.Type
1978		if rti, ok := funcLookupCache.m.Load(hash); ok {
1979			rts = rti.([]*abi.Type)
1980		}
1981		funcLookupCache.m.Store(hash, append(rts, tt))
1982		return toType(tt)
1983	}
1984
1985	// Look in known types for the same string representation.
1986	str := funcStr(ft)
1987	for _, tt := range typesByString(str) {
1988		if haveIdenticalUnderlyingType(&ft.Type, tt, true) {
1989			return addToCache(tt)
1990		}
1991	}
1992
1993	// Populate the remaining fields of ft and store in cache.
1994	ft.Str = resolveReflectName(newName(str, "", false, false))
1995	ft.PtrToThis = 0
1996	return addToCache(&ft.Type)
1997}

// stringFor returns the string representation of the type t.
func stringFor(t *abi.Type) string {
1999	return toRType(t).String()
2000}
2001
2002// funcStr builds a string representation of a funcType.
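//
// For example (illustrative), a variadic signature such as
// func(int, ...string) (bool, error) is rendered as
// "func(int, ...string) (bool, error)"; results are parenthesized only when
// there is more than one.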
2003func funcStr(ft *funcType) string {
2004	repr := make([]byte, 0, 64)
2005	repr = append(repr, "func("...)
2006	for i, t := range ft.InSlice() {
2007		if i > 0 {
2008			repr = append(repr, ", "...)
2009		}
2010		if ft.IsVariadic() && i == int(ft.InCount)-1 {
2011			repr = append(repr, "..."...)
2012			repr = append(repr, stringFor((*sliceType)(unsafe.Pointer(t)).Elem)...)
2013		} else {
2014			repr = append(repr, stringFor(t)...)
2015		}
2016	}
2017	repr = append(repr, ')')
2018	out := ft.OutSlice()
2019	if len(out) == 1 {
2020		repr = append(repr, ' ')
2021	} else if len(out) > 1 {
2022		repr = append(repr, " ("...)
2023	}
2024	for i, t := range out {
2025		if i > 0 {
2026			repr = append(repr, ", "...)
2027		}
2028		repr = append(repr, stringFor(t)...)
2029	}
2030	if len(out) > 1 {
2031		repr = append(repr, ')')
2032	}
2033	return string(repr)
2034}
2035
2036// isReflexive reports whether the == operation on the type is reflexive.
2037// That is, x == x for all values x of type t.
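//
// For example (illustrative), int is reflexive but float64 is not, since
//
//	math.NaN() == math.NaN() // false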
2038func isReflexive(t *abi.Type) bool {
2039	switch Kind(t.Kind()) {
2040	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
2041		return true
2042	case Float32, Float64, Complex64, Complex128, Interface:
2043		return false
2044	case Array:
2045		tt := (*arrayType)(unsafe.Pointer(t))
2046		return isReflexive(tt.Elem)
2047	case Struct:
2048		tt := (*structType)(unsafe.Pointer(t))
2049		for _, f := range tt.Fields {
2050			if !isReflexive(f.Typ) {
2051				return false
2052			}
2053		}
2054		return true
2055	default:
2056		// Func, Map, Slice, Invalid
2057		panic("isReflexive called on non-key type " + stringFor(t))
2058	}
2059}
2060
2061// needKeyUpdate reports whether map overwrites require the key to be copied.
2062func needKeyUpdate(t *abi.Type) bool {
2063	switch Kind(t.Kind()) {
2064	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
2065		return false
2066	case Float32, Float64, Complex64, Complex128, Interface, String:
2067		// Float keys can be updated from +0 to -0.
2068		// String keys can be updated to use a smaller backing store.
2069		// Interfaces might have floats or strings in them.
2070		return true
2071	case Array:
2072		tt := (*arrayType)(unsafe.Pointer(t))
2073		return needKeyUpdate(tt.Elem)
2074	case Struct:
2075		tt := (*structType)(unsafe.Pointer(t))
2076		for _, f := range tt.Fields {
2077			if needKeyUpdate(f.Typ) {
2078				return true
2079			}
2080		}
2081		return false
2082	default:
2083		// Func, Map, Slice, Invalid
2084		panic("needKeyUpdate called on non-key type " + stringFor(t))
2085	}
2086}
2087
2088// hashMightPanic reports whether the hash of a map key of type t might panic.
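//
// For example (illustrative), an interface key may hold an unhashable dynamic
// type, in which case hashing it panics at run time:
//
//	m := map[any]int{}
//	m[func() {}] = 1 // panics: hash of unhashable type func()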
2089func hashMightPanic(t *abi.Type) bool {
2090	switch Kind(t.Kind()) {
2091	case Interface:
2092		return true
2093	case Array:
2094		tt := (*arrayType)(unsafe.Pointer(t))
2095		return hashMightPanic(tt.Elem)
2096	case Struct:
2097		tt := (*structType)(unsafe.Pointer(t))
2098		for _, f := range tt.Fields {
2099			if hashMightPanic(f.Typ) {
2100				return true
2101			}
2102		}
2103		return false
2104	default:
2105		return false
2106	}
2107}
2108
// bucketOf constructs the runtime bucket type for a map with the given key
// and element types, including its GC pointer mask.
func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
2110	if ktyp.Size_ > abi.MapMaxKeyBytes {
2111		ktyp = ptrTo(ktyp)
2112	}
2113	if etyp.Size_ > abi.MapMaxElemBytes {
2114		etyp = ptrTo(etyp)
2115	}
2116
2117	// Prepare GC data if any.
2118	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
2119	// or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
2120	// Note that since the key and value are known to be <= 128 bytes,
2121	// they're guaranteed to have bitmaps instead of GC programs.
2122	var gcdata *byte
2123	var ptrdata uintptr
2124
2125	size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
2126	if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
2127		panic("reflect: bad size computation in MapOf")
2128	}
2129
2130	if ktyp.Pointers() || etyp.Pointers() {
2131		nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
2132		n := (nptr + 7) / 8
2133
2134		// Runtime needs pointer masks to be a multiple of uintptr in size.
2135		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
2136		mask := make([]byte, n)
2137		base := uintptr(abi.MapBucketCount / goarch.PtrSize)
2138
2139		if ktyp.Pointers() {
2140			emitGCMask(mask, base, ktyp, abi.MapBucketCount)
2141		}
2142		base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize
2143
2144		if etyp.Pointers() {
2145			emitGCMask(mask, base, etyp, abi.MapBucketCount)
2146		}
2147		base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize
2148
2149		word := base
2150		mask[word/8] |= 1 << (word % 8)
2151		gcdata = &mask[0]
2152		ptrdata = (word + 1) * goarch.PtrSize
2153
2154		// overflow word must be last
2155		if ptrdata != size {
2156			panic("reflect: bad layout computation in MapOf")
2157		}
2158	}
2159
2160	b := &abi.Type{
2161		Align_:   goarch.PtrSize,
2162		Size_:    size,
2163		Kind_:    abi.Struct,
2164		PtrBytes: ptrdata,
2165		GCData:   gcdata,
2166	}
2167	s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
2168	b.Str = resolveReflectName(newName(s, "", false, false))
2169	return b
2170}
2171
// gcSlice returns the byte slice [begin, end) of t's GC data.
func (t *rtype) gcSlice(begin, end uintptr) []byte {
2173	return (*[1 << 30]byte)(unsafe.Pointer(t.t.GCData))[begin:end:end]
2174}
2175
2176// emitGCMask writes the GC mask for [n]typ into out, starting at bit
2177// offset base.
2178func emitGCMask(out []byte, base uintptr, typ *abi.Type, n uintptr) {
2179	if typ.Kind_&abi.KindGCProg != 0 {
2180		panic("reflect: unexpected GC program")
2181	}
2182	ptrs := typ.PtrBytes / goarch.PtrSize
2183	words := typ.Size_ / goarch.PtrSize
2184	mask := typ.GcSlice(0, (ptrs+7)/8)
2185	for j := uintptr(0); j < ptrs; j++ {
2186		if (mask[j/8]>>(j%8))&1 != 0 {
2187			for i := uintptr(0); i < n; i++ {
2188				k := base + i*words + j
2189				out[k/8] |= 1 << (k % 8)
2190			}
2191		}
2192	}
2193}
2194
2195// appendGCProg appends the GC program for the first ptrdata bytes of
2196// typ to dst and returns the extended slice.
2197func appendGCProg(dst []byte, typ *abi.Type) []byte {
2198	if typ.Kind_&abi.KindGCProg != 0 {
2199		// Element has GC program; emit one element.
2200		n := uintptr(*(*uint32)(unsafe.Pointer(typ.GCData)))
2201		prog := typ.GcSlice(4, 4+n-1)
2202		return append(dst, prog...)
2203	}
2204
2205	// Element is small with pointer mask; use as literal bits.
2206	ptrs := typ.PtrBytes / goarch.PtrSize
2207	mask := typ.GcSlice(0, (ptrs+7)/8)
2208
2209	// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2210	for ; ptrs > 120; ptrs -= 120 {
2211		dst = append(dst, 120)
2212		dst = append(dst, mask[:15]...)
2213		mask = mask[15:]
2214	}
2215
2216	dst = append(dst, byte(ptrs))
2217	dst = append(dst, mask...)
2218	return dst
2219}
2220
2221// SliceOf returns the slice type with element type t.
2222// For example, if t represents int, SliceOf(t) represents []int.
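//
// An illustrative use from client code (a sketch only; assumes the usual
// "reflect" and "fmt" imports):
//
//	st := reflect.SliceOf(reflect.TypeOf(0))
//	s := reflect.MakeSlice(st, 0, 4)
//	s = reflect.Append(s, reflect.ValueOf(7))
//	fmt.Println(st, s.Len()) // []int 1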
2223func SliceOf(t Type) Type {
2224	typ := t.common()
2225
2226	// Look in cache.
2227	ckey := cacheKey{Slice, typ, nil, 0}
2228	if slice, ok := lookupCache.Load(ckey); ok {
2229		return slice.(Type)
2230	}
2231
2232	// Look in known types.
2233	s := "[]" + stringFor(typ)
2234	for _, tt := range typesByString(s) {
2235		slice := (*sliceType)(unsafe.Pointer(tt))
2236		if slice.Elem == typ {
2237			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
2238			return ti.(Type)
2239		}
2240	}
2241
2242	// Make a slice type.
2243	var islice any = ([]unsafe.Pointer)(nil)
2244	prototype := *(**sliceType)(unsafe.Pointer(&islice))
2245	slice := *prototype
2246	slice.TFlag = 0
2247	slice.Str = resolveReflectName(newName(s, "", false, false))
2248	slice.Hash = fnv1(typ.Hash, '[')
2249	slice.Elem = typ
2250	slice.PtrToThis = 0
2251
2252	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&slice.Type))
2253	return ti.(Type)
2254}
2255
2256// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with the dynamically allocated type descriptor.
2259var structLookupCache struct {
2260	sync.Mutex // Guards stores (but not loads) on m.
2261
2262	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
2263	// Elements in m are append-only and thus safe for concurrent reading.
2264	m sync.Map
2265}
2266
2267type structTypeUncommon struct {
2268	structType
2269	u uncommonType
2270}
2271
// isLetter reports whether ch is classified as a letter for the purposes of
// Go identifiers: a Unicode letter or an underscore.
2273func isLetter(ch rune) bool {
2274	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
2275}
2276
// isValidFieldName reports whether fieldName is a valid (struct) field name.
2278//
2279// According to the language spec, a field name should be an identifier.
2280//
2281// identifier = letter { letter | unicode_digit } .
2282// letter = unicode_letter | "_" .
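//
// For example (illustrative), "Name1", "_x9" and "αβ" are valid field names,
// while "", "1x" and "a-b" are not.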
2283func isValidFieldName(fieldName string) bool {
2284	for i, c := range fieldName {
2285		if i == 0 && !isLetter(c) {
2286			return false
2287		}
2288
2289		if !(isLetter(c) || unicode.IsDigit(c)) {
2290			return false
2291		}
2292	}
2293
2294	return len(fieldName) > 0
2295}
2296
// isRegularMemory reports whether values of type t can be compared by
// comparing their memory directly.
// This must match cmd/compile/internal/compare.IsRegularMemory.
2298func isRegularMemory(t Type) bool {
2299	switch t.Kind() {
2300	case Array:
2301		elem := t.Elem()
2302		if isRegularMemory(elem) {
2303			return true
2304		}
2305		return elem.Comparable() && t.Len() == 0
2306	case Int8, Int16, Int32, Int64, Int, Uint8, Uint16, Uint32, Uint64, Uint, Uintptr, Chan, Pointer, Bool, UnsafePointer:
2307		return true
2308	case Struct:
2309		num := t.NumField()
2310		switch num {
2311		case 0:
2312			return true
2313		case 1:
2314			field := t.Field(0)
2315			if field.Name == "_" {
2316				return false
2317			}
2318			return isRegularMemory(field.Type)
2319		default:
2320			for i := range num {
2321				field := t.Field(i)
2322				if field.Name == "_" || !isRegularMemory(field.Type) || isPaddedField(t, i) {
2323					return false
2324				}
2325			}
2326			return true
2327		}
2328	}
2329	return false
2330}
2331
2332// isPaddedField reports whether the i'th field of struct type t is followed
2333// by padding.
2334func isPaddedField(t Type, i int) bool {
2335	field := t.Field(i)
2336	if i+1 < t.NumField() {
2337		return field.Offset+field.Type.Size() != t.Field(i+1).Offset
2338	}
2339	return field.Offset+field.Type.Size() != t.Size()
2340}
2341
2342// StructOf returns the struct type containing fields.
2343// The Offset and Index fields are ignored and computed as they would be
2344// by the compiler.
2345//
2346// StructOf currently does not support promoted methods of embedded fields
2347// and panics if passed unexported StructFields.
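//
// An illustrative use from client code (a sketch only; assumes the usual
// "reflect" and "fmt" imports):
//
//	t := reflect.StructOf([]reflect.StructField{
//		{Name: "Name", Type: reflect.TypeOf(""), Tag: `json:"name"`},
//		{Name: "Age", Type: reflect.TypeOf(0)},
//	})
//	v := reflect.New(t).Elem()
//	v.Field(0).SetString("Gopher")
//	fmt.Println(v.Interface()) // {Gopher 0}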
2348func StructOf(fields []StructField) Type {
2349	var (
2350		hash       = fnv1(0, []byte("struct {")...)
2351		size       uintptr
2352		typalign   uint8
2353		comparable = true
2354		methods    []abi.Method
2355
2356		fs   = make([]structField, len(fields))
2357		repr = make([]byte, 0, 64)
2358		fset = map[string]struct{}{} // fields' names
2359
2360		hasGCProg = false // records whether a struct-field type has a GCProg
2361	)
2362
2363	lastzero := uintptr(0)
2364	repr = append(repr, "struct {"...)
2365	pkgpath := ""
2366	for i, field := range fields {
2367		if field.Name == "" {
2368			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
2369		}
2370		if !isValidFieldName(field.Name) {
2371			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
2372		}
2373		if field.Type == nil {
2374			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
2375		}
2376		f, fpkgpath := runtimeStructField(field)
2377		ft := f.Typ
2378		if ft.Kind_&abi.KindGCProg != 0 {
2379			hasGCProg = true
2380		}
2381		if fpkgpath != "" {
2382			if pkgpath == "" {
2383				pkgpath = fpkgpath
2384			} else if pkgpath != fpkgpath {
				panic("reflect.StructOf: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
2386			}
2387		}
2388
2389		// Update string and hash
2390		name := f.Name.Name()
2391		hash = fnv1(hash, []byte(name)...)
2392		if !f.Embedded() {
2393			repr = append(repr, (" " + name)...)
2394		} else {
2395			// Embedded field
2396			if f.Typ.Kind() == abi.Pointer {
2397				// Embedded ** and *interface{} are illegal
2398				elem := ft.Elem()
2399				if k := elem.Kind(); k == abi.Pointer || k == abi.Interface {
2400					panic("reflect.StructOf: illegal embedded field type " + stringFor(ft))
2401				}
2402			}
2403
2404			switch Kind(f.Typ.Kind()) {
2405			case Interface:
2406				ift := (*interfaceType)(unsafe.Pointer(ft))
2407				for _, m := range ift.Methods {
2408					if pkgPath(ift.nameOff(m.Name)) != "" {
2409						// TODO(sbinet).  Issue 15924.
2410						panic("reflect: embedded interface with unexported method(s) not implemented")
2411					}
2412
2413					fnStub := resolveReflectText(unsafe.Pointer(abi.FuncPCABIInternal(embeddedIfaceMethStub)))
2414					methods = append(methods, abi.Method{
2415						Name: resolveReflectName(ift.nameOff(m.Name)),
2416						Mtyp: resolveReflectType(ift.typeOff(m.Typ)),
2417						Ifn:  fnStub,
2418						Tfn:  fnStub,
2419					})
2420				}
2421			case Pointer:
2422				ptr := (*ptrType)(unsafe.Pointer(ft))
2423				if unt := ptr.Uncommon(); unt != nil {
2424					if i > 0 && unt.Mcount > 0 {
2425						// Issue 15924.
2426						panic("reflect: embedded type with methods not implemented if type is not first field")
2427					}
2428					if len(fields) > 1 {
2429						panic("reflect: embedded type with methods not implemented if there is more than one field")
2430					}
2431					for _, m := range unt.Methods() {
2432						mname := nameOffFor(ft, m.Name)
2433						if pkgPath(mname) != "" {
2434							// TODO(sbinet).
2435							// Issue 15924.
							panic("reflect: embedded type with unexported method(s) not implemented")
2437						}
2438						methods = append(methods, abi.Method{
2439							Name: resolveReflectName(mname),
2440							Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
2441							Ifn:  resolveReflectText(textOffFor(ft, m.Ifn)),
2442							Tfn:  resolveReflectText(textOffFor(ft, m.Tfn)),
2443						})
2444					}
2445				}
2446				if unt := ptr.Elem.Uncommon(); unt != nil {
2447					for _, m := range unt.Methods() {
2448						mname := nameOffFor(ft, m.Name)
2449						if pkgPath(mname) != "" {
2450							// TODO(sbinet)
2451							// Issue 15924.
							panic("reflect: embedded type with unexported method(s) not implemented")
2453						}
2454						methods = append(methods, abi.Method{
2455							Name: resolveReflectName(mname),
2456							Mtyp: resolveReflectType(typeOffFor(ptr.Elem, m.Mtyp)),
2457							Ifn:  resolveReflectText(textOffFor(ptr.Elem, m.Ifn)),
2458							Tfn:  resolveReflectText(textOffFor(ptr.Elem, m.Tfn)),
2459						})
2460					}
2461				}
2462			default:
2463				if unt := ft.Uncommon(); unt != nil {
2464					if i > 0 && unt.Mcount > 0 {
2465						// Issue 15924.
2466						panic("reflect: embedded type with methods not implemented if type is not first field")
2467					}
2468					if len(fields) > 1 && ft.Kind_&abi.KindDirectIface != 0 {
2469						panic("reflect: embedded type with methods not implemented for non-pointer type")
2470					}
2471					for _, m := range unt.Methods() {
2472						mname := nameOffFor(ft, m.Name)
2473						if pkgPath(mname) != "" {
2474							// TODO(sbinet)
2475							// Issue 15924.
							panic("reflect: embedded type with unexported method(s) not implemented")
2477						}
2478						methods = append(methods, abi.Method{
2479							Name: resolveReflectName(mname),
2480							Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
2481							Ifn:  resolveReflectText(textOffFor(ft, m.Ifn)),
2482							Tfn:  resolveReflectText(textOffFor(ft, m.Tfn)),
2483						})
2485					}
2486				}
2487			}
2488		}
2489		if _, dup := fset[name]; dup && name != "_" {
2490			panic("reflect.StructOf: duplicate field " + name)
2491		}
2492		fset[name] = struct{}{}
2493
2494		hash = fnv1(hash, byte(ft.Hash>>24), byte(ft.Hash>>16), byte(ft.Hash>>8), byte(ft.Hash))
2495
2496		repr = append(repr, (" " + stringFor(ft))...)
2497		if f.Name.HasTag() {
2498			hash = fnv1(hash, []byte(f.Name.Tag())...)
2499			repr = append(repr, (" " + strconv.Quote(f.Name.Tag()))...)
2500		}
2501		if i < len(fields)-1 {
2502			repr = append(repr, ';')
2503		}
2504
2505		comparable = comparable && (ft.Equal != nil)
2506
2507		offset := align(size, uintptr(ft.Align_))
2508		if offset < size {
2509			panic("reflect.StructOf: struct size would exceed virtual address space")
2510		}
2511		if ft.Align_ > typalign {
2512			typalign = ft.Align_
2513		}
2514		size = offset + ft.Size_
2515		if size < offset {
2516			panic("reflect.StructOf: struct size would exceed virtual address space")
2517		}
2518		f.Offset = offset
2519
2520		if ft.Size_ == 0 {
2521			lastzero = size
2522		}
2523
2524		fs[i] = f
2525	}
2526
2527	if size > 0 && lastzero == size {
2528		// This is a non-zero sized struct that ends in a
2529		// zero-sized field. We add an extra byte of padding,
2530		// to ensure that taking the address of the final
2531		// zero-sized field can't manufacture a pointer to the
2532		// next object in the heap. See issue 9401.
2533		size++
2534		if size == 0 {
2535			panic("reflect.StructOf: struct size would exceed virtual address space")
2536		}
2537	}
2538
2539	var typ *structType
2540	var ut *uncommonType
2541
2542	if len(methods) == 0 {
2543		t := new(structTypeUncommon)
2544		typ = &t.structType
2545		ut = &t.u
2546	} else {
2547		// A *rtype representing a struct is followed directly in memory by an
2548		// array of method objects representing the methods attached to the
2549		// struct. To get the same layout for a run time generated type, we
2550		// need an array directly following the uncommonType memory.
		// A similar strategy is used in initFuncTypes, for FuncOf.
2552		tt := New(StructOf([]StructField{
2553			{Name: "S", Type: TypeOf(structType{})},
2554			{Name: "U", Type: TypeOf(uncommonType{})},
2555			{Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
2556		}))
2557
2558		typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer())
2559		ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer())
2560
2561		copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]abi.Method), methods)
2562	}
2563	// TODO(sbinet): Once we allow embedding multiple types,
2564	// methods will need to be sorted like the compiler does.
2565	// TODO(sbinet): Once we allow non-exported methods, we will
2566	// need to compute xcount as the number of exported methods.
2567	ut.Mcount = uint16(len(methods))
2568	ut.Xcount = ut.Mcount
2569	ut.Moff = uint32(unsafe.Sizeof(uncommonType{}))
2570
2571	if len(fs) > 0 {
2572		repr = append(repr, ' ')
2573	}
2574	repr = append(repr, '}')
2575	hash = fnv1(hash, '}')
2576	str := string(repr)
2577
2578	// Round the size up to be a multiple of the alignment.
2579	s := align(size, uintptr(typalign))
2580	if s < size {
2581		panic("reflect.StructOf: struct size would exceed virtual address space")
2582	}
2583	size = s
2584
2585	// Make the struct type.
2586	var istruct any = struct{}{}
2587	prototype := *(**structType)(unsafe.Pointer(&istruct))
2588	*typ = *prototype
2589	typ.Fields = fs
2590	if pkgpath != "" {
2591		typ.PkgPath = newName(pkgpath, "", false, false)
2592	}
2593
2594	// Look in cache.
2595	if ts, ok := structLookupCache.m.Load(hash); ok {
2596		for _, st := range ts.([]Type) {
2597			t := st.common()
2598			if haveIdenticalUnderlyingType(&typ.Type, t, true) {
2599				return toType(t)
2600			}
2601		}
2602	}
2603
2604	// Not in cache, lock and retry.
2605	structLookupCache.Lock()
2606	defer structLookupCache.Unlock()
2607	if ts, ok := structLookupCache.m.Load(hash); ok {
2608		for _, st := range ts.([]Type) {
2609			t := st.common()
2610			if haveIdenticalUnderlyingType(&typ.Type, t, true) {
2611				return toType(t)
2612			}
2613		}
2614	}
2615
2616	addToCache := func(t Type) Type {
2617		var ts []Type
2618		if ti, ok := structLookupCache.m.Load(hash); ok {
2619			ts = ti.([]Type)
2620		}
2621		structLookupCache.m.Store(hash, append(ts, t))
2622		return t
2623	}
2624
2625	// Look in known types.
2626	for _, t := range typesByString(str) {
2627		if haveIdenticalUnderlyingType(&typ.Type, t, true) {
2628			// even if 't' wasn't a structType with methods, we should be ok
2629			// as the 'u uncommonType' field won't be accessed except when
2630			// tflag&abi.TFlagUncommon is set.
2631			return addToCache(toType(t))
2632		}
2633	}
2634
2635	typ.Str = resolveReflectName(newName(str, "", false, false))
2636	if isRegularMemory(toType(&typ.Type)) {
2637		typ.TFlag = abi.TFlagRegularMemory
2638	} else {
2639		typ.TFlag = 0
2640	}
2641	typ.Hash = hash
2642	typ.Size_ = size
2643	typ.PtrBytes = typeptrdata(&typ.Type)
2644	typ.Align_ = typalign
2645	typ.FieldAlign_ = typalign
2646	typ.PtrToThis = 0
2647	if len(methods) > 0 {
2648		typ.TFlag |= abi.TFlagUncommon
2649	}
2650
2651	if hasGCProg {
2652		lastPtrField := 0
2653		for i, ft := range fs {
2654			if ft.Typ.Pointers() {
2655				lastPtrField = i
2656			}
2657		}
2658		prog := []byte{0, 0, 0, 0} // will be length of prog
2659		var off uintptr
2660		for i, ft := range fs {
2661			if i > lastPtrField {
2662				// gcprog should not include anything for any field after
2663				// the last field that contains pointer data
2664				break
2665			}
2666			if !ft.Typ.Pointers() {
2667				// Ignore pointerless fields.
2668				continue
2669			}
2670			// Pad to start of this field with zeros.
2671			if ft.Offset > off {
2672				n := (ft.Offset - off) / goarch.PtrSize
2673				prog = append(prog, 0x01, 0x00) // emit a 0 bit
2674				if n > 1 {
2675					prog = append(prog, 0x81)      // repeat previous bit
2676					prog = appendVarint(prog, n-1) // n-1 times
2677				}
2678				off = ft.Offset
2679			}
2680
2681			prog = appendGCProg(prog, ft.Typ)
2682			off += ft.Typ.PtrBytes
2683		}
2684		prog = append(prog, 0)
2685		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2686		typ.Kind_ |= abi.KindGCProg
2687		typ.GCData = &prog[0]
2688	} else {
2689		typ.Kind_ &^= abi.KindGCProg
2690		bv := new(bitVector)
2691		addTypeBits(bv, 0, &typ.Type)
2692		if len(bv.data) > 0 {
2693			typ.GCData = &bv.data[0]
2694		}
2695	}
2696	typ.Equal = nil
2697	if comparable {
2698		typ.Equal = func(p, q unsafe.Pointer) bool {
2699			for _, ft := range typ.Fields {
2700				pi := add(p, ft.Offset, "&x.field safe")
2701				qi := add(q, ft.Offset, "&x.field safe")
2702				if !ft.Typ.Equal(pi, qi) {
2703					return false
2704				}
2705			}
2706			return true
2707		}
2708	}
2709
2710	switch {
2711	case len(fs) == 1 && !fs[0].Typ.IfaceIndir():
2712		// structs of 1 direct iface type can be direct
2713		typ.Kind_ |= abi.KindDirectIface
2714	default:
2715		typ.Kind_ &^= abi.KindDirectIface
2716	}
2717
2718	return addToCache(toType(&typ.Type))
2719}
2720
2721func embeddedIfaceMethStub() {
2722	panic("reflect: StructOf does not support methods of embedded interfaces")
2723}
2724
2725// runtimeStructField takes a StructField value passed to StructOf and
2726// returns both the corresponding internal representation, of type
2727// structField, and the pkgpath value to use for this field.
2728func runtimeStructField(field StructField) (structField, string) {
2729	if field.Anonymous && field.PkgPath != "" {
2730		panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
2731	}
2732
2733	if field.IsExported() {
2734		// Best-effort check for misuse.
2735		// Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
2736		c := field.Name[0]
2737		if 'a' <= c && c <= 'z' || c == '_' {
2738			panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2739		}
2740	}
2741
2742	resolveReflectType(field.Type.common()) // install in runtime
2743	f := structField{
2744		Name:   newName(field.Name, string(field.Tag), field.IsExported(), field.Anonymous),
2745		Typ:    field.Type.common(),
2746		Offset: 0,
2747	}
2748	return f, field.PkgPath
2749}
2750
2751// typeptrdata returns the length in bytes of the prefix of t
2752// containing pointer data. Anything after this offset is scalar data.
2753// keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
2754func typeptrdata(t *abi.Type) uintptr {
2755	switch t.Kind() {
2756	case abi.Struct:
2757		st := (*structType)(unsafe.Pointer(t))
2758		// find the last field that has pointers.
2759		field := -1
2760		for i := range st.Fields {
2761			ft := st.Fields[i].Typ
2762			if ft.Pointers() {
2763				field = i
2764			}
2765		}
2766		if field == -1 {
2767			return 0
2768		}
2769		f := st.Fields[field]
2770		return f.Offset + f.Typ.PtrBytes
2771
2772	default:
2773		panic("reflect.typeptrdata: unexpected type, " + stringFor(t))
2774	}
2775}
2776
2777// ArrayOf returns the array type with the given length and element type.
2778// For example, if t represents int, ArrayOf(5, t) represents [5]int.
2779//
2780// If the resulting type would be larger than the available address space,
2781// ArrayOf panics.
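//
// An illustrative use from client code (a sketch only; assumes the usual
// "reflect" and "fmt" imports):
//
//	at := reflect.ArrayOf(5, reflect.TypeOf(0))
//	fmt.Println(at, at.Len(), at.Size()) // [5]int 5 40 (on a 64-bit platform)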
2782func ArrayOf(length int, elem Type) Type {
2783	if length < 0 {
2784		panic("reflect: negative length passed to ArrayOf")
2785	}
2786
2787	typ := elem.common()
2788
2789	// Look in cache.
2790	ckey := cacheKey{Array, typ, nil, uintptr(length)}
2791	if array, ok := lookupCache.Load(ckey); ok {
2792		return array.(Type)
2793	}
2794
2795	// Look in known types.
2796	s := "[" + strconv.Itoa(length) + "]" + stringFor(typ)
2797	for _, tt := range typesByString(s) {
2798		array := (*arrayType)(unsafe.Pointer(tt))
2799		if array.Elem == typ {
2800			ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
2801			return ti.(Type)
2802		}
2803	}
2804
2805	// Make an array type.
2806	var iarray any = [1]unsafe.Pointer{}
2807	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
2808	array := *prototype
2809	array.TFlag = typ.TFlag & abi.TFlagRegularMemory
2810	array.Str = resolveReflectName(newName(s, "", false, false))
2811	array.Hash = fnv1(typ.Hash, '[')
2812	for n := uint32(length); n > 0; n >>= 8 {
2813		array.Hash = fnv1(array.Hash, byte(n))
2814	}
2815	array.Hash = fnv1(array.Hash, ']')
2816	array.Elem = typ
2817	array.PtrToThis = 0
2818	if typ.Size_ > 0 {
2819		max := ^uintptr(0) / typ.Size_
2820		if uintptr(length) > max {
2821			panic("reflect.ArrayOf: array size would exceed virtual address space")
2822		}
2823	}
2824	array.Size_ = typ.Size_ * uintptr(length)
2825	if length > 0 && typ.Pointers() {
2826		array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
2827	}
2828	array.Align_ = typ.Align_
2829	array.FieldAlign_ = typ.FieldAlign_
2830	array.Len = uintptr(length)
2831	array.Slice = &(SliceOf(elem).(*rtype).t)
2832
2833	switch {
2834	case !typ.Pointers() || array.Size_ == 0:
2835		// No pointers.
2836		array.GCData = nil
2837		array.PtrBytes = 0
2838
2839	case length == 1:
2840		// In memory, 1-element array looks just like the element.
2841		array.Kind_ |= typ.Kind_ & abi.KindGCProg
2842		array.GCData = typ.GCData
2843		array.PtrBytes = typ.PtrBytes
2844
2845	case typ.Kind_&abi.KindGCProg == 0 && array.Size_ <= abi.MaxPtrmaskBytes*8*goarch.PtrSize:
2846		// Element is small with pointer mask; array is still small.
2847		// Create direct pointer mask by turning each 1 bit in elem
2848		// into length 1 bits in larger mask.
2849		n := (array.PtrBytes/goarch.PtrSize + 7) / 8
2850		// Runtime needs pointer masks to be a multiple of uintptr in size.
2851		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
2852		mask := make([]byte, n)
2853		emitGCMask(mask, 0, typ, array.Len)
2854		array.GCData = &mask[0]
2855
2856	default:
2857		// Create program that emits one element
2858		// and then repeats to make the array.
2859		prog := []byte{0, 0, 0, 0} // will be length of prog
2860		prog = appendGCProg(prog, typ)
2861		// Pad from ptrdata to size.
2862		elemPtrs := typ.PtrBytes / goarch.PtrSize
2863		elemWords := typ.Size_ / goarch.PtrSize
2864		if elemPtrs < elemWords {
2865			// Emit literal 0 bit, then repeat as needed.
2866			prog = append(prog, 0x01, 0x00)
2867			if elemPtrs+1 < elemWords {
2868				prog = append(prog, 0x81)
2869				prog = appendVarint(prog, elemWords-elemPtrs-1)
2870			}
2871		}
2872		// Repeat length-1 times.
2873		if elemWords < 0x80 {
2874			prog = append(prog, byte(elemWords|0x80))
2875		} else {
2876			prog = append(prog, 0x80)
2877			prog = appendVarint(prog, elemWords)
2878		}
2879		prog = appendVarint(prog, uintptr(length)-1)
2880		prog = append(prog, 0)
2881		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2882		array.Kind_ |= abi.KindGCProg
2883		array.GCData = &prog[0]
2884		array.PtrBytes = array.Size_ // overestimate but ok; must match program
2885	}
2886
2887	etyp := typ
2888	esize := etyp.Size()
2889
2890	array.Equal = nil
2891	if eequal := etyp.Equal; eequal != nil {
2892		array.Equal = func(p, q unsafe.Pointer) bool {
2893			for i := 0; i < length; i++ {
2894				pi := arrayAt(p, i, esize, "i < length")
2895				qi := arrayAt(q, i, esize, "i < length")
2896				if !eequal(pi, qi) {
2897					return false
2898				}
2900			}
2901			return true
2902		}
2903	}
2904
2905	switch {
2906	case length == 1 && !typ.IfaceIndir():
2907		// array of 1 direct iface type can be direct
2908		array.Kind_ |= abi.KindDirectIface
2909	default:
2910		array.Kind_ &^= abi.KindDirectIface
2911	}
2912
2913	ti, _ := lookupCache.LoadOrStore(ckey, toRType(&array.Type))
2914	return ti.(Type)
2915}
2916
// appendVarint appends the unsigned varint encoding of v to x (7 bits per
// byte, low-order group first, high bit set on all but the final byte) and
// returns the extended slice. It is used for the repeat counts in the GC
// programs built in this file.
func appendVarint(x []byte, v uintptr) []byte {
2918	for ; v >= 0x80; v >>= 7 {
2919		x = append(x, byte(v|0x80))
2920	}
2921	x = append(x, byte(v))
2922	return x
2923}
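
// As an illustration of the encoding (low-order 7-bit group first, high bit
// set on all but the final byte):
//
//	appendVarint(nil, 300) // == []byte{0xAC, 0x02}, since 300 == 0x2C | 2<<7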
2924
2925// toType converts from a *rtype to a Type that can be returned
2926// to the client of package reflect. In gc, the only concern is that
2927// a nil *rtype must be replaced by a nil Type, but in gccgo this
2928// function takes care of ensuring that multiple *rtype for the same
2929// type are coalesced into a single Type.
2930//
2931// toType should be an internal detail,
2932// but widely used packages access it using linkname.
2933// Notable members of the hall of shame include:
2934//   - fortio.org/log
2935//   - github.com/goccy/go-json
2936//   - github.com/goccy/go-reflect
2937//   - github.com/sohaha/zlsgo
2938//
2939// Do not remove or change the type signature.
2940// See go.dev/issue/67401.
2941//
2942//go:linkname toType
2943func toType(t *abi.Type) Type {
2944	if t == nil {
2945		return nil
2946	}
2947	return toRType(t)
2948}
2949
2950type layoutKey struct {
2951	ftyp *funcType // function signature
2952	rcvr *abi.Type // receiver type, or nil if none
2953}
2954
2955type layoutType struct {
2956	t         *abi.Type
2957	framePool *sync.Pool
2958	abid      abiDesc
2959}
2960
2961var layoutCache sync.Map // map[layoutKey]layoutType
2962
2963// funcLayout computes a struct type representing the layout of the
2964// stack-assigned function arguments and return values for the function
2965// type t.
2966// If rcvr != nil, rcvr specifies the type of the receiver.
2967// The returned type exists only for GC, so we only fill out GC relevant info.
2968// Currently, that's just size and the GC program. We also fill in
2969// the name for possible debugging use.
2970func funcLayout(t *funcType, rcvr *abi.Type) (frametype *abi.Type, framePool *sync.Pool, abid abiDesc) {
2971	if t.Kind() != abi.Func {
2972		panic("reflect: funcLayout of non-func type " + stringFor(&t.Type))
2973	}
2974	if rcvr != nil && rcvr.Kind() == abi.Interface {
2975		panic("reflect: funcLayout with interface receiver " + stringFor(rcvr))
2976	}
2977	k := layoutKey{t, rcvr}
2978	if lti, ok := layoutCache.Load(k); ok {
2979		lt := lti.(layoutType)
2980		return lt.t, lt.framePool, lt.abid
2981	}
2982
2983	// Compute the ABI layout.
2984	abid = newAbiDesc(t, rcvr)
2985
2986	// build dummy rtype holding gc program
2987	x := &abi.Type{
2988		Align_: goarch.PtrSize,
2989		// Don't add spill space here; it's only necessary in
2990		// reflectcall's frame, not in the allocated frame.
2991		// TODO(mknyszek): Remove this comment when register
2992		// spill space in the frame is no longer required.
2993		Size_:    align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
2994		PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
2995	}
2996	if abid.stackPtrs.n > 0 {
2997		x.GCData = &abid.stackPtrs.data[0]
2998	}
2999
3000	var s string
3001	if rcvr != nil {
3002		s = "methodargs(" + stringFor(rcvr) + ")(" + stringFor(&t.Type) + ")"
3003	} else {
3004		s = "funcargs(" + stringFor(&t.Type) + ")"
3005	}
3006	x.Str = resolveReflectName(newName(s, "", false, false))
3007
3008	// cache result for future callers
3009	framePool = &sync.Pool{New: func() any {
3010		return unsafe_New(x)
3011	}}
3012	lti, _ := layoutCache.LoadOrStore(k, layoutType{
3013		t:         x,
3014		framePool: framePool,
3015		abid:      abid,
3016	})
3017	lt := lti.(layoutType)
3018	return lt.t, lt.framePool, lt.abid
3019}
3020
3021// Note: this type must agree with runtime.bitvector.
3022type bitVector struct {
3023	n    uint32 // number of bits
3024	data []byte
3025}
3026
3027// append a bit to the bitmap.
3028func (bv *bitVector) append(bit uint8) {
3029	if bv.n%(8*goarch.PtrSize) == 0 {
3030		// Runtime needs pointer masks to be a multiple of uintptr in size.
3031		// Since reflect passes bv.data directly to the runtime as a pointer mask,
3032		// we append a full uintptr of zeros at a time.
3033		for i := 0; i < goarch.PtrSize; i++ {
3034			bv.data = append(bv.data, 0)
3035		}
3036	}
3037	bv.data[bv.n/8] |= bit << (bv.n % 8)
3038	bv.n++
3039}
3040
3041func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) {
3042	if !t.Pointers() {
3043		return
3044	}
3045
3046	switch Kind(t.Kind_ & abi.KindMask) {
3047	case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
3048		// 1 pointer at start of representation
3049		for bv.n < uint32(offset/goarch.PtrSize) {
3050			bv.append(0)
3051		}
3052		bv.append(1)
3053
3054	case Interface:
3055		// 2 pointers
3056		for bv.n < uint32(offset/goarch.PtrSize) {
3057			bv.append(0)
3058		}
3059		bv.append(1)
3060		bv.append(1)
3061
3062	case Array:
3063		// repeat inner type
3064		tt := (*arrayType)(unsafe.Pointer(t))
3065		for i := 0; i < int(tt.Len); i++ {
3066			addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem)
3067		}
3068
3069	case Struct:
3070		// apply fields
3071		tt := (*structType)(unsafe.Pointer(t))
3072		for i := range tt.Fields {
3073			f := &tt.Fields[i]
3074			addTypeBits(bv, offset+f.Offset, f.Typ)
3075		}
3076	}
3077}
3078
3079// TypeFor returns the [Type] that represents the type argument T.
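//
// For example (illustrative; assumes the usual "reflect" and "io" imports):
//
//	reflect.TypeFor[int]()       // the same Type as reflect.TypeOf(0)
//	reflect.TypeFor[io.Reader]() // the io.Reader interface type itself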
3080func TypeFor[T any]() Type {
3081	var v T
3082	if t := TypeOf(v); t != nil {
3083		return t // optimize for T being a non-interface kind
3084	}
3085	return TypeOf((*T)(nil)).Elem() // only for an interface kind
3086}
3087