1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package reflectdata
6
7import (
8	"encoding/binary"
9	"fmt"
10	"internal/abi"
11	"os"
12	"sort"
13	"strings"
14	"sync"
15
16	"cmd/compile/internal/base"
17	"cmd/compile/internal/bitvec"
18	"cmd/compile/internal/compare"
19	"cmd/compile/internal/ir"
20	"cmd/compile/internal/objw"
21	"cmd/compile/internal/rttype"
22	"cmd/compile/internal/staticdata"
23	"cmd/compile/internal/typebits"
24	"cmd/compile/internal/typecheck"
25	"cmd/compile/internal/types"
26	"cmd/internal/gcprog"
27	"cmd/internal/obj"
28	"cmd/internal/objabi"
29	"cmd/internal/src"
30)
31
32type ptabEntry struct {
33	s *types.Sym
34	t *types.Type
35}
36
37// runtime interface and reflection data structures
38var (
39	// protects signatset and signatslice
40	signatmu sync.Mutex
41	// Set of types that need a runtime type descriptor.
42	signatset = make(map[*types.Type]struct{})
43	// Queue of types waiting to have their runtime type descriptors generated.
44	signatslice []typeAndStr
45
46	gcsymmu  sync.Mutex // protects gcsymset and gcsymslice
47	gcsymset = make(map[*types.Type]struct{})
48)
49
50type typeSig struct {
51	name  *types.Sym
52	isym  *obj.LSym
53	tsym  *obj.LSym
54	type_ *types.Type
55	mtype *types.Type
56}
57
58func commonSize() int { return int(rttype.Type.Size()) } // Sizeof(runtime._type{})
59
60func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
61	if t.Sym() == nil && len(methods(t)) == 0 {
62		return 0
63	}
64	return int(rttype.UncommonType.Size())
65}
66
67func makefield(name string, t *types.Type) *types.Field {
68	sym := (*types.Pkg)(nil).Lookup(name)
69	return types.NewField(src.NoXPos, sym, t)
70}
71
72// MapBucketType makes the map bucket type given the type of the map.
73func MapBucketType(t *types.Type) *types.Type {
74	// Builds a type representing a Bucket structure for
75	// the given map type. This type is not visible to users -
76	// we include only enough information to generate a correct GC
77	// program for it.
78	// Make sure this stays in sync with runtime/map.go.
79	//
80	//	A "bucket" is a "struct" {
81	//	      tophash [abi.MapBucketCount]uint8
82	//	      keys [abi.MapBucketCount]keyType
83	//	      elems [abi.MapBucketCount]elemType
84	//	      overflow *bucket
85	//	    }
86	if t.MapType().Bucket != nil {
87		return t.MapType().Bucket
88	}
89
90	keytype := t.Key()
91	elemtype := t.Elem()
92	types.CalcSize(keytype)
93	types.CalcSize(elemtype)
94	if keytype.Size() > abi.MapMaxKeyBytes {
95		keytype = types.NewPtr(keytype)
96	}
97	if elemtype.Size() > abi.MapMaxElemBytes {
98		elemtype = types.NewPtr(elemtype)
99	}
100
101	field := make([]*types.Field, 0, 5)
102
103	// The first field is: topbits [abi.MapBucketCount]uint8.
104	arr := types.NewArray(types.Types[types.TUINT8], abi.MapBucketCount)
105	field = append(field, makefield("topbits", arr))
106
107	arr = types.NewArray(keytype, abi.MapBucketCount)
108	arr.SetNoalg(true)
109	keys := makefield("keys", arr)
110	field = append(field, keys)
111
112	arr = types.NewArray(elemtype, abi.MapBucketCount)
113	arr.SetNoalg(true)
114	elems := makefield("elems", arr)
115	field = append(field, elems)
116
117	// If keys and elems have no pointers, the map implementation
118	// can keep a list of overflow pointers on the side so that
119	// buckets can be marked as having no pointers.
120	// Arrange for the bucket to have no pointers by changing
121	// the type of the overflow field to uintptr in this case.
122	// See comment on hmap.overflow in runtime/map.go.
123	otyp := types.Types[types.TUNSAFEPTR]
124	if !elemtype.HasPointers() && !keytype.HasPointers() {
125		otyp = types.Types[types.TUINTPTR]
126	}
127	overflow := makefield("overflow", otyp)
128	field = append(field, overflow)
129
130	// link up fields
131	bucket := types.NewStruct(field[:])
132	bucket.SetNoalg(true)
133	types.CalcSize(bucket)
134
135	// Check invariants that map code depends on.
136	if !types.IsComparable(t.Key()) {
137		base.Fatalf("unsupported map key type for %v", t)
138	}
139	if abi.MapBucketCount < 8 {
140		base.Fatalf("bucket size %d too small for proper alignment %d", abi.MapBucketCount, 8)
141	}
142	if uint8(keytype.Alignment()) > abi.MapBucketCount {
143		base.Fatalf("key align too big for %v", t)
144	}
145	if uint8(elemtype.Alignment()) > abi.MapBucketCount {
146		base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.MapBucketCount)
147	}
148	if keytype.Size() > abi.MapMaxKeyBytes {
149		base.Fatalf("key size too large for %v", t)
150	}
151	if elemtype.Size() > abi.MapMaxElemBytes {
152		base.Fatalf("elem size too large for %v", t)
153	}
154	if t.Key().Size() > abi.MapMaxKeyBytes && !keytype.IsPtr() {
155		base.Fatalf("key indirect incorrect for %v", t)
156	}
157	if t.Elem().Size() > abi.MapMaxElemBytes && !elemtype.IsPtr() {
158		base.Fatalf("elem indirect incorrect for %v", t)
159	}
160	if keytype.Size()%keytype.Alignment() != 0 {
161		base.Fatalf("key size not a multiple of key align for %v", t)
162	}
163	if elemtype.Size()%elemtype.Alignment() != 0 {
164		base.Fatalf("elem size not a multiple of elem align for %v", t)
165	}
166	if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
167		base.Fatalf("bucket align not multiple of key align %v", t)
168	}
169	if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
170		base.Fatalf("bucket align not multiple of elem align %v", t)
171	}
172	if keys.Offset%keytype.Alignment() != 0 {
173		base.Fatalf("bad alignment of keys in bmap for %v", t)
174	}
175	if elems.Offset%elemtype.Alignment() != 0 {
176		base.Fatalf("bad alignment of elems in bmap for %v", t)
177	}
178
179	// Double-check that overflow field is final memory in struct,
180	// with no padding at end.
181	if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
182		base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
183			t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
184	}
185
186	t.MapType().Bucket = bucket
187
188	bucket.StructType().Map = t
189	return bucket
190}
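
// For illustration: with abi.MapBucketCount == 8 on a 64-bit target, the
// bucket built above for map[string]int would look roughly like
//
//	struct {
//		topbits  [8]uint8
//		keys     [8]string       // 8 * 16 bytes; string keys stay inline (16 <= abi.MapMaxKeyBytes)
//		elems    [8]int          // 8 * 8 bytes
//		overflow unsafe.Pointer  // stays a pointer because string keys contain pointers
//	}
//
// for a total of 8 + 128 + 64 + 8 = 208 bytes. The exact numbers depend on
// the abi constants and the target word size.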
191
192var hmapType *types.Type
193
194// MapType returns a type interchangeable with runtime.hmap.
195// Make sure this stays in sync with runtime/map.go.
196func MapType() *types.Type {
197	if hmapType != nil {
198		return hmapType
199	}
200
201	// build a struct:
202	// type hmap struct {
203	//    count      int
204	//    flags      uint8
205	//    B          uint8
206	//    noverflow  uint16
207	//    hash0      uint32
208	//    buckets    unsafe.Pointer
209	//    oldbuckets unsafe.Pointer
210	//    nevacuate  uintptr
211	//    extra      unsafe.Pointer // *mapextra
212	// }
213	// must match runtime/map.go:hmap.
214	fields := []*types.Field{
215		makefield("count", types.Types[types.TINT]),
216		makefield("flags", types.Types[types.TUINT8]),
217		makefield("B", types.Types[types.TUINT8]),
218		makefield("noverflow", types.Types[types.TUINT16]),
219		makefield("hash0", types.Types[types.TUINT32]),      // Used in walk.go for OMAKEMAP.
220		makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
221		makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
222		makefield("nevacuate", types.Types[types.TUINTPTR]),
223		makefield("extra", types.Types[types.TUNSAFEPTR]),
224	}
225
226	n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
227	hmap := types.NewNamed(n)
228	n.SetType(hmap)
229	n.SetTypecheck(1)
230
231	hmap.SetUnderlying(types.NewStruct(fields))
232	types.CalcSize(hmap)
233
234	// The size of hmap should be 48 bytes on 64 bit
235	// and 28 bytes on 32 bit platforms.
236	if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
237		base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
238	}
239
240	hmapType = hmap
241	return hmap
242}
243
244var hiterType *types.Type
245
246// MapIterType returns a type interchangeable with runtime.hiter.
247// Make sure this stays in sync with runtime/map.go.
248func MapIterType() *types.Type {
249	if hiterType != nil {
250		return hiterType
251	}
252
253	hmap := MapType()
254
255	// build a struct:
256	// type hiter struct {
257	//    key         unsafe.Pointer // *Key
258	//    elem        unsafe.Pointer // *Elem
259	//    t           unsafe.Pointer // *MapType
260	//    h           *hmap
261	//    buckets     unsafe.Pointer
262	//    bptr        unsafe.Pointer // *bmap
263	//    overflow    unsafe.Pointer // *[]*bmap
264	//    oldoverflow unsafe.Pointer // *[]*bmap
265	//    startBucket uintptr
266	//    offset      uint8
267	//    wrapped     bool
268	//    B           uint8
269	//    i           uint8
270	//    bucket      uintptr
271	//    checkBucket uintptr
272	// }
273	// must match runtime/map.go:hiter.
274	fields := []*types.Field{
275		makefield("key", types.Types[types.TUNSAFEPTR]),  // Used in range.go for TMAP.
276		makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
277		makefield("t", types.Types[types.TUNSAFEPTR]),
278		makefield("h", types.NewPtr(hmap)),
279		makefield("buckets", types.Types[types.TUNSAFEPTR]),
280		makefield("bptr", types.Types[types.TUNSAFEPTR]),
281		makefield("overflow", types.Types[types.TUNSAFEPTR]),
282		makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
283		makefield("startBucket", types.Types[types.TUINTPTR]),
284		makefield("offset", types.Types[types.TUINT8]),
285		makefield("wrapped", types.Types[types.TBOOL]),
286		makefield("B", types.Types[types.TUINT8]),
287		makefield("i", types.Types[types.TUINT8]),
288		makefield("bucket", types.Types[types.TUINTPTR]),
289		makefield("checkBucket", types.Types[types.TUINTPTR]),
290	}
291
292	// build iterator struct holding the above fields
293	n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
294	hiter := types.NewNamed(n)
295	n.SetType(hiter)
296	n.SetTypecheck(1)
297
298	hiter.SetUnderlying(types.NewStruct(fields))
299	types.CalcSize(hiter)
300	if hiter.Size() != int64(12*types.PtrSize) {
301		base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
302	}
303
304	hiterType = hiter
305	return hiter
306}
307
308// methods returns the methods of the non-interface type t, sorted by name.
309// Generates stub functions as needed.
310func methods(t *types.Type) []*typeSig {
311	if t.HasShape() {
312		// Shape types have no methods.
313		return nil
314	}
315	// method type
316	mt := types.ReceiverBaseType(t)
317
318	if mt == nil {
319		return nil
320	}
321	typecheck.CalcMethods(mt)
322
323	// make list of methods for t,
324	// generating code if necessary.
325	var ms []*typeSig
326	for _, f := range mt.AllMethods() {
327		if f.Sym == nil {
328			base.Fatalf("method with no sym on %v", mt)
329		}
330		if !f.IsMethod() {
331			base.Fatalf("non-method on %v method %v %v", mt, f.Sym, f)
332		}
333		if f.Type.Recv() == nil {
334			base.Fatalf("receiver with no type on %v method %v %v", mt, f.Sym, f)
335		}
336		if f.Nointerface() && !t.IsFullyInstantiated() {
337			// Skip creating method wrappers if f is nointerface. But, if
338			// t is an instantiated type, we still have to call
339			// methodWrapper, because methodWrapper generates the actual
340			// generic method on the type as well.
341			continue
342		}
343
344		// get receiver type for this particular method.
345		// if pointer receiver but non-pointer t and
346		// this is not an embedded pointer inside a struct,
347		// method does not apply.
348		if !types.IsMethodApplicable(t, f) {
349			continue
350		}
351
352		sig := &typeSig{
353			name:  f.Sym,
354			isym:  methodWrapper(t, f, true),
355			tsym:  methodWrapper(t, f, false),
356			type_: typecheck.NewMethodType(f.Type, t),
357			mtype: typecheck.NewMethodType(f.Type, nil),
358		}
359		if f.Nointerface() {
360			// In the case of a nointerface method on an instantiated
361			// type, don't actually append the typeSig.
362			continue
363		}
364		ms = append(ms, sig)
365	}
366
367	return ms
368}
369
370// imethods returns the methods of the interface type t, sorted by name.
371func imethods(t *types.Type) []*typeSig {
372	var methods []*typeSig
373	for _, f := range t.AllMethods() {
374		if f.Type.Kind() != types.TFUNC || f.Sym == nil {
375			continue
376		}
377		if f.Sym.IsBlank() {
378			base.Fatalf("unexpected blank symbol in interface method set")
379		}
380		if n := len(methods); n > 0 {
381			last := methods[n-1]
382			if !last.name.Less(f.Sym) {
383				base.Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
384			}
385		}
386
387		sig := &typeSig{
388			name:  f.Sym,
389			mtype: f.Type,
390			type_: typecheck.NewMethodType(f.Type, nil),
391		}
392		methods = append(methods, sig)
393
394		// NOTE(rsc): Perhaps an oversight that
395		// IfaceType.Method is not in the reflect data.
396		// Generate the method body, so that compiled
397		// code can refer to it.
398		methodWrapper(t, f, false)
399	}
400
401	return methods
402}
403
404func dimportpath(p *types.Pkg) {
405	if p.Pathsym != nil {
406		return
407	}
408
409	if p == types.LocalPkg && base.Ctxt.Pkgpath == "" {
410		panic("missing pkgpath")
411	}
412
413	// If we are compiling the runtime package, there are two runtime packages around
414	// -- localpkg and Pkgs.Runtime. We don't want to produce import path symbols for
415	// both of them, so just produce one for localpkg.
416	if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime {
417		return
418	}
419
420	s := base.Ctxt.Lookup("type:.importpath." + p.Prefix + ".")
421	ot := dnameData(s, 0, p.Path, "", nil, false, false)
422	objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
423	s.Set(obj.AttrContentAddressable, true)
424	p.Pathsym = s
425}
426
427func dgopkgpath(c rttype.Cursor, pkg *types.Pkg) {
428	c = c.Field("Bytes")
429	if pkg == nil {
430		c.WritePtr(nil)
431		return
432	}
433
434	dimportpath(pkg)
435	c.WritePtr(pkg.Pathsym)
436}
437
438// dgopkgpathOff writes an offset relocation to the pkg path symbol to c.
439func dgopkgpathOff(c rttype.Cursor, pkg *types.Pkg) {
440	if pkg == nil {
441		c.WriteInt32(0)
442		return
443	}
444
445	dimportpath(pkg)
446	c.WriteSymPtrOff(pkg.Pathsym, false)
447}
448
449// dnameField dumps a reflect.name for a struct field.
450func dnameField(c rttype.Cursor, spkg *types.Pkg, ft *types.Field) {
451	if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
452		base.Fatalf("package mismatch for %v", ft.Sym)
453	}
454	nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name), ft.Embedded != 0)
455	c.Field("Bytes").WritePtr(nsym)
456}
457
458// dnameData writes the contents of a reflect.name into s at offset ot.
459func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported, embedded bool) int {
460	if len(name) >= 1<<29 {
461		base.Fatalf("name too long: %d %s...", len(name), name[:1024])
462	}
463	if len(tag) >= 1<<29 {
464		base.Fatalf("tag too long: %d %s...", len(tag), tag[:1024])
465	}
466	var nameLen [binary.MaxVarintLen64]byte
467	nameLenLen := binary.PutUvarint(nameLen[:], uint64(len(name)))
468	var tagLen [binary.MaxVarintLen64]byte
469	tagLenLen := binary.PutUvarint(tagLen[:], uint64(len(tag)))
470
471	// Encode name and tag. See reflect/type.go for details.
472	var bits byte
473	l := 1 + nameLenLen + len(name)
474	if exported {
475		bits |= 1 << 0
476	}
477	if len(tag) > 0 {
478		l += tagLenLen + len(tag)
479		bits |= 1 << 1
480	}
481	if pkg != nil {
482		bits |= 1 << 2
483	}
484	if embedded {
485		bits |= 1 << 3
486	}
487	b := make([]byte, l)
488	b[0] = bits
489	copy(b[1:], nameLen[:nameLenLen])
490	copy(b[1+nameLenLen:], name)
491	if len(tag) > 0 {
492		tb := b[1+nameLenLen+len(name):]
493		copy(tb, tagLen[:tagLenLen])
494		copy(tb[tagLenLen:], tag)
495	}
496
497	ot = int(s.WriteBytes(base.Ctxt, int64(ot), b))
498
499	if pkg != nil {
500		c := rttype.NewCursor(s, int64(ot), types.Types[types.TUINT32])
501		dgopkgpathOff(c, pkg)
502		ot += 4
503	}
504
505	return ot
506}
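
// For illustration, the bytes produced by dnameData for an exported field
// named "Foo" with tag `json:"foo"`, a nil pkg, and embedded == false are:
//
//	b[0]    = 1<<0 | 1<<1  // exported, has tag
//	b[1]    = 3            // uvarint length of "Foo"
//	b[2:5]  = "Foo"
//	b[5]    = 10           // uvarint length of the tag
//	b[6:16] = `json:"foo"`
//
// with no trailing pkgpath offset, since pkg is nil.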
507
508var dnameCount int
509
510// dname creates a reflect.name for a struct field or method.
511func dname(name, tag string, pkg *types.Pkg, exported, embedded bool) *obj.LSym {
512	// Write out data as "type:." to signal two things to the
513	// linker, first that when dynamically linking, the symbol
514	// should be moved to a relro section, and second that the
515	// contents should not be decoded as a type.
516	sname := "type:.namedata."
517	if pkg == nil {
518		// In the common case, share data with other packages.
519		if name == "" {
520			if exported {
521				sname += "-noname-exported." + tag
522			} else {
523				sname += "-noname-unexported." + tag
524			}
525		} else {
526			if exported {
527				sname += name + "." + tag
528			} else {
529				sname += name + "-" + tag
530			}
531		}
532	} else {
533		// TODO(mdempsky): We should be able to share these too (except
534		// maybe when dynamic linking).
535		sname = fmt.Sprintf("%s%s.%d", sname, types.LocalPkg.Prefix, dnameCount)
536		dnameCount++
537	}
538	if embedded {
539		sname += ".embedded"
540	}
541	s := base.Ctxt.Lookup(sname)
542	if len(s.P) > 0 {
543		return s
544	}
545	ot := dnameData(s, 0, name, tag, pkg, exported, embedded)
546	objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
547	s.Set(obj.AttrContentAddressable, true)
548	return s
549}
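
// For example, the symbol names chosen above are
//
//	dname("Foo", "", nil, true, false)  -> type:.namedata.Foo.
//	dname("foo", "", nil, false, false) -> type:.namedata.foo-
//
// while names with a non-nil pkg get a per-package, counter-suffixed symbol
// and are not shared across packages.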
550
551// dextratype dumps the fields of a runtime.uncommontype.
552// dataAdd is the offset in bytes after the header where the
553// backing array of the []method field should be written.
554func dextratype(lsym *obj.LSym, off int64, t *types.Type, dataAdd int) {
555	m := methods(t)
556	if t.Sym() == nil && len(m) == 0 {
557		base.Fatalf("extra requested of type with no extra info %v", t)
558	}
559	noff := types.RoundUp(off, int64(types.PtrSize))
560	if noff != off {
561		base.Fatalf("unexpected alignment in dextratype for %v", t)
562	}
563
564	for _, a := range m {
565		writeType(a.type_)
566	}
567
568	c := rttype.NewCursor(lsym, off, rttype.UncommonType)
569	dgopkgpathOff(c.Field("PkgPath"), typePkg(t))
570
571	dataAdd += uncommonSize(t)
572	mcount := len(m)
573	if mcount != int(uint16(mcount)) {
574		base.Fatalf("too many methods on %v: %d", t, mcount)
575	}
576	xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
577	if dataAdd != int(uint32(dataAdd)) {
578		base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
579	}
580
581	c.Field("Mcount").WriteUint16(uint16(mcount))
582	c.Field("Xcount").WriteUint16(uint16(xcount))
583	c.Field("Moff").WriteUint32(uint32(dataAdd))
584	// Note: there is an unused uint32 field here.
585
586	// Write the backing array for the []method field.
587	array := rttype.NewArrayCursor(lsym, off+int64(dataAdd), rttype.Method, mcount)
588	for i, a := range m {
589		exported := types.IsExported(a.name.Name)
590		var pkg *types.Pkg
591		if !exported && a.name.Pkg != typePkg(t) {
592			pkg = a.name.Pkg
593		}
594		nsym := dname(a.name.Name, "", pkg, exported, false)
595
596		e := array.Elem(i)
597		e.Field("Name").WriteSymPtrOff(nsym, false)
598		dmethodptrOff(e.Field("Mtyp"), writeType(a.mtype))
599		dmethodptrOff(e.Field("Ifn"), a.isym)
600		dmethodptrOff(e.Field("Tfn"), a.tsym)
601	}
602}
603
604func typePkg(t *types.Type) *types.Pkg {
605	tsym := t.Sym()
606	if tsym == nil {
607		switch t.Kind() {
608		case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN:
609			if t.Elem() != nil {
610				tsym = t.Elem().Sym()
611			}
612		}
613	}
614	if tsym != nil && tsym.Pkg != types.BuiltinPkg {
615		return tsym.Pkg
616	}
617	return nil
618}
619
620func dmethodptrOff(c rttype.Cursor, x *obj.LSym) {
621	c.WriteInt32(0)
622	r := c.Reloc()
623	r.Sym = x
624	r.Type = objabi.R_METHODOFF
625}
626
627var kinds = []abi.Kind{
628	types.TINT:        abi.Int,
629	types.TUINT:       abi.Uint,
630	types.TINT8:       abi.Int8,
631	types.TUINT8:      abi.Uint8,
632	types.TINT16:      abi.Int16,
633	types.TUINT16:     abi.Uint16,
634	types.TINT32:      abi.Int32,
635	types.TUINT32:     abi.Uint32,
636	types.TINT64:      abi.Int64,
637	types.TUINT64:     abi.Uint64,
638	types.TUINTPTR:    abi.Uintptr,
639	types.TFLOAT32:    abi.Float32,
640	types.TFLOAT64:    abi.Float64,
641	types.TBOOL:       abi.Bool,
642	types.TSTRING:     abi.String,
643	types.TPTR:        abi.Pointer,
644	types.TSTRUCT:     abi.Struct,
645	types.TINTER:      abi.Interface,
646	types.TCHAN:       abi.Chan,
647	types.TMAP:        abi.Map,
648	types.TARRAY:      abi.Array,
649	types.TSLICE:      abi.Slice,
650	types.TFUNC:       abi.Func,
651	types.TCOMPLEX64:  abi.Complex64,
652	types.TCOMPLEX128: abi.Complex128,
653	types.TUNSAFEPTR:  abi.UnsafePointer,
654}
655
656var (
657	memhashvarlen  *obj.LSym
658	memequalvarlen *obj.LSym
659)
660
661// dcommontype dumps the contents of a reflect.rtype (runtime._type) to c.
662func dcommontype(c rttype.Cursor, t *types.Type) {
663	types.CalcSize(t)
664	eqfunc := geneq(t)
665
666	sptrWeak := true
667	var sptr *obj.LSym
668	if !t.IsPtr() || t.IsPtrElem() {
669		tptr := types.NewPtr(t)
670		if t.Sym() != nil || methods(tptr) != nil {
671			sptrWeak = false
672		}
673		sptr = writeType(tptr)
674	}
675
676	gcsym, useGCProg, ptrdata := dgcsym(t, true)
677	delete(gcsymset, t)
678
679	// ../../../../reflect/type.go:/^type.rtype
680	// actual type structure
681	//	type rtype struct {
682	//		size          uintptr
683	//		ptrdata       uintptr
684	//		hash          uint32
685	//		tflag         tflag
686	//		align         uint8
687	//		fieldAlign    uint8
688	//		kind          uint8
689	//		equal         func(unsafe.Pointer, unsafe.Pointer) bool
690	//		gcdata        *byte
691	//		str           nameOff
692	//		ptrToThis     typeOff
693	//	}
694	c.Field("Size_").WriteUintptr(uint64(t.Size()))
695	c.Field("PtrBytes").WriteUintptr(uint64(ptrdata))
696	c.Field("Hash").WriteUint32(types.TypeHash(t))
697
698	var tflag abi.TFlag
699	if uncommonSize(t) != 0 {
700		tflag |= abi.TFlagUncommon
701	}
702	if t.Sym() != nil && t.Sym().Name != "" {
703		tflag |= abi.TFlagNamed
704	}
705	if compare.IsRegularMemory(t) {
706		tflag |= abi.TFlagRegularMemory
707	}
708
709	exported := false
710	p := t.NameString()
711	// If we're writing out type T,
712	// we are very likely to write out type *T as well.
713	// Use the string "*T"[1:] for "T", so that the two
714	// share storage. This is a cheap way to reduce the
715	// amount of space taken up by reflect strings.
716	if !strings.HasPrefix(p, "*") {
717		p = "*" + p
718		tflag |= abi.TFlagExtraStar
719		if t.Sym() != nil {
720			exported = types.IsExported(t.Sym().Name)
721		}
722	} else {
723		if t.Elem() != nil && t.Elem().Sym() != nil {
724			exported = types.IsExported(t.Elem().Sym().Name)
725		}
726	}
727
728	if tflag != abi.TFlag(uint8(tflag)) {
729		// this should optimize away completely
730		panic("Unexpected change in size of abi.TFlag")
731	}
732	c.Field("TFlag").WriteUint8(uint8(tflag))
733
734	// runtime (and common sense) expects alignment to be a power of two.
735	i := int(uint8(t.Alignment()))
736
737	if i == 0 {
738		i = 1
739	}
740	if i&(i-1) != 0 {
741		base.Fatalf("invalid alignment %d for %v", uint8(t.Alignment()), t)
742	}
743	c.Field("Align_").WriteUint8(uint8(t.Alignment()))
744	c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment()))
745
746	kind := kinds[t.Kind()]
747	if types.IsDirectIface(t) {
748		kind |= abi.KindDirectIface
749	}
750	if useGCProg {
751		kind |= abi.KindGCProg
752	}
753	c.Field("Kind_").WriteUint8(uint8(kind))
754
755	c.Field("Equal").WritePtr(eqfunc)
756	c.Field("GCData").WritePtr(gcsym)
757
758	nsym := dname(p, "", nil, exported, false)
759	c.Field("Str").WriteSymPtrOff(nsym, false)
760	c.Field("PtrToThis").WriteSymPtrOff(sptr, sptrWeak)
761}
762
763// TrackSym returns the symbol for tracking use of field/method f, assumed
764// to be a member of struct/interface type t.
765func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
766	return base.PkgLinksym("go:track", t.LinkString()+"."+f.Sym.Name, obj.ABI0)
767}
768
769func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
770	p := prefix + "." + t.LinkString()
771	s := types.TypeSymLookup(p)
772
773	// This function is for looking up type-related generated functions
774	// (e.g. eq and hash). Make sure they are indeed generated.
775	signatmu.Lock()
776	NeedRuntimeType(t)
777	signatmu.Unlock()
778
779	//print("algsym: %s -> %+S\n", p, s);
780
781	return s
782}
783
784func TypeSym(t *types.Type) *types.Sym {
785	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
786		base.Fatalf("TypeSym %v", t)
787	}
788	if t.Kind() == types.TFUNC && t.Recv() != nil {
789		base.Fatalf("misuse of method type: %v", t)
790	}
791	s := types.TypeSym(t)
792	signatmu.Lock()
793	NeedRuntimeType(t)
794	signatmu.Unlock()
795	return s
796}
797
798func TypeLinksymPrefix(prefix string, t *types.Type) *obj.LSym {
799	return TypeSymPrefix(prefix, t).Linksym()
800}
801
802func TypeLinksymLookup(name string) *obj.LSym {
803	return types.TypeSymLookup(name).Linksym()
804}
805
806func TypeLinksym(t *types.Type) *obj.LSym {
807	lsym := TypeSym(t).Linksym()
808	signatmu.Lock()
809	if lsym.Extra == nil {
810		ti := lsym.NewTypeInfo()
811		ti.Type = t
812	}
813	signatmu.Unlock()
814	return lsym
815}
816
817// TypePtrAt returns an expression that evaluates to the
818// *runtime._type value for t.
819func TypePtrAt(pos src.XPos, t *types.Type) *ir.AddrExpr {
820	return typecheck.LinksymAddr(pos, TypeLinksym(t), types.Types[types.TUINT8])
821}
822
823// ITabLsym returns the LSym representing the itab for concrete type typ implementing
824// interface iface. A dummy tab will be created in the unusual case where typ doesn't
825// implement iface. Normally, this wouldn't happen, because the typechecker would
826// have reported a compile-time error. This situation can only happen when the
827// destination type of a type assert or a type in a type switch is parameterized, so
828// it may sometimes, but not always, be a type that can't implement the specified
829// interface.
830func ITabLsym(typ, iface *types.Type) *obj.LSym {
831	s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
832	lsym := s.Linksym()
833
834	if !existed {
835		writeITab(lsym, typ, iface, true)
836	}
837	return lsym
838}
839
840// ITabAddrAt returns an expression that evaluates to the
841// *runtime.itab value for concrete type typ implementing interface
842// iface.
843func ITabAddrAt(pos src.XPos, typ, iface *types.Type) *ir.AddrExpr {
844	s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
845	lsym := s.Linksym()
846
847	if !existed {
848		writeITab(lsym, typ, iface, false)
849	}
850
851	return typecheck.LinksymAddr(pos, lsym, types.Types[types.TUINT8])
852}
853
854// needkeyupdate reports whether map updates with t as a key
855// need the key to be updated.
856func needkeyupdate(t *types.Type) bool {
857	switch t.Kind() {
858	case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32,
859		types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
860		return false
861
862	case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0
863		types.TINTER,
864		types.TSTRING: // strings might have smaller backing stores
865		return true
866
867	case types.TARRAY:
868		return needkeyupdate(t.Elem())
869
870	case types.TSTRUCT:
871		for _, t1 := range t.Fields() {
872			if needkeyupdate(t1.Type) {
873				return true
874			}
875		}
876		return false
877
878	default:
879		base.Fatalf("bad type for map key: %v", t)
880		return true
881	}
882}
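
// For example, float keys compare equal across distinct representations:
// after m[+0.0] = 1 and then m[math.Copysign(0, -1)] = 2 the map still holds a
// single entry, and because needkeyupdate reports true for float64 the stored
// key is overwritten with -0.0 by the second assignment. String keys behave
// similarly, letting the map drop references to oversized backing arrays.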
883
884// hashMightPanic reports whether the hash of a map key of type t might panic.
885func hashMightPanic(t *types.Type) bool {
886	switch t.Kind() {
887	case types.TINTER:
888		return true
889
890	case types.TARRAY:
891		return hashMightPanic(t.Elem())
892
893	case types.TSTRUCT:
894		for _, t1 := range t.Fields() {
895			if hashMightPanic(t1.Type) {
896				return true
897			}
898		}
899		return false
900
901	default:
902		return false
903	}
904}
905
906// formalType replaces predeclared aliases with real types.
907// They've been separate internally to make error messages
908// better, but we have to merge them in the reflect tables.
909func formalType(t *types.Type) *types.Type {
910	switch t {
911	case types.AnyType, types.ByteType, types.RuneType:
912		return types.Types[t.Kind()]
913	}
914	return t
915}
916
917func writeType(t *types.Type) *obj.LSym {
918	t = formalType(t)
919	if t.IsUntyped() {
920		base.Fatalf("writeType %v", t)
921	}
922
923	s := types.TypeSym(t)
924	lsym := s.Linksym()
925
926	// special case (look for runtime below):
927	// when compiling package runtime,
928	// emit the type structures for int, float, etc.
929	tbase := t
930	if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
931		tbase = t.Elem()
932	}
933	if tbase.Kind() == types.TFORW {
934		base.Fatalf("unresolved defined type: %v", tbase)
935	}
936
937	// This is a fake type we generated for our builtin pseudo-runtime
938	// package. We'll emit a description for the real type while
939	// compiling package runtime, so we don't need or want to emit one
940	// from this fake type.
941	if sym := tbase.Sym(); sym != nil && sym.Pkg == ir.Pkgs.Runtime {
942		return lsym
943	}
944
945	if s.Siggen() {
946		return lsym
947	}
948	s.SetSiggen(true)
949
950	if !NeedEmit(tbase) {
951		if i := typecheck.BaseTypeIndex(t); i >= 0 {
952			lsym.Pkg = tbase.Sym().Pkg.Prefix
953			lsym.SymIdx = int32(i)
954			lsym.Set(obj.AttrIndexed, true)
955		}
956
957		// TODO(mdempsky): Investigate whether this still happens.
958		// If we know we don't need to emit code for a type,
959		// we should have a link-symbol index for it.
960		// See also TODO in NeedEmit.
961		return lsym
962	}
963
964	// Type layout                          Written by               Marker
965	// +--------------------------------+                            - 0
966	// | abi/internal.Type              |   dcommontype
967	// +--------------------------------+                            - A
968	// | additional type-dependent      |   code in the switch below
969	// | fields, e.g.                   |
970	// | abi/internal.ArrayType.Len     |
971	// +--------------------------------+                            - B
972	// | internal/abi.UncommonType      |   dextratype
973	// | This section is optional,      |
974	// | if type has a name or methods  |
975	// +--------------------------------+                            - C
976	// | variable-length data           |   code in the switch below
977	// | referenced by                  |
978	// | type-dependent fields, e.g.    |
979	// | abi/internal.StructType.Fields |
980	// | dataAdd = size of this section |
981	// +--------------------------------+                            - D
982	// | method list, if any            |   dextratype
983	// +--------------------------------+                            - E
984
985	// UncommonType section is included if we have a name or a method.
986	extra := t.Sym() != nil || len(methods(t)) != 0
987
988	// Decide the underlying type of the descriptor, and remember
989	// the size we need for variable-length data.
990	var rt *types.Type
991	dataAdd := 0
992	switch t.Kind() {
993	default:
994		rt = rttype.Type
995	case types.TARRAY:
996		rt = rttype.ArrayType
997	case types.TSLICE:
998		rt = rttype.SliceType
999	case types.TCHAN:
1000		rt = rttype.ChanType
1001	case types.TFUNC:
1002		rt = rttype.FuncType
1003		dataAdd = (t.NumRecvs() + t.NumParams() + t.NumResults()) * types.PtrSize
1004	case types.TINTER:
1005		rt = rttype.InterfaceType
1006		dataAdd = len(imethods(t)) * int(rttype.IMethod.Size())
1007	case types.TMAP:
1008		rt = rttype.MapType
1009	case types.TPTR:
1010		rt = rttype.PtrType
1011		// TODO: use rttype.Type when Elem() is ANY?
1012	case types.TSTRUCT:
1013		rt = rttype.StructType
1014		dataAdd = t.NumFields() * int(rttype.StructField.Size())
1015	}
1016
1017	// Compute offsets of each section.
1018	B := rt.Size()
1019	C := B
1020	if extra {
1021		C = B + rttype.UncommonType.Size()
1022	}
1023	D := C + int64(dataAdd)
1024	E := D + int64(len(methods(t)))*rttype.Method.Size()
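
	// For illustration (assuming the current 64-bit layouts: 80-byte
	// abi.StructType, 16-byte UncommonType, 24-byte StructField, and
	// 16-byte Method entries), a named struct type with two fields and one
	// method works out to roughly B=80, C=96, D=96+2*24=144, E=144+16=160.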
1025
1026	// Write the runtime._type
1027	c := rttype.NewCursor(lsym, 0, rt)
1028	if rt == rttype.Type {
1029		dcommontype(c, t)
1030	} else {
1031		dcommontype(c.Field("Type"), t)
1032	}
1033
1034	// Write additional type-specific data
1035	// (Both the fixed size and variable-sized sections.)
1036	switch t.Kind() {
1037	case types.TARRAY:
1038		// internal/abi.ArrayType
1039		s1 := writeType(t.Elem())
1040		t2 := types.NewSlice(t.Elem())
1041		s2 := writeType(t2)
1042		c.Field("Elem").WritePtr(s1)
1043		c.Field("Slice").WritePtr(s2)
1044		c.Field("Len").WriteUintptr(uint64(t.NumElem()))
1045
1046	case types.TSLICE:
1047		// internal/abi.SliceType
1048		s1 := writeType(t.Elem())
1049		c.Field("Elem").WritePtr(s1)
1050
1051	case types.TCHAN:
1052		// internal/abi.ChanType
1053		s1 := writeType(t.Elem())
1054		c.Field("Elem").WritePtr(s1)
1055		c.Field("Dir").WriteInt(int64(t.ChanDir()))
1056
1057	case types.TFUNC:
1058		// internal/abi.FuncType
1059		for _, t1 := range t.RecvParamsResults() {
1060			writeType(t1.Type)
1061		}
1062		inCount := t.NumRecvs() + t.NumParams()
1063		outCount := t.NumResults()
1064		if t.IsVariadic() {
1065			outCount |= 1 << 15
1066		}
1067
1068		c.Field("InCount").WriteUint16(uint16(inCount))
1069		c.Field("OutCount").WriteUint16(uint16(outCount))
1070
1071		// Array of rtype pointers follows funcType.
1072		typs := t.RecvParamsResults()
1073		array := rttype.NewArrayCursor(lsym, C, types.Types[types.TUNSAFEPTR], len(typs))
1074		for i, t1 := range typs {
1075			array.Elem(i).WritePtr(writeType(t1.Type))
1076		}
1077
1078	case types.TINTER:
1079		// internal/abi.InterfaceType
1080		m := imethods(t)
1081		n := len(m)
1082		for _, a := range m {
1083			writeType(a.type_)
1084		}
1085
1086		var tpkg *types.Pkg
1087		if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
1088			tpkg = t.Sym().Pkg
1089		}
1090		dgopkgpath(c.Field("PkgPath"), tpkg)
1091		c.Field("Methods").WriteSlice(lsym, C, int64(n), int64(n))
1092
1093		array := rttype.NewArrayCursor(lsym, C, rttype.IMethod, n)
1094		for i, a := range m {
1095			exported := types.IsExported(a.name.Name)
1096			var pkg *types.Pkg
1097			if !exported && a.name.Pkg != tpkg {
1098				pkg = a.name.Pkg
1099			}
1100			nsym := dname(a.name.Name, "", pkg, exported, false)
1101
1102			e := array.Elem(i)
1103			e.Field("Name").WriteSymPtrOff(nsym, false)
1104			e.Field("Typ").WriteSymPtrOff(writeType(a.type_), false)
1105		}
1106
1107	case types.TMAP:
1108		// internal/abi.MapType
1109		s1 := writeType(t.Key())
1110		s2 := writeType(t.Elem())
1111		s3 := writeType(MapBucketType(t))
1112		hasher := genhash(t.Key())
1113
1114		c.Field("Key").WritePtr(s1)
1115		c.Field("Elem").WritePtr(s2)
1116		c.Field("Bucket").WritePtr(s3)
1117		c.Field("Hasher").WritePtr(hasher)
1118		var flags uint32
1119		// Note: flags must match maptype accessors in ../../../../runtime/type.go
1120		// and maptype builder in ../../../../reflect/type.go:MapOf.
1121		if t.Key().Size() > abi.MapMaxKeyBytes {
1122			c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
1123			flags |= 1 // indirect key
1124		} else {
1125			c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
1126		}
1127
1128		if t.Elem().Size() > abi.MapMaxElemBytes {
1129			c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
1130			flags |= 2 // indirect value
1131		} else {
1132			c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
1133		}
1134		c.Field("BucketSize").WriteUint16(uint16(MapBucketType(t).Size()))
1135		if types.IsReflexive(t.Key()) {
1136			flags |= 4 // reflexive key
1137		}
1138		if needkeyupdate(t.Key()) {
1139			flags |= 8 // need key update
1140		}
1141		if hashMightPanic(t.Key()) {
1142			flags |= 16 // hash might panic
1143		}
1144		c.Field("Flags").WriteUint32(flags)
1145
1146		if u := t.Underlying(); u != t {
1147			// If t is a named map type, also keep the underlying map
1148			// type live in the binary. This is important to make sure that
1149			// a named map and that same map cast to its underlying type via
1150			// reflection, use the same hash function. See issue 37716.
1151			r := obj.Addrel(lsym)
1152			r.Sym = writeType(u)
1153			r.Type = objabi.R_KEEP
1154		}
1155
1156	case types.TPTR:
1157		// internal/abi.PtrType
1158		if t.Elem().Kind() == types.TANY {
1159			base.Fatalf("bad pointer base type")
1160		}
1161
1162		s1 := writeType(t.Elem())
1163		c.Field("Elem").WritePtr(s1)
1164
1165	case types.TSTRUCT:
1166		// internal/abi.StructType
1167		fields := t.Fields()
1168		for _, t1 := range fields {
1169			writeType(t1.Type)
1170		}
1171
1172		// All non-exported struct field names within a struct
1173		// type must originate from a single package. By
1174		// identifying and recording that package within the
1175		// struct type descriptor, we can omit that
1176		// information from the field descriptors.
1177		var spkg *types.Pkg
1178		for _, f := range fields {
1179			if !types.IsExported(f.Sym.Name) {
1180				spkg = f.Sym.Pkg
1181				break
1182			}
1183		}
1184
1185		dgopkgpath(c.Field("PkgPath"), spkg)
1186		c.Field("Fields").WriteSlice(lsym, C, int64(len(fields)), int64(len(fields)))
1187
1188		array := rttype.NewArrayCursor(lsym, C, rttype.StructField, len(fields))
1189		for i, f := range fields {
1190			e := array.Elem(i)
1191			dnameField(e.Field("Name"), spkg, f)
1192			e.Field("Typ").WritePtr(writeType(f.Type))
1193			e.Field("Offset").WriteUintptr(uint64(f.Offset))
1194		}
1195	}
1196
1197	// Write the extra info, if any.
1198	if extra {
1199		dextratype(lsym, B, t, dataAdd)
1200	}
1201
1202	// Note: DUPOK is required to ensure that we don't end up with more
1203	// than one type descriptor for a given type when the descriptor can
1204	// be defined in multiple packages, that is, for unnamed types,
1205	// instantiated types, and shape types.
1206	dupok := 0
1207	if tbase.Sym() == nil || tbase.IsFullyInstantiated() || tbase.HasShape() {
1208		dupok = obj.DUPOK
1209	}
1210
1211	objw.Global(lsym, int32(E), int16(dupok|obj.RODATA))
1212
1213	// The linker will leave a table of all the typelinks for
1214	// types in the binary, so the runtime can find them.
1215	//
1216	// When buildmode=shared, all types are in typelinks so the
1217	// runtime can deduplicate type pointers.
1218	keep := base.Ctxt.Flag_dynlink
1219	if !keep && t.Sym() == nil {
1220		// For an unnamed type, we only need the link if the type can
1221		// be created at run time by reflect.PointerTo and similar
1222		// functions. If the type exists in the program, those
1223		// functions must return the existing type structure rather
1224		// than creating a new one.
1225		switch t.Kind() {
1226		case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT:
1227			keep = true
1228		}
1229	}
1230	// Do not put Noalg types in typelinks.  See issue #22605.
1231	if types.TypeHasNoAlg(t) {
1232		keep = false
1233	}
1234	lsym.Set(obj.AttrMakeTypelink, keep)
1235
1236	return lsym
1237}
1238
1239// InterfaceMethodOffset returns the offset of the i-th method in the interface
1240// type descriptor, ityp.
1241func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
1242	// interface type descriptor layout is struct {
1243	//   _type        // commonSize
1244	//   pkgpath      // 1 word
1245	//   []imethod    // 3 words (pointing to [...]imethod below)
1246	//   uncommontype // uncommonSize
1247	//   [...]imethod
1248	// }
1249	// Each imethod is 8 bytes (a pair of 4-byte name and type offsets).
1250	return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8
1251}
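
// For illustration, on 64-bit targets (48-byte commonSize, 4 pointer-sized
// words, and a 16-byte uncommontype for a named interface with methods) the
// i-th method entry starts at 48 + 32 + 16 + 8*i = 96 + 8*i bytes from the
// start of the descriptor.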
1252
1253// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
1254func NeedRuntimeType(t *types.Type) {
1255	if _, ok := signatset[t]; !ok {
1256		signatset[t] = struct{}{}
1257		signatslice = append(signatslice, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
1258	}
1259}
1260
1261func WriteRuntimeTypes() {
1262	// Process signatslice. Use a loop, as writeType adds
1263	// entries to signatslice while it is being processed.
1264	for len(signatslice) > 0 {
1265		signats := signatslice
1266		// Sort for reproducible builds.
1267		sort.Sort(typesByString(signats))
1268		for _, ts := range signats {
1269			t := ts.t
1270			writeType(t)
1271			if t.Sym() != nil {
1272				writeType(types.NewPtr(t))
1273			}
1274		}
1275		signatslice = signatslice[len(signats):]
1276	}
1277}
1278
1279func WriteGCSymbols() {
1280	// Emit GC data symbols.
1281	gcsyms := make([]typeAndStr, 0, len(gcsymset))
1282	for t := range gcsymset {
1283		gcsyms = append(gcsyms, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
1284	}
1285	sort.Sort(typesByString(gcsyms))
1286	for _, ts := range gcsyms {
1287		dgcsym(ts.t, true)
1288	}
1289}
1290
1291// writeITab writes the itab for concrete type typ implementing interface iface. If
1292// allowNonImplement is true, allow the case where typ does not implement iface, and just
1293// create a dummy itab with zeroed-out method entries.
1294func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) {
1295	// TODO(mdempsky): Fix methodWrapper, geneq, and genhash (and maybe
1296	// others) to stop clobbering these.
1297	oldpos, oldfn := base.Pos, ir.CurFunc
1298	defer func() { base.Pos, ir.CurFunc = oldpos, oldfn }()
1299
1300	if typ == nil || (typ.IsPtr() && typ.Elem() == nil) || typ.IsUntyped() || iface == nil || !iface.IsInterface() || iface.IsEmptyInterface() {
1301		base.Fatalf("writeITab(%v, %v)", typ, iface)
1302	}
1303
1304	sigs := iface.AllMethods()
1305	entries := make([]*obj.LSym, 0, len(sigs))
1306
1307	// both sigs and methods are sorted by name,
1308	// so we can find the intersection in a single pass
1309	for _, m := range methods(typ) {
1310		if m.name == sigs[0].Sym {
1311			entries = append(entries, m.isym)
1312			if m.isym == nil {
1313				panic("NO ISYM")
1314			}
1315			sigs = sigs[1:]
1316			if len(sigs) == 0 {
1317				break
1318			}
1319		}
1320	}
1321	completeItab := len(sigs) == 0
1322	if !allowNonImplement && !completeItab {
1323		base.Fatalf("incomplete itab")
1324	}
1325
1326	// dump empty itab symbol into i.sym
1327	// type itab struct {
1328	//   inter  *interfacetype
1329	//   _type  *_type
1330	//   hash   uint32 // copy of _type.hash. Used for type switches.
1331	//   _      [4]byte
1332	//   fun    [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
1333	// }
1334	c := rttype.NewCursor(lsym, 0, rttype.ITab)
1335	c.Field("Inter").WritePtr(writeType(iface))
1336	c.Field("Type").WritePtr(writeType(typ))
1337	c.Field("Hash").WriteUint32(types.TypeHash(typ)) // copy of type hash
1338
1339	var delta int64
1340	c = c.Field("Fun")
1341	if !completeItab {
1342		// If typ doesn't implement iface, make method entries be zero.
1343		c.Elem(0).WriteUintptr(0)
1344	} else {
1345		var a rttype.ArrayCursor
1346		a, delta = c.ModifyArray(len(entries))
1347		for i, fn := range entries {
1348			a.Elem(i).WritePtrWeak(fn) // method pointer for each method
1349		}
1350	}
1351	// Nothing writes static itabs, so they are read only.
1352	objw.Global(lsym, int32(rttype.ITab.Size()+delta), int16(obj.DUPOK|obj.RODATA))
1353	lsym.Set(obj.AttrContentAddressable, true)
1354}
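
// For example, for iface with methods {Read, Write} and a concrete type whose
// sorted method set is {Close, Read, String, Write}, the single pass above
// matches Read and then Write, producing a complete itab whose Fun array holds
// the two interface-callable entry points in interface method order.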
1355
1356func WritePluginTable() {
1357	ptabs := typecheck.Target.PluginExports
1358	if len(ptabs) == 0 {
1359		return
1360	}
1361
1362	lsym := base.Ctxt.Lookup("go:plugin.tabs")
1363	ot := 0
1364	for _, p := range ptabs {
1365		// Dump ptab symbol into go.pluginsym package.
1366		//
1367		// type ptab struct {
1368		//	name nameOff
1369		//	typ  typeOff // pointer to symbol
1370		// }
1371		nsym := dname(p.Sym().Name, "", nil, true, false)
1372		t := p.Type()
1373		if p.Class != ir.PFUNC {
1374			t = types.NewPtr(t)
1375		}
1376		tsym := writeType(t)
1377		ot = objw.SymPtrOff(lsym, ot, nsym)
1378		ot = objw.SymPtrOff(lsym, ot, tsym)
1379		// Plugin exports symbols as interfaces. Mark their types
1380		// as UsedInIface.
1381		tsym.Set(obj.AttrUsedInIface, true)
1382	}
1383	objw.Global(lsym, int32(ot), int16(obj.RODATA))
1384
1385	lsym = base.Ctxt.Lookup("go:plugin.exports")
1386	ot = 0
1387	for _, p := range ptabs {
1388		ot = objw.SymPtr(lsym, ot, p.Linksym(), 0)
1389	}
1390	objw.Global(lsym, int32(ot), int16(obj.RODATA))
1391}
1392
1393// writtenByWriteBasicTypes reports whether typ is written by WriteBasicTypes.
1394// WriteBasicTypes always writes pointer types; any pointer has been stripped off typ already.
1395func writtenByWriteBasicTypes(typ *types.Type) bool {
1396	if typ.Sym() == nil && typ.Kind() == types.TFUNC {
1397		// func(error) string
1398		if typ.NumRecvs() == 0 &&
1399			typ.NumParams() == 1 && typ.NumResults() == 1 &&
1400			typ.Param(0).Type == types.ErrorType &&
1401			typ.Result(0).Type == types.Types[types.TSTRING] {
1402			return true
1403		}
1404	}
1405
1406	// Now we have left the basic types plus any and error, plus slices of them.
1407	// Strip the slice.
1408	if typ.Sym() == nil && typ.IsSlice() {
1409		typ = typ.Elem()
1410	}
1411
1412	// Basic types.
1413	sym := typ.Sym()
1414	if sym != nil && (sym.Pkg == types.BuiltinPkg || sym.Pkg == types.UnsafePkg) {
1415		return true
1416	}
1417	// any or error
1418	return (sym == nil && typ.IsEmptyInterface()) || typ == types.ErrorType
1419}
1420
1421func WriteBasicTypes() {
1422	// do basic types if compiling package runtime.
1423	// they have to be in at least one package,
1424	// and runtime is always loaded implicitly,
1425	// so this is as good as any.
1426	// another possible choice would be package main,
1427	// but using runtime means fewer copies in object files.
1428	// The code here needs to be in sync with writtenByWriteBasicTypes above.
1429	if base.Ctxt.Pkgpath != "runtime" {
1430		return
1431	}
1432
1433	// Note: always write NewPtr(t) because NeedEmit's caller strips the pointer.
1434	var list []*types.Type
1435	for i := types.Kind(1); i <= types.TBOOL; i++ {
1436		list = append(list, types.Types[i])
1437	}
1438	list = append(list,
1439		types.Types[types.TSTRING],
1440		types.Types[types.TUNSAFEPTR],
1441		types.AnyType,
1442		types.ErrorType)
1443	for _, t := range list {
1444		writeType(types.NewPtr(t))
1445		writeType(types.NewPtr(types.NewSlice(t)))
1446	}
1447
1448	// emit type for func(error) string,
1449	// which is the type of an auto-generated wrapper.
1450	writeType(types.NewPtr(types.NewSignature(nil, []*types.Field{
1451		types.NewField(base.Pos, nil, types.ErrorType),
1452	}, []*types.Field{
1453		types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
1454	})))
1455}
1456
1457type typeAndStr struct {
1458	t       *types.Type
1459	short   string // "short" here means TypeSymName
1460	regular string
1461}
1462
1463type typesByString []typeAndStr
1464
1465func (a typesByString) Len() int { return len(a) }
1466func (a typesByString) Less(i, j int) bool {
1467	// put named types before unnamed types
1468	if a[i].t.Sym() != nil && a[j].t.Sym() == nil {
1469		return true
1470	}
1471	if a[i].t.Sym() == nil && a[j].t.Sym() != nil {
1472		return false
1473	}
1474
1475	if a[i].short != a[j].short {
1476		return a[i].short < a[j].short
1477	}
1478	// When the only difference between the types is whether
1479	// they refer to byte or uint8, such as **byte vs **uint8,
1480	// the types' NameStrings can be identical.
1481	// To preserve deterministic sort ordering, sort these by String().
1482	//
1483	// TODO(mdempsky): This all seems suspect. Using LinkString would
1484	// avoid naming collisions, and there shouldn't be a reason to care
1485	// about "byte" vs "uint8": they share the same runtime type
1486	// descriptor anyway.
1487	if a[i].regular != a[j].regular {
1488		return a[i].regular < a[j].regular
1489	}
1490	// Identical anonymous interfaces defined in different locations
1491	// will be equal for the above checks, but different in DWARF output.
1492	// Sort by source position to ensure deterministic order.
1493	// See issues 27013 and 30202.
1494	if a[i].t.Kind() == types.TINTER && len(a[i].t.AllMethods()) > 0 {
1495		return a[i].t.AllMethods()[0].Pos.Before(a[j].t.AllMethods()[0].Pos)
1496	}
1497	return false
1498}
1499func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
1500
1501// GCSym returns a data symbol containing GC information for type t, along
1502// with a boolean reporting whether the UseGCProg bit should be set in the
1503// type kind, and the ptrdata field to record in the reflect type information.
1504// GCSym may be called in concurrent backend, so it does not emit the symbol
1505// content.
1506func GCSym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
1507	// Record that we need to emit the GC symbol.
1508	gcsymmu.Lock()
1509	if _, ok := gcsymset[t]; !ok {
1510		gcsymset[t] = struct{}{}
1511	}
1512	gcsymmu.Unlock()
1513
1514	return dgcsym(t, false)
1515}
1516
1517// dgcsym returns a data symbol containing GC information for type t, along
1518// with a boolean reporting whether the UseGCProg bit should be set in the
1519// type kind, and the ptrdata field to record in the reflect type information.
1520// When write is true, it writes the symbol data.
1521func dgcsym(t *types.Type, write bool) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
1522	ptrdata = types.PtrDataSize(t)
1523	if ptrdata/int64(types.PtrSize) <= abi.MaxPtrmaskBytes*8 {
1524		lsym = dgcptrmask(t, write)
1525		return
1526	}
1527
1528	useGCProg = true
1529	lsym, ptrdata = dgcprog(t, write)
1530	return
1531}
1532
1533// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
1534func dgcptrmask(t *types.Type, write bool) *obj.LSym {
1535	// Bytes we need for the ptrmask.
1536	n := (types.PtrDataSize(t)/int64(types.PtrSize) + 7) / 8
1537	// Runtime wants ptrmasks padded to a multiple of uintptr in size.
1538	n = (n + int64(types.PtrSize) - 1) &^ (int64(types.PtrSize) - 1)
1539	ptrmask := make([]byte, n)
1540	fillptrmask(t, ptrmask)
1541	p := fmt.Sprintf("runtime.gcbits.%x", ptrmask)
1542
1543	lsym := base.Ctxt.Lookup(p)
1544	if write && !lsym.OnList() {
1545		for i, x := range ptrmask {
1546			objw.Uint8(lsym, i, x)
1547		}
1548		objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
1549		lsym.Set(obj.AttrContentAddressable, true)
1550	}
1551	return lsym
1552}
1553
1554// fillptrmask fills in ptrmask with 1s corresponding to the
1555// word offsets in t that hold pointers.
1556// ptrmask is assumed to fit at least types.PtrDataSize(t)/PtrSize bits.
1557func fillptrmask(t *types.Type, ptrmask []byte) {
1558	for i := range ptrmask {
1559		ptrmask[i] = 0
1560	}
1561	if !t.HasPointers() {
1562		return
1563	}
1564
1565	vec := bitvec.New(8 * int32(len(ptrmask)))
1566	typebits.Set(t, 0, vec)
1567
1568	nptr := types.PtrDataSize(t) / int64(types.PtrSize)
1569	for i := int64(0); i < nptr; i++ {
1570		if vec.Get(int32(i)) {
1571			ptrmask[i/8] |= 1 << (uint(i) % 8)
1572		}
1573	}
1574}
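
// For illustration, on a 64-bit target a struct { p *int; n int; q *byte }
// spans three words with pointers in words 0 and 2, so fillptrmask produces
// the single byte 0b00000101.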
1575
1576// dgcprog emits and returns the symbol containing a GC program for type t
1577// along with the size of the data described by the program (in the range
1578// [types.PtrDataSize(t), t.Size()]).
1579// In practice, the size is types.PtrDataSize(t) except for non-trivial arrays.
1580// For non-trivial arrays, the program describes the full array size, t.Size().
1581func dgcprog(t *types.Type, write bool) (*obj.LSym, int64) {
1582	types.CalcSize(t)
1583	if t.Size() == types.BADWIDTH {
1584		base.Fatalf("dgcprog: %v badwidth", t)
1585	}
1586	lsym := TypeLinksymPrefix(".gcprog", t)
1587	var p gcProg
1588	p.init(lsym, write)
1589	p.emit(t, 0)
1590	offset := p.w.BitIndex() * int64(types.PtrSize)
1591	p.end()
1592	if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Size() {
1593		base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Size())
1594	}
1595	return lsym, offset
1596}
1597
1598type gcProg struct {
1599	lsym   *obj.LSym
1600	symoff int
1601	w      gcprog.Writer
1602	write  bool
1603}
1604
1605func (p *gcProg) init(lsym *obj.LSym, write bool) {
1606	p.lsym = lsym
1607	p.write = write && !lsym.OnList()
1608	p.symoff = 4 // first 4 bytes hold program length
1609	if !write {
1610		p.w.Init(func(byte) {})
1611		return
1612	}
1613	p.w.Init(p.writeByte)
1614	if base.Debug.GCProg > 0 {
1615		fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
1616		p.w.Debug(os.Stderr)
1617	}
1618}
1619
1620func (p *gcProg) writeByte(x byte) {
1621	p.symoff = objw.Uint8(p.lsym, p.symoff, x)
1622}
1623
1624func (p *gcProg) end() {
1625	p.w.End()
1626	if !p.write {
1627		return
1628	}
1629	objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
1630	objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
1631	p.lsym.Set(obj.AttrContentAddressable, true)
1632	if base.Debug.GCProg > 0 {
1633		fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
1634	}
1635}
1636
1637func (p *gcProg) emit(t *types.Type, offset int64) {
1638	types.CalcSize(t)
1639	if !t.HasPointers() {
1640		return
1641	}
1642	if t.Size() == int64(types.PtrSize) {
1643		p.w.Ptr(offset / int64(types.PtrSize))
1644		return
1645	}
1646	switch t.Kind() {
1647	default:
1648		base.Fatalf("gcProg.emit: unexpected type %v", t)
1649
1650	case types.TSTRING:
1651		p.w.Ptr(offset / int64(types.PtrSize))
1652
1653	case types.TINTER:
1654		// Note: the first word isn't a pointer. See comment in typebits.Set
1655		p.w.Ptr(offset/int64(types.PtrSize) + 1)
1656
1657	case types.TSLICE:
1658		p.w.Ptr(offset / int64(types.PtrSize))
1659
1660	case types.TARRAY:
1661		if t.NumElem() == 0 {
1662			// should have been handled by haspointers check above
1663			base.Fatalf("gcProg.emit: empty array")
1664		}
1665
1666		// Flatten array-of-array-of-array to just a big array by multiplying counts.
1667		count := t.NumElem()
1668		elem := t.Elem()
1669		for elem.IsArray() {
1670			count *= elem.NumElem()
1671			elem = elem.Elem()
1672		}
1673
1674		if !p.w.ShouldRepeat(elem.Size()/int64(types.PtrSize), count) {
1675			// Cheaper to just emit the bits.
1676			for i := int64(0); i < count; i++ {
1677				p.emit(elem, offset+i*elem.Size())
1678			}
1679			return
1680		}
1681		p.emit(elem, offset)
1682		p.w.ZeroUntil((offset + elem.Size()) / int64(types.PtrSize))
1683		p.w.Repeat(elem.Size()/int64(types.PtrSize), count-1)
1684
1685	case types.TSTRUCT:
1686		for _, t1 := range t.Fields() {
1687			p.emit(t1.Type, offset+t1.Offset)
1688		}
1689	}
1690}
1691
1692// ZeroAddr returns the address of a symbol with at least
1693// size bytes of zeros.
1694func ZeroAddr(size int64) ir.Node {
1695	if size >= 1<<31 {
1696		base.Fatalf("map elem too big %d", size)
1697	}
1698	if ZeroSize < size {
1699		ZeroSize = size
1700	}
1701	lsym := base.PkgLinksym("go:map", "zero", obj.ABI0)
1702	x := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
1703	return typecheck.Expr(typecheck.NodAddr(x))
1704}
1705
1706// NeedEmit reports whether typ is a type that we need to emit code
1707// for (e.g., runtime type descriptors, method wrappers).
1708func NeedEmit(typ *types.Type) bool {
1709	// TODO(mdempsky): Export data should keep track of which anonymous
1710	// and instantiated types were emitted, so at least downstream
1711	// packages can skip re-emitting them.
1712	//
1713	// Perhaps we can just generalize the linker-symbol indexing to
1714	// track the index of arbitrary types, not just defined types, and
1715	// use its presence to detect this. The same idea would work for
1716	// instantiated generic functions too.
1717
1718	switch sym := typ.Sym(); {
1719	case writtenByWriteBasicTypes(typ):
1720		return base.Ctxt.Pkgpath == "runtime"
1721
1722	case sym == nil:
1723		// Anonymous type; possibly never seen before or ever again.
1724		// Need to emit to be safe (however, see TODO above).
1725		return true
1726
1727	case sym.Pkg == types.LocalPkg:
1728		// Local defined type; our responsibility.
1729		return true
1730
1731	case typ.IsFullyInstantiated():
1732		// Instantiated type; possibly instantiated with unique type arguments.
1733		// Need to emit to be safe (however, see TODO above).
1734		return true
1735
1736	case typ.HasShape():
1737		// Shape type; need to emit even though it lives in the .shape package.
1738		// TODO: make sure the linker deduplicates them (see dupok in writeType above).
1739		return true
1740
1741	default:
1742		// Should have been emitted by an imported package.
1743		return false
1744	}
1745}
1746
1747// Generate a wrapper function to convert from
1748// a receiver of type T to a receiver of type U.
1749// That is,
1750//
1751//	func (t T) M() {
1752//		...
1753//	}
1754//
1755// already exists; this function generates
1756//
1757//	func (u U) M() {
1758//		u.M()
1759//	}
1760//
1761// where the types T and U are such that u.M() is valid
1762// and calls the T.M method.
1763// The resulting function is for use in method tables.
1764//
1765//	rcvr - U
1766//	method - M func (t T)(), a TFIELD type struct
1767//
1768// Also wraps methods on instantiated generic types for use in itab entries.
1769// For an instantiated generic type G[int], we generate wrappers like:
1770// G[int] pointer shaped:
1771//
1772//	func (x G[int]) f(arg) {
1773//		.inst.G[int].f(dictionary, x, arg)
1774//	}
1775//
1776// G[int] not pointer shaped:
1777//
1778//	func (x *G[int]) f(arg) {
1779//		.inst.G[int].f(dictionary, *x, arg)
1780//	}
1781//
1782// These wrappers are always fully stenciled.
1783func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
1784	if forItab && !types.IsDirectIface(rcvr) {
1785		rcvr = rcvr.PtrTo()
1786	}
1787
1788	newnam := ir.MethodSym(rcvr, method.Sym)
1789	lsym := newnam.Linksym()
1790
1791	// Unified IR creates its own wrappers.
1792	return lsym
1793}
1794
1795var ZeroSize int64
1796
1797// MarkTypeUsedInInterface marks that type t is converted to an interface.
1798// This information is used in the linker in dead method elimination.
1799func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) {
1800	if t.HasShape() {
1801		// Shape types shouldn't be put in interfaces, so we shouldn't ever get here.
1802		base.Fatalf("shape types have no methods %+v", t)
1803	}
1804	MarkTypeSymUsedInInterface(TypeLinksym(t), from)
1805}
1806func MarkTypeSymUsedInInterface(tsym *obj.LSym, from *obj.LSym) {
1807	// Emit a marker relocation. The linker will know the type is converted
1808	// to an interface if "from" is reachable.
1809	r := obj.Addrel(from)
1810	r.Sym = tsym
1811	r.Type = objabi.R_USEIFACE
1812}
1813
1814// MarkUsedIfaceMethod marks that an interface method is used in the current
1815// function. n is OCALLINTER node.
1816func MarkUsedIfaceMethod(n *ir.CallExpr) {
1817	// skip unnamed functions (func _())
1818	if ir.CurFunc.LSym == nil {
1819		return
1820	}
1821	dot := n.Fun.(*ir.SelectorExpr)
1822	ityp := dot.X.Type()
1823	if ityp.HasShape() {
1824		// Here we're calling a method on a generic interface. Something like:
1825		//
1826		// type I[T any] interface { foo() T }
1827		// func f[T any](x I[T]) {
1828		//     ... = x.foo()
1829		// }
1830		// f[int](...)
1831		// f[string](...)
1832		//
1833		// In this case, in f we're calling foo on a generic interface.
1834		// Which method could that be? Normally we could match the method
1835		// both by name and by type. But in this case we don't really know
1836		// the type of the method we're calling. It could be func()int
1837		// or func()string. So we match on just the function name, instead
1838		// of both the name and the type used for the non-generic case below.
1839		// TODO: instantiations at least know the shape of the instantiated
1840		// type, and the linker could do more complicated matching using
1841		// some sort of fuzzy shape matching. For now, only use the name
1842		// of the method for matching.
1843		r := obj.Addrel(ir.CurFunc.LSym)
1844		r.Sym = staticdata.StringSymNoCommon(dot.Sel.Name)
1845		r.Type = objabi.R_USENAMEDMETHOD
1846		return
1847	}
1848
1849	tsym := TypeLinksym(ityp)
1850	r := obj.Addrel(ir.CurFunc.LSym)
1851	r.Sym = tsym
1852	// dot.Offset() is the method index * PtrSize (the offset of code pointer
1853	// in itab).
1854	midx := dot.Offset() / int64(types.PtrSize)
1855	r.Add = InterfaceMethodOffset(ityp, midx)
1856	r.Type = objabi.R_USEIFACEMETHOD
1857}
1858
1859func deref(t *types.Type) *types.Type {
1860	if t.IsPtr() {
1861		return t.Elem()
1862	}
1863	return t
1864}
1865