// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package types

import (
	"math"
	"sort"

	"cmd/compile/internal/base"
	"cmd/internal/src"
	"internal/types/errors"
)

// PtrSize is the size in bytes of a pointer on the target architecture.
// It also equals the size of the predeclared int, uint, and uintptr types.
var PtrSize int

// RegSize is the size in bytes of a general-purpose register on the
// target architecture.
var RegSize int

// Slices in the runtime are represented by three components:
//
//	type slice struct {
//		ptr unsafe.Pointer
//		len int
//		cap int
//	}
//
// Strings in the runtime are represented by two components:
//
//	type string struct {
//		ptr unsafe.Pointer
//		len int
//	}
//
// These variables are the offsets of fields and sizes of these structs.
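//
// As an illustration (a sketch of the arithmetic, not necessarily the
// exact initialization code), on current targets, where int has the same
// size as a pointer, these work out to:
//
//	SlicePtrOffset = 0
//	SliceLenOffset = int64(PtrSize)
//	SliceCapOffset = 2 * int64(PtrSize)
//	SliceSize      = 3 * int64(PtrSize)
//	StringSize     = 2 * int64(PtrSize)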
var (
	SlicePtrOffset int64
	SliceLenOffset int64
	SliceCapOffset int64

	SliceSize  int64
	StringSize int64
)

// SkipSizeForTracing causes CalcSize to return early when typecheck
// tracing is enabled, since calculating sizes in that situation is not
// safe. See CalcSize and issue #33658.
var SkipSizeForTracing bool

// typePos returns the position associated with t.
// This is where t was declared or where it appeared as a type expression.
func typePos(t *Type) src.XPos {
	if pos := t.Pos(); pos.IsKnown() {
		return pos
	}
	base.Fatalf("bad type: %v", t)
	panic("unreachable")
}

// MaxWidth is the maximum size of a value on the target architecture.
var MaxWidth int64

// CalcSizeDisabled indicates that it is currently unsafe to calculate
// Types' widths and alignments. See CalcSize.
var CalcSizeDisabled bool

// Machine sizes and rounding alignments are dictated by the size of a
// pointer, set in gc.Main (see ../gc/main.go).
//
// defercalc counts the nesting depth of DeferCheckSize calls; while it is
// nonzero, CheckSize defers size calculations until ResumeCheckSize.
var defercalc int

// RoundUp rounds o up to a multiple of r, where r must be a power of 2
// in the range [1, 8].
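// For example, RoundUp(10, 8) == 16 and RoundUp(16, 4) == 16.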
func RoundUp(o int64, r int64) int64 {
	if r < 1 || r > 8 || r&(r-1) != 0 {
		base.Fatalf("Round %d", r)
	}
	return (o + r - 1) &^ (r - 1)
}

// expandiface computes the method set for interface type t by
// expanding embedded interfaces.
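//
// For example, given
//
//	type I interface{ M() }
//	type J interface{ I; N() }
//
// the expanded method set of J is {M, N}.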
func expandiface(t *Type) {
	seen := make(map[*Sym]*Field)
	var methods []*Field

	addMethod := func(m *Field, explicit bool) {
		switch prev := seen[m.Sym]; {
		case prev == nil:
			seen[m.Sym] = m
		case !explicit && Identical(m.Type, prev.Type):
			return
		default:
			base.ErrorfAt(m.Pos, errors.DuplicateDecl, "duplicate method %s", m.Sym.Name)
		}
		methods = append(methods, m)
	}

	{
		methods := t.Methods()
		sort.SliceStable(methods, func(i, j int) bool {
			mi, mj := methods[i], methods[j]

			// Sort embedded types by type name (if any).
			if mi.Sym == nil && mj.Sym == nil {
				return mi.Type.Sym().Less(mj.Type.Sym())
			}

			// Sort methods before embedded types.
			if mi.Sym == nil || mj.Sym == nil {
				return mi.Sym != nil
			}

			// Sort methods by symbol name.
			return mi.Sym.Less(mj.Sym)
		})
	}

	for _, m := range t.Methods() {
		if m.Sym == nil {
			continue
		}

		CheckSize(m.Type)
		addMethod(m, true)
	}

	for _, m := range t.Methods() {
		if m.Sym != nil || m.Type == nil {
			continue
		}

		// In Go 1.18, embedded types can be anything. In Go 1.17, we
		// disallowed embedding anything other than interfaces. Any
		// violation of that restriction has already been caught by
		// types2, so allow non-interface types here.
		if !m.Type.IsInterface() {
			continue
		}

		// Embedded interface: duplicate all methods
		// and add to t's method set.
		for _, t1 := range m.Type.AllMethods() {
			f := NewField(m.Pos, t1.Sym, t1.Type)
			addMethod(f, false)

			// Clear position after typechecking, for consistency with types2.
			f.Pos = src.NoXPos
		}

		// Clear position after typechecking, for consistency with types2.
		m.Pos = src.NoXPos
	}

	sort.Sort(MethodsByName(methods))

	if int64(len(methods)) >= MaxWidth/int64(PtrSize) {
		base.ErrorfAt(typePos(t), 0, "interface too large")
	}
	for i, m := range methods {
		m.Offset = int64(i) * int64(PtrSize)
	}

	t.SetAllMethods(methods)
}

// calcStructOffset computes the offsets of a sequence of fields,
// starting at the given offset. It returns the resulting offset,
// without any trailing padding.
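//
// For example, starting at offset 0 with fields of type int8 and int32,
// the int8 field is placed at offset 0, the int32 field is placed at
// offset 4 (after rounding up to its alignment), and the returned
// offset is 8.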
func calcStructOffset(t *Type, fields []*Field, offset int64) int64 {
	for _, f := range fields {
		CalcSize(f.Type)
		offset = RoundUp(offset, int64(f.Type.align))

		if t.IsStruct() { // param offsets depend on ABI
			f.Offset = offset

			// If type T contains a field F marked as not-in-heap,
			// then T must also be a not-in-heap type. Otherwise,
			// you could heap-allocate T and then take a pointer to F,
			// which would be a heap pointer to a not-in-heap type.
			if f.Type.NotInHeap() {
				t.SetNotInHeap(true)
			}
		}

		offset += f.Type.width

		maxwidth := MaxWidth
		// On 32-bit systems, reflect tables impose an additional constraint
		// that each field start offset must fit in 31 bits.
		if maxwidth < 1<<32 {
			maxwidth = 1<<31 - 1
		}
		if offset >= maxwidth {
			base.ErrorfAt(typePos(t), 0, "type %L too large", t)
			offset = 8 // small but nonzero
		}
	}

	return offset
}

func isAtomicStdPkg(p *Pkg) bool {
	if p.Prefix == `""` {
		panic("bad package prefix")
	}
	return p.Prefix == "sync/atomic" || p.Prefix == "internal/runtime/atomic"
}

// CalcSize calculates and stores the size, alignment, eq/hash algorithm,
// and ptrBytes for t.
// If CalcSizeDisabled is set and the size/alignment
// have not already been calculated, it calls Fatalf.
// This is used to prevent data races in the back end.
func CalcSize(t *Type) {
	// Calling CalcSize when typecheck tracing is enabled is not safe.
	// See issue #33658.
	if base.EnableTrace && SkipSizeForTracing {
		return
	}
	if PtrSize == 0 {
		// Assume this is a test.
		return
	}

	if t == nil {
		return
	}

	if t.width == -2 {
		t.width = 0
		t.align = 1
		base.Fatalf("invalid recursive type %v", t)
		return
	}

	if t.widthCalculated() {
		return
	}

	if CalcSizeDisabled {
		base.Fatalf("width not calculated: %v", t)
	}

	// defer CheckSize calls until after we're done
	DeferCheckSize()

	lno := base.Pos
	if pos := t.Pos(); pos.IsKnown() {
		base.Pos = pos
	}

	t.width = -2
	t.align = 0  // 0 means derive t.align from w, below
	t.alg = AMEM // default
	// default t.ptrBytes is 0.
	if t.Noalg() {
		t.setAlg(ANOALG)
	}

	et := t.Kind()
	switch et {
	case TFUNC, TCHAN, TMAP, TSTRING:
		break

	// SimType == 0 during bootstrap
	default:
		if SimType[t.Kind()] != 0 {
			et = SimType[t.Kind()]
		}
	}

	var w int64
	switch et {
	default:
		base.Fatalf("CalcSize: unknown type: %v", t)

	// compiler-specific stuff
	case TINT8, TUINT8, TBOOL:
		// bool is int8
		w = 1
		t.intRegs = 1

	case TINT16, TUINT16:
		w = 2
		t.intRegs = 1

	case TINT32, TUINT32:
		w = 4
		t.intRegs = 1

	case TINT64, TUINT64:
		w = 8
		t.align = uint8(RegSize)
		t.intRegs = uint8(8 / RegSize)

	case TFLOAT32:
		w = 4
		t.floatRegs = 1
		t.setAlg(AFLOAT32)

	case TFLOAT64:
		w = 8
		t.align = uint8(RegSize)
		t.floatRegs = 1
		t.setAlg(AFLOAT64)

	case TCOMPLEX64:
		w = 8
		t.align = 4
		t.floatRegs = 2
		t.setAlg(ACPLX64)

	case TCOMPLEX128:
		w = 16
		t.align = uint8(RegSize)
		t.floatRegs = 2
		t.setAlg(ACPLX128)

	case TPTR:
		w = int64(PtrSize)
		t.intRegs = 1
		CheckSize(t.Elem())
		t.ptrBytes = int64(PtrSize) // See PtrDataSize

	case TUNSAFEPTR:
		w = int64(PtrSize)
		t.intRegs = 1
		t.ptrBytes = int64(PtrSize)

	case TINTER: // implemented as 2 pointers
		w = 2 * int64(PtrSize)
		t.align = uint8(PtrSize)
		t.intRegs = 2
		expandiface(t)
		if len(t.allMethods.Slice()) == 0 {
			t.setAlg(ANILINTER)
		} else {
			t.setAlg(AINTER)
		}
		t.ptrBytes = int64(2 * PtrSize)

	case TCHAN: // implemented as pointer
		w = int64(PtrSize)
		t.intRegs = 1
		t.ptrBytes = int64(PtrSize)

		CheckSize(t.Elem())

		// Make fake type to trigger channel element size check after
		// any top-level recursive type has been completed.
		t1 := NewChanArgs(t)
		CheckSize(t1)

	case TCHANARGS:
		t1 := t.ChanArgs()
		CalcSize(t1) // just in case
		// Make sure size of t1.Elem() is calculated at this point. We can
		// use CalcSize() here rather than CheckSize(), because the top-level
		// (possibly recursive) type will have been calculated before the fake
		// chanargs is handled.
		CalcSize(t1.Elem())
		if t1.Elem().width >= 1<<16 {
			base.Errorf("channel element type too large (>64kB)")
		}
		w = 1 // anything will do

	case TMAP: // implemented as pointer
		w = int64(PtrSize)
		t.intRegs = 1
		CheckSize(t.Elem())
		CheckSize(t.Key())
		t.setAlg(ANOEQ)
		t.ptrBytes = int64(PtrSize)

	case TFORW: // should have been filled in
		base.Fatalf("invalid recursive type %v", t)

	case TANY: // not a real type; should be replaced before use.
		base.Fatalf("CalcSize any")

	case TSTRING:
		if StringSize == 0 {
			base.Fatalf("early CalcSize string")
		}
		w = StringSize
		t.align = uint8(PtrSize)
		t.intRegs = 2
		t.setAlg(ASTRING)
		t.ptrBytes = int64(PtrSize)

	case TARRAY:
		if t.Elem() == nil {
			break
		}

		CalcSize(t.Elem())
		t.SetNotInHeap(t.Elem().NotInHeap())
		if t.Elem().width != 0 {
			cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().width)
			if uint64(t.NumElem()) > cap {
				base.Errorf("type %L larger than address space", t)
			}
		}
		w = t.NumElem() * t.Elem().width
		t.align = t.Elem().align

		// ABIInternal only allows "trivial" arrays (i.e., length 0 or 1)
		// to be passed by register.
		switch t.NumElem() {
		case 0:
			t.intRegs = 0
			t.floatRegs = 0
		case 1:
			t.intRegs = t.Elem().intRegs
			t.floatRegs = t.Elem().floatRegs
		default:
			t.intRegs = math.MaxUint8
			t.floatRegs = math.MaxUint8
		}
		switch a := t.Elem().alg; a {
		case AMEM, ANOEQ, ANOALG:
			t.setAlg(a)
		default:
			switch t.NumElem() {
			case 0:
				// We checked above that the element type is comparable.
				t.setAlg(AMEM)
			case 1:
				// Single-element array is same as its lone element.
				t.setAlg(a)
			default:
				t.setAlg(ASPECIAL)
			}
		}
		if t.NumElem() > 0 {
			x := PtrDataSize(t.Elem())
			if x > 0 {
				t.ptrBytes = t.Elem().width*(t.NumElem()-1) + x
			}
		}

	case TSLICE:
		if t.Elem() == nil {
			break
		}
		w = SliceSize
		CheckSize(t.Elem())
		t.align = uint8(PtrSize)
		t.intRegs = 3
		t.setAlg(ANOEQ)
		if !t.Elem().NotInHeap() {
			t.ptrBytes = int64(PtrSize)
		}

	case TSTRUCT:
		if t.IsFuncArgStruct() {
			base.Fatalf("CalcSize fn struct %v", t)
		}
		CalcStructSize(t)
		w = t.width

	// Make a fake type to check later, to
	// trigger function argument computation.
	case TFUNC:
		t1 := NewFuncArgs(t)
		CheckSize(t1)
		w = int64(PtrSize) // width of func type is pointer
		t.intRegs = 1
		t.setAlg(ANOEQ)
		t.ptrBytes = int64(PtrSize)

	// A function's arguments are laid out as three concatenated structs
	// (receiver, parameters, results); compute their widths as a side effect.
	case TFUNCARGS:
		t1 := t.FuncArgs()
		// TODO(mdempsky): Should package abi be responsible for computing argwid?
		w = calcStructOffset(t1, t1.Recvs(), 0)
		w = calcStructOffset(t1, t1.Params(), w)
		w = RoundUp(w, int64(RegSize))
		w = calcStructOffset(t1, t1.Results(), w)
		w = RoundUp(w, int64(RegSize))
		t1.extra.(*Func).Argwid = w
		t.align = 1
	}

	if PtrSize == 4 && w != int64(int32(w)) {
		base.Errorf("type %v too large", t)
	}

	t.width = w
	if t.align == 0 {
		if w == 0 || w > 8 || w&(w-1) != 0 {
			base.Fatalf("invalid alignment for %v", t)
		}
		t.align = uint8(w)
	}

	base.Pos = lno

	ResumeCheckSize()
}

// CalcStructSize calculates the size of t,
// filling in t.width, t.align, t.intRegs, and t.floatRegs,
// even if size calculation is otherwise disabled.
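//
// For example (assuming a 64-bit target), for struct{ a int8; b int64 }
// the fields are placed at offsets 0 and 8, maxAlign is 8, and the
// final size after trailing padding is 16.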
func CalcStructSize(t *Type) {
	var maxAlign uint8 = 1

	// Recognize special types. This logic is duplicated in go/types and
	// cmd/compile/internal/types2.
	if sym := t.Sym(); sym != nil {
		switch {
		case sym.Name == "align64" && isAtomicStdPkg(sym.Pkg):
			maxAlign = 8
		}
	}

	fields := t.Fields()
	size := calcStructOffset(t, fields, 0)

	// For non-zero-sized structs which end in a zero-sized field, we
	// add an extra byte of padding to the type. This padding ensures
	// that taking the address of a zero-sized field can't manufacture a
	// pointer to the next object in the heap. See issue 9401.
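	// For example, struct{ x int32; y struct{} } is padded to 5 bytes here
	// (and then rounded up to 8 below), while struct{ y struct{}; x int32 }
	// gets no extra byte and ends up with size 4.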
	if size > 0 && fields[len(fields)-1].Type.width == 0 {
		size++
	}

	var intRegs, floatRegs uint64
	for _, field := range fields {
		typ := field.Type

		// The alignment of a struct type is the maximum alignment of its
		// field types.
		if align := typ.align; align > maxAlign {
			maxAlign = align
		}

		// Each field needs its own registers.
		// We sum in uint64 to avoid possible overflows.
		intRegs += uint64(typ.intRegs)
		floatRegs += uint64(typ.floatRegs)
	}

	// Final size includes trailing padding.
	size = RoundUp(size, int64(maxAlign))

	if intRegs > math.MaxUint8 || floatRegs > math.MaxUint8 {
		intRegs = math.MaxUint8
		floatRegs = math.MaxUint8
	}

	t.width = size
	t.align = maxAlign
	t.intRegs = uint8(intRegs)
	t.floatRegs = uint8(floatRegs)

	// Compute eq/hash algorithm type.
	t.alg = AMEM // default
	if t.Noalg() {
		t.setAlg(ANOALG)
	}
	if len(fields) == 1 && !fields[0].Sym.IsBlank() {
		// One-field struct is same as that one field alone.
		t.setAlg(fields[0].Type.alg)
	} else {
		for i, f := range fields {
			a := f.Type.alg
			switch a {
			case ANOEQ, ANOALG:
			case AMEM:
				// Blank fields and padded fields need a special compare.
				if f.Sym.IsBlank() || IsPaddedField(t, i) {
					a = ASPECIAL
				}
			default:
				// Fields with non-memory equality need a special compare.
				a = ASPECIAL
			}
			t.setAlg(a)
		}
	}
	// Compute ptrBytes.
	for i := len(fields) - 1; i >= 0; i-- {
		f := fields[i]
		if size := PtrDataSize(f.Type); size > 0 {
			t.ptrBytes = f.Offset + size
			break
		}
	}
}

func (t *Type) widthCalculated() bool {
	return t.align > 0
}

// When a type's width should be known, we call CheckSize
// to compute it. During a declaration like
//
//	type T *struct { next T }
//
// it is necessary to defer the calculation of the struct width
// until after T has been initialized to be a pointer to that struct.
// Similarly, during import processing structs may be used
// before their definition. In those situations, calling
// DeferCheckSize() stops width calculations until
// ResumeCheckSize() is called, at which point all the
// CalcSizes that were deferred are executed.
// CalcSize should only be called when the type's size
// is needed immediately. CheckSize makes sure the
// size is evaluated eventually.
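//
// A typical usage pattern is (sketch):
//
//	DeferCheckSize()
//	... construct types, calling CheckSize on each ...
//	ResumeCheckSize() // runs the deferred CalcSize calls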

var deferredTypeStack []*Type

func CheckSize(t *Type) {
	if t == nil {
		return
	}

	// function arg structs should not be checked
	// outside of the enclosing function.
	if t.IsFuncArgStruct() {
		base.Fatalf("CheckSize %v", t)
	}

	if defercalc == 0 {
		CalcSize(t)
		return
	}

	// if type has not yet been pushed on deferredTypeStack, do it now
	if !t.Deferwidth() {
		t.SetDeferwidth(true)
		deferredTypeStack = append(deferredTypeStack, t)
	}
}

func DeferCheckSize() {
	defercalc++
}

func ResumeCheckSize() {
	if defercalc == 1 {
		for len(deferredTypeStack) > 0 {
			t := deferredTypeStack[len(deferredTypeStack)-1]
			deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
			t.SetDeferwidth(false)
			CalcSize(t)
		}
	}

	defercalc--
}

// PtrDataSize returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
//
// PtrDataSize is only defined for actual Go types. It's an error to
// use it on compiler-internal types (e.g., TSSA, TRESULTS).
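//
// For example, the ptrdata of a string is PtrSize (only the data pointer
// is a pointer), and for struct{ p *byte; x uintptr } it is also PtrSize,
// since the pointer-containing prefix ends after the first field.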
func PtrDataSize(t *Type) int64 {
	CalcSize(t)
	x := t.ptrBytes
	if t.Kind() == TPTR && t.Elem().NotInHeap() {
		// Note: this is done here instead of when we're setting
		// the ptrBytes field, because at that time (in NewPtr, usually)
		// the NotInHeap bit of the element type might not be set yet.
		x = 0
	}
	return x
}