// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"cmd/compile/internal/reflectdata"
	"cmd/compile/internal/types"
	"cmd/internal/obj"
	"cmd/internal/objabi"
	"cmd/internal/src"
	"fmt"
	"internal/buildcfg"
)

// A ZeroRegion records parts of an object which are known to be zero.
// A ZeroRegion only applies to a single memory state.
// Each bit in mask is set if the corresponding pointer-sized word of
// the base object is known to be zero.
// In other words, if mask & (1<<i) != 0, then [base+i*ptrSize, base+(i+1)*ptrSize)
// is known to be zero.
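// For example, a freshly allocated object covering 3 pointer-sized words
// starts with mask == 0b111 (see computeZeroMap, which seeds this map for
// runtime.newobject results).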
type ZeroRegion struct {
	base *Value
	mask uint64
}

// mightBeHeapPointer reports whether v might point to the heap.
// v must have pointer type.
func mightBeHeapPointer(v *Value) bool {
	if IsGlobalAddr(v) {
		return false
	}
	return true
}

// mightContainHeapPointer reports whether the data currently at addresses
// [ptr,ptr+size) might contain heap pointers. "currently" means at memory state mem.
// zeroes contains ZeroRegion data to help make that decision (see computeZeroMap).
func mightContainHeapPointer(ptr *Value, size int64, mem *Value, zeroes map[ID]ZeroRegion) bool {
	if IsReadOnlyGlobalAddr(ptr) {
		// The read-only globals section cannot contain any heap pointers.
		return false
	}

	// See if we can prove that the queried memory is all zero.

	// Find base pointer and offset. Hopefully, the base is the result of a new(T).
	var off int64
	for ptr.Op == OpOffPtr {
		off += ptr.AuxInt
		ptr = ptr.Args[0]
	}

	ptrSize := ptr.Block.Func.Config.PtrSize
	if off%ptrSize != 0 {
		return true // see issue 61187
	}
	if size%ptrSize != 0 {
		ptr.Fatalf("unaligned pointer write")
	}
	if off < 0 || off+size > 64*ptrSize {
		// memory range goes off end of tracked offsets
		return true
	}
	z := zeroes[mem.ID]
	if ptr != z.base {
		// This isn't the object we know about at this memory state.
		return true
	}
	// Mask of bits we're asking about
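	// (e.g., with ptrSize == 8, off == 8, size == 16, m == 0b110: words 1 and 2).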
	m := (uint64(1)<<(size/ptrSize) - 1) << (off / ptrSize)

	if z.mask&m == m {
		// All locations are known to be zero, so no heap pointers.
		return false
	}
	return true
}

// needwb reports whether we need a write barrier for store op v.
// v must be Store/Move/Zero.
// zeroes provides known zero information (keyed by ID of memory-type values).
func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
	t, ok := v.Aux.(*types.Type)
	if !ok {
		v.Fatalf("store aux is not a type: %s", v.LongString())
	}
	if !t.HasPointers() {
		return false
	}
	dst := v.Args[0]
	if IsStackAddr(dst) {
		return false // writes into the stack don't need write barrier
	}
	// If we're writing to a place that might have heap pointers, we need
	// the write barrier.
	if mightContainHeapPointer(dst, t.Size(), v.MemoryArg(), zeroes) {
		return true
	}
	// Lastly, check if the values we're writing might be heap pointers.
	// If they aren't, we don't need a write barrier.
	switch v.Op {
	case OpStore:
		if !mightBeHeapPointer(v.Args[1]) {
			return false
		}
	case OpZero:
		return false // nil is not a heap pointer
	case OpMove:
		if !mightContainHeapPointer(v.Args[1], t.Size(), v.Args[2], zeroes) {
			return false
		}
	default:
		v.Fatalf("store op unknown: %s", v.LongString())
	}
	return true
}

// needWBsrc reports whether GC needs to see v when it is the source of a store.
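// Values known to be global addresses or nil never point into the heap,
// so the GC does not need to see them.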
func needWBsrc(v *Value) bool {
	return !IsGlobalAddr(v)
}

// needWBdst reports whether GC needs to see what used to be in *ptr when ptr is
// the target of a pointer store.
func needWBdst(ptr, mem *Value, zeroes map[ID]ZeroRegion) bool {
	// Detect storing to zeroed memory.
	var off int64
	for ptr.Op == OpOffPtr {
		off += ptr.AuxInt
		ptr = ptr.Args[0]
	}
	ptrSize := ptr.Block.Func.Config.PtrSize
	if off%ptrSize != 0 {
		return true // see issue 61187
	}
	if off < 0 || off >= 64*ptrSize {
		// write goes off end of tracked offsets
		return true
	}
	z := zeroes[mem.ID]
	if ptr != z.base {
		return true
	}
	// If destination is known to be zeroed, we don't need the write barrier
	// to record the old value in *ptr.
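	// (e.g., with ptrSize == 8 and off == 16, bit 2 of the mask tells us whether word 2 is zero).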
	return z.mask>>uint(off/ptrSize)&1 == 0
}

// writebarrier pass inserts write barriers for store ops (Store, Move, Zero)
// when necessary (the condition above). It rewrites store ops to branches
// and runtime calls, like
//
//	if writeBarrier.enabled {
//		buf := gcWriteBarrier2()	// Not a regular Go call
//		buf[0] = val
//		buf[1] = *ptr
//	}
//	*ptr = val
//
// A sequence of WB stores for many pointer fields of a single type will
// be emitted together, with a single branch.
func writebarrier(f *Func) {
	if !f.fe.UseWriteBarrier() {
		return
	}

	// Number of write buffer entries we can request at once.
	// Must match runtime/mwbbuf.go:wbMaxEntriesPerCall.
	// It must also match the number of instances of runtime.gcWriteBarrier{X}.
	const maxEntries = 8

	var sb, sp, wbaddr, const0 *Value
	var cgoCheckPtrWrite, cgoCheckMemmove *obj.LSym
	var wbZero, wbMove *obj.LSym
	var stores, after []*Value
	var sset, sset2 *sparseSet
	var storeNumber []int32

	// Compute map from a value to the SelectN [1] value that uses it.
	select1 := f.Cache.allocValueSlice(f.NumValues())
	defer func() { f.Cache.freeValueSlice(select1) }()
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if v.Op != OpSelectN {
				continue
			}
			if v.AuxInt != 1 {
				continue
			}
			select1[v.Args[0].ID] = v
		}
	}

	zeroes := f.computeZeroMap(select1)
	for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
		// First, identify all the stores that need a write barrier and
		// temporarily mark them with WB ops. Record the presence of WB ops.
		nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block
		for _, v := range b.Values {
			switch v.Op {
			case OpStore, OpMove, OpZero:
				if needwb(v, zeroes) {
					switch v.Op {
					case OpStore:
						v.Op = OpStoreWB
					case OpMove:
						v.Op = OpMoveWB
					case OpZero:
						v.Op = OpZeroWB
					}
					nWBops++
				}
			}
		}
		if nWBops == 0 {
			continue
		}

		if wbaddr == nil {
			// lazily initialize global values for write barrier test and calls
			// find SB and SP values in entry block
			initpos := f.Entry.Pos
			sp, sb = f.spSb()
			wbsym := f.fe.Syslook("writeBarrier")
			wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
			wbZero = f.fe.Syslook("wbZero")
			wbMove = f.fe.Syslook("wbMove")
			if buildcfg.Experiment.CgoCheck2 {
				cgoCheckPtrWrite = f.fe.Syslook("cgoCheckPtrWrite")
				cgoCheckMemmove = f.fe.Syslook("cgoCheckMemmove")
			}
			const0 = f.ConstInt32(f.Config.Types.UInt32, 0)

			// allocate auxiliary data structures for computing store order
			sset = f.newSparseSet(f.NumValues())
			defer f.retSparseSet(sset)
			sset2 = f.newSparseSet(f.NumValues())
			defer f.retSparseSet(sset2)
			storeNumber = f.Cache.allocInt32Slice(f.NumValues())
			defer f.Cache.freeInt32Slice(storeNumber)
		}

		// order values in store order
		b.Values = storeOrder(b.Values, sset, storeNumber)
	again:
		// find the start and end of the last contiguous WB store sequence.
		// a branch will be inserted there. values after it will be moved
		// to a new block.
		var last *Value
		var start, end int
		var nonPtrStores int
		values := b.Values
	FindSeq:
		for i := len(values) - 1; i >= 0; i-- {
			w := values[i]
			switch w.Op {
			case OpStoreWB, OpMoveWB, OpZeroWB:
				start = i
				if last == nil {
					last = w
					end = i + 1
				}
				nonPtrStores = 0
			case OpVarDef, OpVarLive:
				continue
			case OpStore:
				if last == nil {
					continue
				}
				nonPtrStores++
				if nonPtrStores > 2 {
					break FindSeq
				}
			default:
				if last == nil {
					continue
				}
				break FindSeq
			}
		}
		stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
		after = append(after[:0], b.Values[end:]...)
		b.Values = b.Values[:start]

		// find the memory before the WB stores
		mem := stores[0].MemoryArg()
		pos := stores[0].Pos

		// If the source of a MoveWB is volatile (will be clobbered by a
		// function call), we need to copy it to a temporary location, as
		// marshaling the args of wbMove might clobber the value we're
		// trying to move.
		// Look for volatile source, copy it to temporary before we check
		// the write barrier flag.
		// There is unlikely to be more than one of them, so just do a linear
		// search instead of using a map.
		// See issue 15854.
		type volatileCopy struct {
			src *Value // address of original volatile value
			tmp *Value // address of temporary we've copied the volatile value into
		}
		var volatiles []volatileCopy

		if !(f.ABIDefault == f.ABI1 && len(f.Config.intParamRegs) >= 3) {
			// We don't need to do this if the calls we're going to do take
			// all their arguments in registers.
			// 3 is the magic number because it covers wbZero, wbMove, cgoCheckMemmove.
		copyLoop:
			for _, w := range stores {
				if w.Op == OpMoveWB {
					val := w.Args[1]
					if isVolatile(val) {
						for _, c := range volatiles {
							if val == c.src {
								continue copyLoop // already copied
							}
						}

						t := val.Type.Elem()
						tmp := f.NewLocal(w.Pos, t)
						mem = b.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, mem)
						tmpaddr := b.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, mem)
						siz := t.Size()
						mem = b.NewValue3I(w.Pos, OpMove, types.TypeMem, siz, tmpaddr, val, mem)
						mem.Aux = t
						volatiles = append(volatiles, volatileCopy{val, tmpaddr})
					}
				}
			}
		}

		// Build branch point.
		bThen := f.NewBlock(BlockPlain)
		bEnd := f.NewBlock(b.Kind)
		bThen.Pos = pos
		bEnd.Pos = b.Pos
		b.Pos = pos

		// Set up control flow for end block.
		bEnd.CopyControls(b)
		bEnd.Likely = b.Likely
		for _, e := range b.Succs {
			bEnd.Succs = append(bEnd.Succs, e)
			e.b.Preds[e.i].b = bEnd
		}

		// set up control flow for write barrier test
		// load word, test word, avoiding partial register write from load byte.
		cfgtypes := &f.Config.Types
		flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
		flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
		b.Kind = BlockIf
		b.SetControl(flag)
		b.Likely = BranchUnlikely
		b.Succs = b.Succs[:0]
		b.AddEdgeTo(bThen)
		b.AddEdgeTo(bEnd)
		bThen.AddEdgeTo(bEnd)

		// For each write barrier store, append write barrier code to bThen.
		memThen := mem
		var curCall *Value
		var curPtr *Value
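		// addEntry adds v to the current write barrier buffer, starting a new
		// gcWriteBarrier call (OpWB) once the current one has maxEntries entries.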
		addEntry := func(pos src.XPos, v *Value) {
			if curCall == nil || curCall.AuxInt == maxEntries {
				t := types.NewTuple(types.Types[types.TUINTPTR].PtrTo(), types.TypeMem)
				curCall = bThen.NewValue1(pos, OpWB, t, memThen)
				curPtr = bThen.NewValue1(pos, OpSelect0, types.Types[types.TUINTPTR].PtrTo(), curCall)
				memThen = bThen.NewValue1(pos, OpSelect1, types.TypeMem, curCall)
			}
			// Store value in write buffer
			num := curCall.AuxInt
			curCall.AuxInt = num + 1
			wbuf := bThen.NewValue1I(pos, OpOffPtr, types.Types[types.TUINTPTR].PtrTo(), num*f.Config.PtrSize, curPtr)
			memThen = bThen.NewValue3A(pos, OpStore, types.TypeMem, types.Types[types.TUINTPTR], wbuf, v, memThen)
		}

		// Note: we can issue the write barrier code in any order. In particular,
		// it doesn't matter if the operations end up in a different order, *even
		// if* they refer to overlapping memory regions: for instance, an OpStore
		// might store to a location that is later read by an OpMove. In all
		// cases, any pointers that must get into the write barrier buffer still
		// make it there, possibly in a different order and possibly a different
		// (but always nonzero) number of times.
		// In light of that, we process all the OpStoreWBs first. This minimizes
		// the amount of spill/restore code we need around the Zero/Move calls.

		// srcs contains the value IDs of pointer values we've put in the write barrier buffer.
		srcs := sset
		srcs.clear()
		// dsts contains the value IDs of locations which we've read a pointer out of
		// and put the result in the write barrier buffer.
		dsts := sset2
		dsts.clear()

		for _, w := range stores {
			if w.Op != OpStoreWB {
				continue
			}
			pos := w.Pos
			ptr := w.Args[0]
			val := w.Args[1]
			if !srcs.contains(val.ID) && needWBsrc(val) {
				srcs.add(val.ID)
				addEntry(pos, val)
			}
			if !dsts.contains(ptr.ID) && needWBdst(ptr, w.Args[2], zeroes) {
				dsts.add(ptr.ID)
				// Load old value from store target.
				// Note: This turns bad pointer writes into bad
				// pointer reads, which could be confusing. We could avoid
				// reading from obviously bad pointers, which would
				// take care of the vast majority of these. We could
				// patch this up in the signal handler, or use XCHG to
				// combine the read and the write.
				oldVal := bThen.NewValue2(pos, OpLoad, types.Types[types.TUINTPTR], ptr, memThen)
				// Save old value to write buffer.
				addEntry(pos, oldVal)
			}
			f.fe.Func().SetWBPos(pos)
			nWBops--
		}

		for _, w := range stores {
			pos := w.Pos
			switch w.Op {
			case OpZeroWB:
				dst := w.Args[0]
				typ := reflectdata.TypeLinksym(w.Aux.(*types.Type))
				// zeroWB(&typ, dst)
				taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
				memThen = wbcall(pos, bThen, wbZero, sp, memThen, taddr, dst)
				f.fe.Func().SetWBPos(pos)
				nWBops--
			case OpMoveWB:
				dst := w.Args[0]
				src := w.Args[1]
				if isVolatile(src) {
					for _, c := range volatiles {
						if src == c.src {
							src = c.tmp
							break
						}
					}
				}
				typ := reflectdata.TypeLinksym(w.Aux.(*types.Type))
				// moveWB(&typ, dst, src)
				taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
				memThen = wbcall(pos, bThen, wbMove, sp, memThen, taddr, dst, src)
				f.fe.Func().SetWBPos(pos)
				nWBops--
			}
		}

		// merge memory
		mem = bEnd.NewValue2(pos, OpPhi, types.TypeMem, mem, memThen)

		// Do raw stores after merge point.
		for _, w := range stores {
			pos := w.Pos
			switch w.Op {
			case OpStoreWB:
				ptr := w.Args[0]
				val := w.Args[1]
				if buildcfg.Experiment.CgoCheck2 {
					// Issue cgo checking code.
					mem = wbcall(pos, bEnd, cgoCheckPtrWrite, sp, mem, ptr, val)
				}
				mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
			case OpZeroWB:
				dst := w.Args[0]
				mem = bEnd.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, dst, mem)
				mem.Aux = w.Aux
			case OpMoveWB:
				dst := w.Args[0]
				src := w.Args[1]
				if isVolatile(src) {
					for _, c := range volatiles {
						if src == c.src {
							src = c.tmp
							break
						}
					}
				}
				if buildcfg.Experiment.CgoCheck2 {
					// Issue cgo checking code.
					typ := reflectdata.TypeLinksym(w.Aux.(*types.Type))
					taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
					mem = wbcall(pos, bEnd, cgoCheckMemmove, sp, mem, taddr, dst, src)
				}
				mem = bEnd.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, dst, src, mem)
				mem.Aux = w.Aux
			case OpVarDef, OpVarLive:
				mem = bEnd.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, mem)
			case OpStore:
				ptr := w.Args[0]
				val := w.Args[1]
				mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
			}
		}

		// The last store becomes the WBend marker. This marker is used by the liveness
		// pass to determine what parts of the code are preemption-unsafe.
		// All subsequent memory operations use this memory, so we have to sacrifice the
		// previous last memory op to become this new value.
		bEnd.Values = append(bEnd.Values, last)
		last.Block = bEnd
		last.reset(OpWBend)
		last.Pos = last.Pos.WithNotStmt()
		last.Type = types.TypeMem
		last.AddArg(mem)

		// Free all the old stores, except last which became the WBend marker.
		for _, w := range stores {
			if w != last {
				w.resetArgs()
			}
		}
		for _, w := range stores {
			if w != last {
				f.freeValue(w)
			}
		}

		// put values after the store sequence into the end block
		bEnd.Values = append(bEnd.Values, after...)
		for _, w := range after {
			w.Block = bEnd
		}

		// if we have more stores in this block, do this block again
		if nWBops > 0 {
			goto again
		}
	}
}

// computeZeroMap returns a map from an ID of a memory value to
// a set of locations that are known to be zeroed at that memory value.
func (f *Func) computeZeroMap(select1 []*Value) map[ID]ZeroRegion {

	ptrSize := f.Config.PtrSize
	// Keep track of which parts of memory are known to be zero.
	// This helps with removing write barriers for various initialization patterns.
	// This analysis is conservative. We only keep track, for each memory state, of
	// which of the first 64 words of a single object are known to be zero.
	zeroes := map[ID]ZeroRegion{}
	// Find new objects.
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if mem, ok := IsNewObject(v, select1); ok {
				// While compiling package runtime itself, we might see user
				// calls to newobject, which will have result type
				// unsafe.Pointer instead. We can't easily infer how large the
				// allocated memory is, so just skip it.
				if types.LocalPkg.Path == "runtime" && v.Type.IsUnsafePtr() {
					continue
				}

				nptr := v.Type.Elem().Size() / ptrSize
				if nptr > 64 {
					nptr = 64
				}
				zeroes[mem.ID] = ZeroRegion{base: v, mask: 1<<uint(nptr) - 1}
			}
		}
	}
	// Find stores to those new objects.
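	// Iterate to a fixed point, since values are not necessarily visited in store order.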
	for {
		changed := false
		for _, b := range f.Blocks {
			// Note: iterating forwards helps convergence, as values are
			// typically (but not always!) in store order.
			for _, v := range b.Values {
				if v.Op != OpStore {
					continue
				}
				z, ok := zeroes[v.MemoryArg().ID]
				if !ok {
					continue
				}
				ptr := v.Args[0]
				var off int64
				size := v.Aux.(*types.Type).Size()
				for ptr.Op == OpOffPtr {
					off += ptr.AuxInt
					ptr = ptr.Args[0]
				}
				if ptr != z.base {
					// Different base object - we don't know anything.
					// We could even be writing to the base object we know
					// about, but through an aliased but offset pointer.
					// So we have to throw all the zero information we have away.
					continue
				}
				// Round to cover any partially written pointer slots.
				// Pointer writes should never be unaligned like this, but non-pointer
				// writes to pointer-containing types will do this.
				if d := off % ptrSize; d != 0 {
					off -= d
					size += d
				}
				if d := size % ptrSize; d != 0 {
					size += ptrSize - d
				}
				// Clip to the 64 words that we track.
				min := off
				max := off + size
				if min < 0 {
					min = 0
				}
				if max > 64*ptrSize {
					max = 64 * ptrSize
				}
				// Clear bits for parts that we are writing (and hence
				// will no longer necessarily be zero).
				for i := min; i < max; i += ptrSize {
					bit := i / ptrSize
					z.mask &^= 1 << uint(bit)
				}
				if z.mask == 0 {
					// No more known zeros - don't bother keeping.
					continue
				}
				// Save updated known zero contents for new store.
				if zeroes[v.ID] != z {
					zeroes[v.ID] = z
					changed = true
				}
			}
		}
		if !changed {
			break
		}
	}
	if f.pass.debug > 0 {
		fmt.Printf("func %s\n", f.Name)
		for mem, z := range zeroes {
			fmt.Printf("  memory=v%d ptr=%v zeromask=%b\n", mem, z.base, z.mask)
		}
	}
	return zeroes
}

// wbcall emits a write barrier runtime call in b and returns the resulting memory.
func wbcall(pos src.XPos, b *Block, fn *obj.LSym, sp, mem *Value, args ...*Value) *Value {
	config := b.Func.Config
	typ := config.Types.Uintptr // type of all argument values
	nargs := len(args)

	// TODO (register args) this is a bit of a hack.
	inRegs := b.Func.ABIDefault == b.Func.ABI1 && len(config.intParamRegs) >= 3

	if !inRegs {
		// Store arguments to the appropriate stack slot.
		off := config.ctxt.Arch.FixedFrameSize
		for _, arg := range args {
			stkaddr := b.NewValue1I(pos, OpOffPtr, typ.PtrTo(), off, sp)
			mem = b.NewValue3A(pos, OpStore, types.TypeMem, typ, stkaddr, arg, mem)
			off += typ.Size()
		}
		args = args[:0]
	}

	args = append(args, mem)

	// issue call
	argTypes := make([]*types.Type, nargs, 3) // at most 3 args; allows stack allocation
	for i := 0; i < nargs; i++ {
		argTypes[i] = typ
	}
	call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(argTypes, nil)))
	call.AddArgs(args...)
	call.AuxInt = int64(nargs) * typ.Size()
	return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call)
}

// round rounds o up to a multiple of r. r must be a power of 2.
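// For example, round(10, 8) == 16.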
func round(o int64, r int64) int64 {
	return (o + r - 1) &^ (r - 1)
}

// IsStackAddr reports whether v is known to be an address of a stack slot.
func IsStackAddr(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	switch v.Op {
	case OpSP, OpLocalAddr, OpSelectNAddr, OpGetCallerSP:
		return true
	}
	return false
}

// IsGlobalAddr reports whether v is known to be an address of a global (or nil).
func IsGlobalAddr(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	if v.Op == OpAddr && v.Args[0].Op == OpSB {
		return true // address of a global
	}
	if v.Op == OpConstNil {
		return true
	}
	if v.Op == OpLoad && IsReadOnlyGlobalAddr(v.Args[0]) {
		return true // loading from a read-only global - the resulting address can't be a heap address.
	}
	return false
}

// IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global.
func IsReadOnlyGlobalAddr(v *Value) bool {
	if v.Op == OpConstNil {
		// Nil pointers are read only. See issue 33438.
		return true
	}
	if v.Op == OpAddr && v.Aux != nil && v.Aux.(*obj.LSym).Type == objabi.SRODATA {
		return true
	}
	return false
}

// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object.
// If so, it also returns the memory state mem at which v is zero.
func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) {
	f := v.Block.Func
	c := f.Config
	if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
		if v.Op != OpSelectN || v.AuxInt != 0 {
			return nil, false
		}
		mem = select1[v.Args[0].ID]
		if mem == nil {
			return nil, false
		}
	} else {
		if v.Op != OpLoad {
			return nil, false
		}
		mem = v.MemoryArg()
		if mem.Op != OpSelectN {
			return nil, false
		}
		if mem.Type != types.TypeMem {
			return nil, false
		} // assume it is the right selection if true
	}
	call := mem.Args[0]
	if call.Op != OpStaticCall {
		return nil, false
	}
	if !isSameCall(call.Aux, "runtime.newobject") {
		return nil, false
	}
	if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
		if v.Args[0] == call {
			return mem, true
		}
		return nil, false
	}
	if v.Args[0].Op != OpOffPtr {
		return nil, false
	}
	if v.Args[0].Args[0].Op != OpSP {
		return nil, false
	}
	if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value
		return nil, false
	}
	return mem, true
}

// IsSanitizerSafeAddr reports whether v is known to be an address
// that doesn't need instrumentation.
func IsSanitizerSafeAddr(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	switch v.Op {
	case OpSP, OpLocalAddr, OpSelectNAddr:
		// Stack addresses are always safe.
		return true
	case OpITab, OpStringPtr, OpGetClosurePtr:
		// Itabs, string data, and closure fields are
		// read-only once initialized.
		return true
	case OpAddr:
		vt := v.Aux.(*obj.LSym).Type
		return vt == objabi.SRODATA || vt == objabi.SLIBFUZZER_8BIT_COUNTER || vt == objabi.SCOVERAGE_COUNTER || vt == objabi.SCOVERAGE_AUXVAR
	}
	return false
}

// isVolatile reports whether v is a pointer to the argument region on the stack,
// which will be clobbered by a function call.
func isVolatile(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr {
		v = v.Args[0]
	}
	return v.Op == OpSP
}