// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of work to do
	// between self-preemption checks in gcDrain. Assuming a scan
	// rate of 1 MB/ms, this is ~100 µs. Lower values have higher
	// overhead in the scan loop (the scheduler check may perform
	// a syscall, so its overhead is nontrivial). Higher values
	// make the system less responsive to incoming work.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span root
	// at a time. Used by special root marking.
	//
	// Higher values improve throughput by increasing locality, but
	// increase the minimum latency of a marking operation.
	//
	// Must be a multiple of the pageInUse bitmap element size and
	// must also evenly divide pagesPerArena.
	pagesPerSpanRoot = 512
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	assertWorldStopped()

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}

		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials.
	//
	// We depend on addfinalizer to mark objects that get
	// finalizers after root marking.
	//
	// We're going to scan the whole heap (that was available at the time the
	// mark phase started, i.e. markArenas) for in-use spans which have specials.
	//
	// Break up the work into arenas, and further into chunks.
	//
	// Snapshot allArenas as markArenas. This snapshot is safe because allArenas
	// is append-only.
	mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)

	// Scan stacks.
	//
	// Gs may be created after this point, but it's okay that we
	// ignore them because they begin life without any roots, so
	// there's nothing to scan, and any roots they create during
	// the concurrent phase will be caught by the write barrier.
	work.stackRoots = allGsSnapshot()
	work.nStackRoots = len(work.stackRoots)

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)

	// Calculate base indexes of each root type
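	// Root job IDs are dense: fixed roots come first, then data, BSS,
	// span, and stack roots, in that order; markroot dispatches on
	// these ranges using the bases recorded here.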
	work.baseData = uint32(fixedRootCount)
	work.baseBSS = work.baseData + uint32(work.nDataRoots)
	work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
	work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
	work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
}

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	// Check that stacks have been scanned.
	//
	// We only check the first nStackRoots Gs that we should have scanned.
	// Since we don't care about newer Gs (see comment in
	// gcMarkRootPrepare), no locking is required.
	i := 0
	forEachGRace(func(gp *g) {
		if i >= work.nStackRoots {
			return
		}

		if !gp.gcscandone {
			println("gp", gp, "goid", gp.goid,
				"status", readgstatus(gp),
				"gcscandone", gp.gcscandone)
			throw("scan missed a g")
		}

		i++
	})
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// Returns the amount of GC work credit produced by the operation.
// If flushBgCredit is true, then that credit is also flushed
// to the background credit pool.
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 {
	// Note: if you add a case here, please also update heapdump.go:dumproots.
	var workDone int64
	var workCounter *atomic.Int64
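	// workDone accumulates the scan work performed by this job. If a
	// case below sets workCounter, that work is credited to the
	// corresponding gcController counter (globals or stack scan work)
	// when the job completes.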
	switch {
	case work.baseData <= i && i < work.baseBSS:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
		}

	case work.baseBSS <= i && i < work.baseSpans:
		workCounter = &gcController.globalsScanWork
		for _, datap := range activeModules() {
			workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call
		// stackfree.
		systemstack(markrootFreeGStacks)

	case work.baseSpans <= i && i < work.baseStacks:
		// mark mspan.specials
		markrootSpans(gcw, int(i-work.baseSpans))

	default:
		// the rest is scanning goroutine stacks
		workCounter = &gcController.stackScanWork
		if i < work.baseStacks || work.baseEnd <= i {
			printlock()
			print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
			throw("markroot: bad index")
		}
		gp := work.stackRoots[i-work.baseStacks]

		// Remember when we first observed the G blocked; this is
		// needed only for traceback output.
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casGToWaitingForGC(userG, _Grunning, waitReasonGarbageCollectionScan)
			}

			// TODO: suspendG blocks (and spins) until gp
			// stops, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			workDone += scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
	if workCounter != nil && workDone != 0 {
		workCounter.Add(workDone)
		if flushBgCredit {
			gcFlushBgCredit(workDone)
		}
	}
	return workDone
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
// Returns the amount of work done.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64 {
	if rootBlockBytes%(8*goarch.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	// Note that if b0 is toward the end of the address space,
	// then b0 + rootBlockBytes might wrap around.
	// These tests are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return 0
	}
	b := b0 + off
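	// Each byte of ptrmask covers 8 pointer-words, so this shard's
	// slice of the mask starts shard*rootBlockBytes/(8*PtrSize) bytes
	// into ptrmask0.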
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
	return int64(n)
}

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}

	// Free stacks.
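	// Build a gQueue out of the existing schedlink chain: head and
	// tail start at the first G and the loop below advances the tail
	// as it goes.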
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
		gp.stack.hi = 0
		// Manipulate the queue directly since the Gs are
		// already all linked the right way.
		q.tail.set(gp)
	}

	// Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}

// markrootSpans marks roots for one shard of markArenas.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// Objects with weak handles have only one invariant related
	// to this function: weak handle specials (which are not in the
	// garbage collected heap) are roots. In practice, this means
	// the handle field must be scanned. Note that the value the
	// handle pointer referenced does *not* need to be scanned. See
	// the definition of specialWeakHandle for details.
	sg := mheap_.sweepgen

	// Find the arena and page index into that arena for this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct slice of bitmap which we'll iterate over.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit.
			//
			// This value is guaranteed to be non-nil because having
			// specials implies that the span is in-use, and since we're
			// currently marking we can be sure that we don't have to worry
			// about the span being freed and re-used.
			s := ha.spans[arenaPage+uint(i)*8+j]

			// The state must be mSpanInUse if the specials bit is set, so
			// sanity check that.
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}
			// Check that this span was swept (it may be cached or uncached).
			if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) {
				// sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", s.sweepgen, " ", sg, "\n")
				throw("gc: unswept span")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				switch sp.kind {
				case _KindSpecialFinalizer:
					// don't mark finalized object, but scan it so we
					// retain everything it points to.
					spf := (*specialfinalizer)(unsafe.Pointer(sp))
					// A finalizer can be set for an inner byte of an object; find the object's beginning.
					p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

					// Mark everything that can be reached from
					// the object (but *not* the object itself or
					// we'll never collect it).
					if !s.spanclass.noscan() {
						scanobject(p, gcw)
					}

					// The special itself is a root.
					scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
				case _KindSpecialWeakHandle:
					// The special itself is a root.
					spw := (*specialWeakHandle)(unsafe.Pointer(sp))
					scanblock(uintptr(unsafe.Pointer(&spw.handle)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
				}
			}
			unlock(&s.speciallock)
		}
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	// This extremely verbose boolean indicates whether we've
	// entered mark assist from the perspective of the tracer.
	//
	// In the tracer, this is just before we call gcAssistAlloc1
	// *regardless* of whether tracing is enabled. This is because
	// the tracer allows for tracing to begin (and advance
	// generations) in the middle of a GC mark phase, so we need to
	// record some state so that the tracer can pick it up to ensure
	// a consistent trace result.
	//
	// TODO(mknyszek): Hide the details of inMarkAssist in tracer
	// functions and simplify all the state tracking. This is a lot.
	enteredMarkAssistForTracing := false
retry:
	if gcCPULimiter.limiting() {
		// If the CPU limiter is enabled, intentionally don't
		// assist to reduce the amount of CPU time spent in the GC.
		if enteredMarkAssistForTracing {
			trace := traceAcquire()
			if trace.ok() {
				trace.GCMarkAssistDone()
				// Set this *after* we trace the end to make sure
				// that we emit an in-progress event if this is
				// the first event for the goroutine in the trace
				// or trace generation. Also, do this between
				// acquire/release because this is part of the
				// goroutine's trace state, and it must be atomic
				// with respect to the tracer.
				gp.inMarkAssist = false
				traceRelease(trace)
			} else {
				// This state is tracked even if tracing isn't enabled.
				// It's only used by the new tracer.
				// See the comment on enteredMarkAssistForTracing.
				gp.inMarkAssist = false
			}
		}
		return
	}
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	assistWorkPerByte := gcController.assistWorkPerByte.Load()
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
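	// gcAssistBytes is the debt in allocation bytes (negative while in
	// debt); assistWorkPerByte converts it to scan-work units, and
	// assistBytesPerWork converts back the other way.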
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the background
	// credit below 0 if two mutators steal at the same time. This
	// will just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := gcController.bgScanCredit.Load()
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		gcController.bgScanCredit.Add(-stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if enteredMarkAssistForTracing {
				trace := traceAcquire()
				if trace.ok() {
					trace.GCMarkAssistDone()
					// Set this *after* we trace the end to make sure
					// that we emit an in-progress event if this is
					// the first event for the goroutine in the trace
					// or trace generation. Also, do this between
					// acquire/release because this is part of the
					// goroutine's trace state, and it must be atomic
					// with respect to the tracer.
					gp.inMarkAssist = false
					traceRelease(trace)
				} else {
					// This state is tracked even if tracing isn't enabled.
					// It's only used by the new tracer.
					// See the comment on enteredMarkAssistForTracing.
					gp.inMarkAssist = false
				}
			}
			return
		}
	}
	if !enteredMarkAssistForTracing {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCMarkAssistStart()
			// Set this *after* we trace the start, otherwise we may
			// emit an in-progress event for an assist we're about to start.
			gp.inMarkAssist = true
			traceRelease(trace)
		} else {
			gp.inMarkAssist = true
		}
		// In the new tracer, set enter mark assist tracing if we
		// ever pass this point, because we must manage inMarkAssist
		// correctly.
		//
		// See the comment on enteredMarkAssistForTracing.
		enteredMarkAssistForTracing = true
	}

	// Perform assist work
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

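	// gcAssistAlloc1 reports that it completed the mark phase by
	// setting gp.param to a non-nil value; it can't return this on the
	// stack because the user stack may have moved.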
	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough work to pay off the assist debt. We need to
		// do one of these before letting the mutator allocate
		// more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try some more.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work list. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if enteredMarkAssistForTracing {
		trace := traceAcquire()
		if trace.ok() {
			trace.GCMarkAssistDone()
			// Set this *after* we trace the end to make sure
			// that we emit an in-progress event if this is
			// the first event for the goroutine in the trace
			// or trace generation. Also, do this between
			// acquire/release because this is part of the
			// goroutine's trace state, and it must be atomic
			// with respect to the tracer.
			gp.inMarkAssist = false
			traceRelease(trace)
		} else {
			// This state is tracked even if tracing isn't enabled.
			// It's only used by the new tracer.
			// See the comment on enteredMarkAssistForTracing.
			gp.inMarkAssist = false
		}
	}
}

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	//
	// Limiter event tracking might be disabled if we end up here
	// while on a mark worker.
	startTime := nanotime()
	trackLimiterEvent := gp.m.p.ptr().limiterEvent.start(limiterEventMarkAssist, startTime)

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	// gcDrainN requires the caller to be preemptible.
	casGToWaitingForGC(gp, _Grunning, waitReasonGCAssistMarking)

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	now := nanotime()
	duration := now - startTime
	pp := gp.m.p.ptr()
	pp.gcAssistTime += duration
	if trackLimiterEvent {
		pp.limiterEvent.stop(limiterEventMarkAssist, now)
	}
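	// Flush the accumulated assist time to the global controller only
	// once enough has built up locally, to amortize the cost of
	// updating gcController.assistTime and the CPU limiter.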
	if pp.gcAssistTime > gcAssistTimeSlack {
		gcController.assistTime.Add(pp.gcAssistTime)
		gcCPULimiter.update(now)
		pp.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if gcController.bgScanCredit.Load() > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		gcController.bgScanCredit.Add(scanWork)
		return
	}

	assistBytesPerWork := gcController.assistBytesPerWork.Load()
	scanBytes := int64(float64(scanWork) * assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC worker's high
			// scheduler priority to get itself always run
			// before other goroutines and always in the
			// fresh quantum started by GC.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		assistWorkPerByte := gcController.assistWorkPerByte.Load()
		scanWork = int64(float64(scanBytes) * assistWorkPerByte)
		gcController.bgScanCredit.Add(scanWork)
	}
	unlock(&work.assistQueue.lock)
}

// scanstack scans gp's stack, greying all pointers found on the stack.
//
// Returns the amount of scan work performed, but doesn't update
// gcController.stackScanWork or flush any credit. Any background credit produced
// by this function should be flushed by its caller. scanstack itself can't
// safely flush because it may result in trying to wake up a goroutine that
// was just scanned, resulting in a self-deadlock.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) int64 {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return 0
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	// scannedSize is the amount of work we'll be reporting.
	//
	// It is less than the allocated size (which is hi-lo).
	var sp uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp // If in a system call this is the stack pointer (gp.sched.sp can be 0 in this case on Windows).
	} else {
		sp = gp.sched.sp
	}
	scannedSize := gp.stack.hi - sp

	// Keep statistics for initial stack size calculation.
	// Note that this accumulates the scanned size, not the allocated size.
	p := getg().m.p.ptr()
	p.scannedStackSize += uint64(scannedSize)
	p.scannedStacks++

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack. Accumulate a list of stack objects.
	var u unwinder
	for u.init(gp, 0); u.valid(); u.next() {
		scanframeworker(&u.frame, &state, gcw)
	}

	// Find additional pointers that point into the stack from the heap.
	// Currently this includes defers and panics. See also function copystack.

	// Find and trace other pointers in defer records.
	for d := gp._defer; d != nil; d = d.link {
		if d.fn != nil {
			// Scan the func value, which could be a stack allocated closure.
			// See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		if d.link != nil {
			// The link field of a stack-allocated defer record might point
			// to a heap-allocated defer record. Keep that heap record live.
			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
		// Retain the defer records themselves.
		// Defer records might not be reachable from the G through regular heap
		// tracing because the defer linked list might weave between the stack and the heap.
		if d.heap {
			scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		}
	}
	if gp._panic != nil {
		// Panics are always stack allocated.
		state.putPtr(uintptr(unsafe.Pointer(gp._panic)), false)
	}

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		r := obj.r
		if r == nil {
			// We've already scanned this object.
			continue
		}
		obj.setRecord(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print("  live stkobj at ", hex(state.stack.lo+uintptr(obj.off)), " of size ", obj.size)
			if conservative {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		gcdata := r.gcdata()
		var s *mspan
		if r.useGCProg() {
			// This path is pretty unlikely, an object large enough
			// to have a GC program allocated on the stack.
			// We need some space to unpack the program into a straight
			// bitmask, which we allocate/free here.
			// TODO: it would be nice if there were a way to run a GC
			// program without having to store all its bits. We'd have
			// to change from a Lempel-Ziv style program to something else.
			// Or we can forbid putting objects on stacks if they require
			// a gc program (see issue 27447).
			s = materializeGCProg(r.ptrdata(), gcdata)
			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
		}

		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, r.ptrdata(), gcdata, gcw, &state)
		} else {
			scanblock(b, r.ptrdata(), gcdata, gcw, &state)
		}

		if s != nil {
			dematerializeGCProg(s)
		}
	}

	// Deallocate object buffers.
	// (Pointer buffers were all deallocated in the loop above.)
	for state.head != nil {
		x := state.head
		state.head = x.next
		if stackTraceDebug {
			for i := 0; i < x.nobj; i++ {
				obj := &x.obj[i]
				if obj.r == nil { // reachable
					continue
				}
				println("  dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of size", obj.r.size)
				// Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		x.nobj = 0
		putempty((*workbuf)(unsafe.Pointer(x)))
	}
	if state.buf != nil || state.cbuf != nil || state.freeBuf != nil {
		throw("remaining pointer buffers")
	}
	return int64(scannedSize)
}

// Scan a stack frame: local variables and function arguments/results.
//
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == abi.FuncID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == abi.FuncID_debugCallV2
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise
		// case, this includes the outgoing argument space
		// since we may have stopped while this function was
		// setting up a call.
		//
		// TODO: We could narrow this down if the compiler
		// produced a single map per function of stack slots
		// and registers that ever contain a pointer.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if n := frame.argBytes(); n != 0 {
			// TODO: We could pass the entry argument map
			// to narrow this down further.
			scanConservative(frame.argp, n, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the
			// registers for the asynchronously stopped
			// parent frame. Scan the parent
			// conservatively.
			state.conservative = true
		} else {
			// We only wanted to scan those two frames
			// conservatively. Clear the flag for future
			// frames.
			state.conservative = false
		}
		return
	}

	locals, args, objs := frame.getStackMap(false)

	// Scan local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * goarch.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		// In that case, there can't be a pointer to its args, either.
		// (And all args would be scanned above anyway.)
		for i := range objs {
			obj := &objs[i]
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of size", obj.size)
			}
			state.addObject(ptr, obj)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better
// account for mark time in profiles.
func gcDrainMarkWorkerIdle(gcw *gcWork) {
	gcDrain(gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}

// gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to
// better account for mark time in profiles.
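//
// If untilPreempt is set, the drain also stops once the worker G is
// preempted; gcBgMarkWorker first drains in that mode so a pending
// stop-the-world can proceed, then re-enters with untilPreempt unset.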
func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool) {
	flags := gcDrainFlushBgCredit
	if untilPreempt {
		flags |= gcDrainUntilPreempt
	}
	gcDrain(gcw, flags)
}

// gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to
// better account for mark time in profiles.
func gcDrainMarkWorkerFractional(gcw *gcWork) {
	gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
}

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
// gcDrain will always return if there is a pending STW or forEachP.
//
// Disabling write barriers is necessary to ensure that, after we've
// confirmed we've drained gcw, we don't accidentally end up flipping
// that condition by immediately adding work in the form of a write
// barrier buffer flush.
//
// Don't set nowritebarrierrec because it's safe for some callees to
// have write barriers enabled.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.enabled {
		throw("gcDrain phase incorrect")
	}

	// N.B. We must be running in a non-preemptible context, so it's
	// safe to hold a reference to our P here.
	gp := getg().m.curg
	pp := gp.m.p.ptr()
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.heapScanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
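	//
	// It starts effectively infinite so that workers which never
	// self-preempt (dedicated workers) skip the check entirely; idle
	// and fractional workers lower it to drainCheckThreshold below.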
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		// Stop if we're preemptible, if someone wants to STW, or if
		// someone is calling forEachP.
		for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job, flushBgCredit)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	//
	// Stop if we're preemptible, if someone wants to STW, or if
	// someone is calling forEachP.
	//
	// TODO(mknyszek): Consider always checking gp.preempt instead
	// of having the preempt flag, and making an exception for certain
	// mark workers in retake. That might be simpler than trying to
	// enumerate all the reasons why we might want to preempt, even
	// if we're supposed to be mostly non-preemptible.
	for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier
				// buffer; this may create
				// more work.
				wbBufFlush()
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.heapScanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.heapScanWork
			gcw.heapScanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.heapScanWork > 0 {
		gcController.heapScanWork.Add(gcw.heapScanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.heapScanWork - initScanWork)
		}
		gcw.heapScanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least scanWork units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.enabled {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.heapScanWork

	// In addition to backing out because of a preemption, back out
	// if the GC CPU limiter is enabled.
	gp := getg().m.curg
	for !gp.preempt && !gcCPULimiter.limiting() && workFlushed+gcw.heapScanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer;
				// this may create more work.
				wbBufFlush()
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					workFlushed += markroot(gcw, job, false)
					continue
				}
			}
			// No heap or root jobs.
			break
		}

		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.heapScanWork >= gcCreditSlack {
			gcController.heapScanWork.Add(gcw.heapScanWork)
			workFlushed += gcw.heapScanWork
			gcw.heapScanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.heapScanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.heapScanWork.
//
// If stk != nil, possible stack pointers are also reported to stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
		if bits == 0 {
			i += goarch.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += goarch.PtrSize
		}
	}
}

// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Prefetch object before we scan it.
	//
	// This will overlap fetching the beginning of the object with initial
	// setup before we start scanning the object.
	sys.Prefetch(b)

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}
	if s.spanclass.noscan() {
		// Correctness-wise this is ok, but it's inefficient
		// if noscan objects reach here.
		throw("scanobject of a noscan object")
	}

	var tp typePointers
	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		n = min(n, maxObletBytes)
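		// Set up the type-pointer iterator at the object's base, then
		// fast-forward it to this oblet so it only yields pointer
		// words in [b, b+n).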
		tp = s.typePointersOfUnchecked(s.base())
		tp = tp.fastForward(b-tp.addr, b+n)
	} else {
		tp = s.typePointersOfUnchecked(b)
	}

	var scanSize uintptr
	for {
		var addr uintptr
		if tp, addr = tp.nextFast(); addr == 0 {
			if tp, addr = tp.next(b + n); addr == 0 {
				break
			}
		}

		// Keep track of farthest pointer we found, so we can
		// update heapScanWork. TODO: is there a better metric,
		// now that we can skip scalar portions pretty efficiently?
		scanSize = addr - b + goarch.PtrSize

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(addr))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so,
			// mark the object.
			//
			// Note that it's possible for findObject to
			// fail if obj points to a just-allocated heap
			// object because of a race with growing the
			// heap. In this case, we know the object was
			// just allocated and hence will be marked by
			// allocation itself.
			if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
				greyobject(obj, b, addr-b, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.heapScanWork += int64(scanSize)
}

// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, b+n, func(p uintptr) byte {
			if ptrmask != nil {
				word := (p - b) / goarch.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return '$'
				}
			}

			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				return '@'
			}

			span := spanOfHeap(val)
			if span == nil {
				return ' '
			}
			idx := span.objIndex(val)
			if span.isFree(idx) {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += goarch.PtrSize {
		if ptrmask != nil {
			word := i / goarch.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do the 8th)
				//
				// This must be the first time we've
				// seen this word of ptrmask, so i
				// must be 8-word-aligned, but check
				// our reasoning just in case.
				if i%(goarch.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += goarch.PtrSize*8 - goarch.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This
			// object may be dead from last cycle and
			// hence may contain pointers to unallocated
			// objects, but unlike heap objects we can't
			// tell if it's already dead. Hence, if all
			// pointers to this object are from
			// conservative scanning, we have to scan it
			// defensively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		idx := span.objIndex(val)
		if span.isFree(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		greyobject(obj, b, i, span, gcw, idx)
	}
}

// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(goarch.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if setCheckmark(obj, base, off, mbits) {
			// Already marked.
			return
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// We're adding obj to P's local workbuf, so it's likely
	// this object will be processed soon by the same P.
	// Even if the workbuf gets flushed, there will likely still be
	// some benefit on platforms with inclusive shared caches.
	sys.Prefetch(obj)
	// Queue the obj for scanning.
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + goarch.PtrSize
	}
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(span.elemsize)
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()

	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
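		// c.tiny is the base of the P's current tiny allocation
		// block. Greying the whole block keeps every tiny object
		// packed inside it alive.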
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}
