1// Copyright 2010 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Export guts for testing.
6
7package runtime
8
9import (
10	"internal/abi"
11	"internal/goarch"
12	"internal/goos"
13	"internal/runtime/atomic"
14	"runtime/internal/sys"
15	"unsafe"
16)
17
18var Fadd64 = fadd64
19var Fsub64 = fsub64
20var Fmul64 = fmul64
21var Fdiv64 = fdiv64
22var F64to32 = f64to32
23var F32to64 = f32to64
24var Fcmp64 = fcmp64
25var Fintto64 = fintto64
26var F64toint = f64toint
27
28var Entersyscall = entersyscall
29var Exitsyscall = exitsyscall
30var LockedOSThread = lockedOSThread
31var Xadduintptr = atomic.Xadduintptr
32
33var ReadRandomFailed = &readRandomFailed
34
35var Fastlog2 = fastlog2
36
37var Atoi = atoi
38var Atoi32 = atoi32
39var ParseByteCount = parseByteCount
40
41var Nanotime = nanotime
42var NetpollBreak = netpollBreak
43var Usleep = usleep
44
45var PhysPageSize = physPageSize
46var PhysHugePageSize = physHugePageSize
47
48var NetpollGenericInit = netpollGenericInit
49
50var Memmove = memmove
51var MemclrNoHeapPointers = memclrNoHeapPointers
52
53var CgoCheckPointer = cgoCheckPointer
54
55const CrashStackImplemented = crashStackImplemented
56
57const TracebackInnerFrames = tracebackInnerFrames
58const TracebackOuterFrames = tracebackOuterFrames
59
60var MapKeys = keys
61var MapValues = values
62
63var LockPartialOrder = lockPartialOrder
64
65type TimeTimer = timeTimer
66
67type LockRank lockRank
68
69func (l LockRank) String() string {
70	return lockRank(l).String()
71}
72
73const PreemptMSupported = preemptMSupported
74
75type LFNode struct {
76	Next    uint64
77	Pushcnt uintptr
78}
79
80func LFStackPush(head *uint64, node *LFNode) {
81	(*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
82}
83
84func LFStackPop(head *uint64) *LFNode {
85	return (*LFNode)((*lfstack)(head).pop())
86}
87func LFNodeValidate(node *LFNode) {
88	lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
89}
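
// A minimal sketch of how these lock-free stack wrappers are meant to be
// driven from a test in package runtime_test (hypothetical test code; note
// that nodes must live outside the Go heap, e.g. via PersistentAlloc defined
// later in this file, or LFNodeValidate will throw):
//
//	node := (*runtime.LFNode)(runtime.PersistentAlloc(unsafe.Sizeof(runtime.LFNode{})))
//	runtime.LFNodeValidate(node)
//
//	var head uint64
//	runtime.LFStackPush(&head, node)
//	if got := runtime.LFStackPop(&head); got != node {
//		t.Fatalf("pop returned %p, want %p", got, node)
//	}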
90
91func Netpoll(delta int64) {
92	systemstack(func() {
93		netpoll(delta)
94	})
95}
96
97func GCMask(x any) (ret []byte) {
98	systemstack(func() {
99		ret = getgcmask(x)
100	})
101	return
102}
103
104func RunSchedLocalQueueTest() {
105	pp := new(p)
106	gs := make([]g, len(pp.runq))
107	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
108	for i := 0; i < len(pp.runq); i++ {
109		if g, _ := runqget(pp); g != nil {
110			throw("runq is not empty initially")
111		}
112		for j := 0; j < i; j++ {
113			runqput(pp, &gs[i], false)
114		}
115		for j := 0; j < i; j++ {
116			if g, _ := runqget(pp); g != &gs[i] {
117				print("bad element at iter ", i, "/", j, "\n")
118				throw("bad element")
119			}
120		}
121		if g, _ := runqget(pp); g != nil {
122			throw("runq is not empty afterwards")
123		}
124	}
125}
126
127func RunSchedLocalQueueStealTest() {
128	p1 := new(p)
129	p2 := new(p)
130	gs := make([]g, len(p1.runq))
131	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
132	for i := 0; i < len(p1.runq); i++ {
133		for j := 0; j < i; j++ {
134			gs[j].sig = 0
135			runqput(p1, &gs[j], false)
136		}
137		gp := runqsteal(p2, p1, true)
138		s := 0
139		if gp != nil {
140			s++
141			gp.sig++
142		}
143		for {
144			gp, _ = runqget(p2)
145			if gp == nil {
146				break
147			}
148			s++
149			gp.sig++
150		}
151		for {
152			gp, _ = runqget(p1)
153			if gp == nil {
154				break
155			}
156			gp.sig++
157		}
158		for j := 0; j < i; j++ {
159			if gs[j].sig != 1 {
160				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
161				throw("bad element")
162			}
163		}
164		if s != i/2 && s != i/2+1 {
165			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
166			throw("bad steal")
167		}
168	}
169}
170
171func RunSchedLocalQueueEmptyTest(iters int) {
172	// Test that runq is not spuriously reported as empty.
173	// Runq emptiness affects scheduling decisions and spurious emptiness
174	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
176	done := make(chan bool, 1)
177	p := new(p)
178	gs := make([]g, 2)
179	Escape(gs) // Ensure gs doesn't move, since we use guintptrs
180	ready := new(uint32)
181	for i := 0; i < iters; i++ {
182		*ready = 0
183		next0 := (i & 1) == 0
184		next1 := (i & 2) == 0
185		runqput(p, &gs[0], next0)
186		go func() {
187			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
188			}
189			if runqempty(p) {
190				println("next:", next0, next1)
191				throw("queue is empty")
192			}
193			done <- true
194		}()
195		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
196		}
197		runqput(p, &gs[1], next1)
198		runqget(p)
199		<-done
200		runqget(p)
201	}
202}
203
204var (
205	StringHash = stringHash
206	BytesHash  = bytesHash
207	Int32Hash  = int32Hash
208	Int64Hash  = int64Hash
209	MemHash    = memhash
210	MemHash32  = memhash32
211	MemHash64  = memhash64
212	EfaceHash  = efaceHash
213	IfaceHash  = ifaceHash
214)
215
216var UseAeshash = &useAeshash
217
218func MemclrBytes(b []byte) {
219	s := (*slice)(unsafe.Pointer(&b))
220	memclrNoHeapPointers(s.array, uintptr(s.len))
221}
222
223const HashLoad = hashLoad
224
225// entry point for testing
226func GostringW(w []uint16) (s string) {
227	systemstack(func() {
228		s = gostringw(&w[0])
229	})
230	return
231}
232
233var Open = open
234var Close = closefd
235var Read = read
236var Write = write
237
238func Envs() []string     { return envs }
239func SetEnvs(e []string) { envs = e }
240
241const PtrSize = goarch.PtrSize
242
243var ForceGCPeriod = &forcegcperiod
244
245// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
246// the "environment" traceback level, so later calls to
247// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
248func SetTracebackEnv(level string) {
249	setTraceback(level)
250	traceback_env = traceback_cache
251}
252
253var ReadUnaligned32 = readUnaligned32
254var ReadUnaligned64 = readUnaligned64
255
256func CountPagesInUse() (pagesInUse, counted uintptr) {
257	stw := stopTheWorld(stwForTestCountPagesInUse)
258
259	pagesInUse = mheap_.pagesInUse.Load()
260
261	for _, s := range mheap_.allspans {
262		if s.state.get() == mSpanInUse {
263			counted += s.npages
264		}
265	}
266
267	startTheWorld(stw)
268
269	return
270}
271
272func Fastrand() uint32          { return uint32(rand()) }
273func Fastrand64() uint64        { return rand() }
274func Fastrandn(n uint32) uint32 { return randn(n) }
275
276type ProfBuf profBuf
277
278func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
279	return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
280}
281
282func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
283	(*profBuf)(p).write(tag, now, hdr, stk)
284}
285
286const (
287	ProfBufBlocking    = profBufBlocking
288	ProfBufNonBlocking = profBufNonBlocking
289)
290
291func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
292	return (*profBuf)(p).read(mode)
293}
294
295func (p *ProfBuf) Close() {
296	(*profBuf)(p).close()
297}
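
// A minimal sketch of exercising the profile buffer wrappers from a test
// (hypothetical test code; the buffer sizes, timestamp, header words, and
// the single stack frame are arbitrary):
//
//	b := runtime.NewProfBuf(2, 1000, 10) // 2 header words, 1000 data words, 10 tag slots
//	var tag unsafe.Pointer
//	b.Write(&tag, 100, []uint64{1, 2}, []uintptr{42})
//	data, tags, eof := b.Read(runtime.ProfBufNonBlocking)
//	_, _, _ = data, tags, eof // data holds the encoded records, tags the per-record tags
//	b.Close()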
298
299type CPUStats = cpuStats
300
301func ReadCPUStats() CPUStats {
302	return work.cpuStats
303}
304
305func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
306	stw := stopTheWorld(stwForTestReadMetricsSlow)
307
308	// Initialize the metrics beforehand because this could
309	// allocate and skew the stats.
310	metricsLock()
311	initMetrics()
312
313	systemstack(func() {
314		// Donate the racectx to g0. readMetricsLocked calls into the race detector
315		// via map access.
316		getg().racectx = getg().m.curg.racectx
317
318		// Read the metrics once before in case it allocates and skews the metrics.
319		// readMetricsLocked is designed to only allocate the first time it is called
320		// with a given slice of samples. In effect, this extra read tests that this
321		// remains true, since otherwise the second readMetricsLocked below could
322		// allocate before it returns.
323		readMetricsLocked(samplesp, len, cap)
324
325		// Read memstats first. It's going to flush
326		// the mcaches which readMetrics does not do, so
327		// going the other way around may result in
328		// inconsistent statistics.
329		readmemstats_m(memStats)
330
331		// Read metrics again. We need to be sure we're on the
332		// system stack with readmemstats_m so that we don't call into
333		// the stack allocator and adjust metrics between there and here.
334		readMetricsLocked(samplesp, len, cap)
335
336		// Undo the donation.
337		getg().racectx = 0
338	})
339	metricsUnlock()
340
341	startTheWorld(stw)
342}
343
344var DoubleCheckReadMemStats = &doubleCheckReadMemStats
345
346// ReadMemStatsSlow returns both the runtime-computed MemStats and
347// MemStats accumulated by scanning the heap.
348func ReadMemStatsSlow() (base, slow MemStats) {
349	stw := stopTheWorld(stwForTestReadMemStatsSlow)
350
351	// Run on the system stack to avoid stack growth allocation.
352	systemstack(func() {
353		// Make sure stats don't change.
354		getg().m.mallocing++
355
356		readmemstats_m(&base)
357
358		// Initialize slow from base and zero the fields we're
359		// recomputing.
360		slow = base
361		slow.Alloc = 0
362		slow.TotalAlloc = 0
363		slow.Mallocs = 0
364		slow.Frees = 0
365		slow.HeapReleased = 0
366		var bySize [_NumSizeClasses]struct {
367			Mallocs, Frees uint64
368		}
369
370		// Add up current allocations in spans.
371		for _, s := range mheap_.allspans {
372			if s.state.get() != mSpanInUse {
373				continue
374			}
375			if s.isUnusedUserArenaChunk() {
376				continue
377			}
378			if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
379				slow.Mallocs++
380				slow.Alloc += uint64(s.elemsize)
381			} else {
382				slow.Mallocs += uint64(s.allocCount)
383				slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
384				bySize[sizeclass].Mallocs += uint64(s.allocCount)
385			}
386		}
387
388		// Add in frees by just reading the stats for those directly.
389		var m heapStatsDelta
390		memstats.heapStats.unsafeRead(&m)
391
392		// Collect per-sizeclass free stats.
393		var smallFree uint64
394		for i := 0; i < _NumSizeClasses; i++ {
395			slow.Frees += m.smallFreeCount[i]
396			bySize[i].Frees += m.smallFreeCount[i]
397			bySize[i].Mallocs += m.smallFreeCount[i]
398			smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
399		}
400		slow.Frees += m.tinyAllocCount + m.largeFreeCount
401		slow.Mallocs += slow.Frees
402
403		slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
404
405		for i := range slow.BySize {
406			slow.BySize[i].Mallocs = bySize[i].Mallocs
407			slow.BySize[i].Frees = bySize[i].Frees
408		}
409
410		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
411			chunk := mheap_.pages.tryChunkOf(i)
412			if chunk == nil {
413				continue
414			}
415			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
416			slow.HeapReleased += uint64(pg) * pageSize
417		}
418		for _, p := range allp {
419			pg := sys.OnesCount64(p.pcache.scav)
420			slow.HeapReleased += uint64(pg) * pageSize
421		}
422
423		getg().m.mallocing--
424	})
425
426	startTheWorld(stw)
427	return
428}
429
430// ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the current goroutine
431// and verifies that unwinding the new stack doesn't crash, even if the old
432// stack has been freed or reused (simulated via poisoning).
433func ShrinkStackAndVerifyFramePointers() {
434	before := stackPoisonCopy
435	defer func() { stackPoisonCopy = before }()
436	stackPoisonCopy = 1
437
438	gp := getg()
439	systemstack(func() {
440		shrinkstack(gp)
441	})
442	// If our new stack contains frame pointers into the old stack, this will
443	// crash because the old stack has been poisoned.
444	FPCallers(make([]uintptr, 1024))
445}
446
447// BlockOnSystemStack switches to the system stack, prints "x\n" to
448// stderr, and blocks in a stack containing
449// "runtime.blockOnSystemStackInternal".
450func BlockOnSystemStack() {
451	systemstack(blockOnSystemStackInternal)
452}
453
454func blockOnSystemStackInternal() {
455	print("x\n")
456	lock(&deadlock)
457	lock(&deadlock)
458}
459
460type RWMutex struct {
461	rw rwmutex
462}
463
464func (rw *RWMutex) Init() {
465	rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
466}
467
468func (rw *RWMutex) RLock() {
469	rw.rw.rlock()
470}
471
472func (rw *RWMutex) RUnlock() {
473	rw.rw.runlock()
474}
475
476func (rw *RWMutex) Lock() {
477	rw.rw.lock()
478}
479
480func (rw *RWMutex) Unlock() {
481	rw.rw.unlock()
482}
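
// A minimal sketch of how tests might exercise this runtime rwmutex wrapper
// (hypothetical test code):
//
//	var rw runtime.RWMutex
//	rw.Init() // required: sets up the lock ranks used for testing
//	rw.RLock()
//	rw.RUnlock()
//	rw.Lock()
//	rw.Unlock()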
483
484const RuntimeHmapSize = unsafe.Sizeof(hmap{})
485
486func MapBucketsCount(m map[int]int) int {
487	h := *(**hmap)(unsafe.Pointer(&m))
488	return 1 << h.B
489}
490
491func MapBucketsPointerIsNil(m map[int]int) bool {
492	h := *(**hmap)(unsafe.Pointer(&m))
493	return h.buckets == nil
494}
495
496func OverLoadFactor(count int, B uint8) bool {
497	return overLoadFactor(count, B)
498}
499
500func LockOSCounts() (external, internal uint32) {
501	gp := getg()
502	if gp.m.lockedExt+gp.m.lockedInt == 0 {
503		if gp.lockedm != 0 {
504			panic("lockedm on non-locked goroutine")
505		}
506	} else {
507		if gp.lockedm == 0 {
508			panic("nil lockedm on locked goroutine")
509		}
510	}
511	return gp.m.lockedExt, gp.m.lockedInt
512}
513
514//go:noinline
515func TracebackSystemstack(stk []uintptr, i int) int {
516	if i == 0 {
517		pc, sp := getcallerpc(), getcallersp()
518		var u unwinder
519		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
520		return tracebackPCs(&u, 0, stk)
521	}
522	n := 0
523	systemstack(func() {
524		n = TracebackSystemstack(stk, i-1)
525	})
526	return n
527}
528
529func KeepNArenaHints(n int) {
530	hint := mheap_.arenaHints
531	for i := 1; i < n; i++ {
532		hint = hint.next
533		if hint == nil {
534			return
535		}
536	}
537	hint.next = nil
538}
539
540// MapNextArenaHint reserves a page at the next arena growth hint,
541// preventing the arena from growing there, and returns the range of
542// addresses that are no longer viable.
543//
544// This may fail to reserve memory. If it fails, it still returns the
545// address range it attempted to reserve.
546func MapNextArenaHint() (start, end uintptr, ok bool) {
547	hint := mheap_.arenaHints
548	addr := hint.addr
549	if hint.down {
550		start, end = addr-heapArenaBytes, addr
551		addr -= physPageSize
552	} else {
553		start, end = addr, addr+heapArenaBytes
554	}
555	got := sysReserve(unsafe.Pointer(addr), physPageSize)
556	ok = (addr == uintptr(got))
557	if !ok {
558		// We were unable to get the requested reservation.
559		// Release what we did get and fail.
560		sysFreeOS(got, physPageSize)
561	}
562	return
563}
564
565func GetNextArenaHint() uintptr {
566	return mheap_.arenaHints.addr
567}
568
569type G = g
570
571type Sudog = sudog
572
573func Getg() *G {
574	return getg()
575}
576
577func Goid() uint64 {
578	return getg().goid
579}
580
581func GIsWaitingOnMutex(gp *G) bool {
582	return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
583}
584
585var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
586
587//go:noinline
588func PanicForTesting(b []byte, i int) byte {
589	return unexportedPanicForTesting(b, i)
590}
591
592//go:noinline
593func unexportedPanicForTesting(b []byte, i int) byte {
594	return b[i]
595}
596
597func G0StackOverflow() {
598	systemstack(func() {
599		g0 := getg()
600		sp := getcallersp()
		// The stack bounds for the g0 stack are not always precise.
		// Use an artificially small stack to trigger a stack overflow
		// without actually running out of the system stack (which may segfault).
604		g0.stack.lo = sp - 4096 - stackSystem
605		g0.stackguard0 = g0.stack.lo + stackGuard
606		g0.stackguard1 = g0.stackguard0
607
608		stackOverflow(nil)
609	})
610}
611
612func stackOverflow(x *byte) {
613	var buf [256]byte
614	stackOverflow(&buf[0])
615}
616
617func MapTombstoneCheck(m map[int]int) {
618	// Make sure emptyOne and emptyRest are distributed correctly.
619	// We should have a series of filled and emptyOne cells, followed by
620	// a series of emptyRest cells.
621	h := *(**hmap)(unsafe.Pointer(&m))
622	i := any(m)
623	t := *(**maptype)(unsafe.Pointer(&i))
624
625	for x := 0; x < 1<<h.B; x++ {
626		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
627		n := 0
628		for b := b0; b != nil; b = b.overflow(t) {
629			for i := 0; i < abi.MapBucketCount; i++ {
630				if b.tophash[i] != emptyRest {
631					n++
632				}
633			}
634		}
635		k := 0
636		for b := b0; b != nil; b = b.overflow(t) {
637			for i := 0; i < abi.MapBucketCount; i++ {
638				if k < n && b.tophash[i] == emptyRest {
639					panic("early emptyRest")
640				}
641				if k >= n && b.tophash[i] != emptyRest {
642					panic("late non-emptyRest")
643				}
644				if k == n-1 && b.tophash[i] == emptyOne {
645					panic("last non-emptyRest entry is emptyOne")
646				}
647				k++
648			}
649		}
650	}
651}
652
653func RunGetgThreadSwitchTest() {
	// Test that getg works correctly across a thread switch.
	// With gccgo, if getg is inlined, the backend may cache the
	// address of the TLS variable, which becomes invalid after a
	// thread switch. This test checks that the bad caching doesn't
	// happen.
659
660	ch := make(chan int)
661	go func(ch chan int) {
662		ch <- 5
663		LockOSThread()
664	}(ch)
665
666	g1 := getg()
667
668	// Block on a receive. This is likely to get us a thread
669	// switch. If we yield to the sender goroutine, it will
670	// lock the thread, forcing us to resume on a different
671	// thread.
672	<-ch
673
674	g2 := getg()
675	if g1 != g2 {
676		panic("g1 != g2")
677	}
678
679	// Also test getg after some control flow, as the
680	// backend is sensitive to control flow.
681	g3 := getg()
682	if g1 != g3 {
683		panic("g1 != g3")
684	}
685}
686
687const (
688	PageSize         = pageSize
689	PallocChunkPages = pallocChunkPages
690	PageAlloc64Bit   = pageAlloc64Bit
691	PallocSumBytes   = pallocSumBytes
692)
693
694// Expose pallocSum for testing.
695type PallocSum pallocSum
696
697func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
698func (m PallocSum) Start() uint                    { return pallocSum(m).start() }
699func (m PallocSum) Max() uint                      { return pallocSum(m).max() }
700func (m PallocSum) End() uint                      { return pallocSum(m).end() }
701
702// Expose pallocBits for testing.
703type PallocBits pallocBits
704
705func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
706	return (*pallocBits)(b).find(npages, searchIdx)
707}
708func (b *PallocBits) AllocRange(i, n uint)       { (*pallocBits)(b).allocRange(i, n) }
709func (b *PallocBits) Free(i, n uint)             { (*pallocBits)(b).free(i, n) }
710func (b *PallocBits) Summarize() PallocSum       { return PallocSum((*pallocBits)(b).summarize()) }
711func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
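
// A minimal sketch of driving the PallocBits wrappers (hypothetical test
// code, assuming the usual 512 pages per chunk, so allocating the first 8
// pages leaves a single free run of 504 pages at the end):
//
//	var b runtime.PallocBits
//	b.AllocRange(0, 8)
//	sum := b.Summarize()
//	_, _ = sum.Start(), sum.End() // 0 leading free pages, 504 trailing
//	i, _ := b.Find(1, 0)          // first free page is index 8
//	_ = i
//	b.Free(0, 8)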
712
713// SummarizeSlow is a slow but more obviously correct implementation
714// of (*pallocBits).summarize. Used for testing.
715func SummarizeSlow(b *PallocBits) PallocSum {
716	var start, most, end uint
717
718	const N = uint(len(b)) * 64
719	for start < N && (*pageBits)(b).get(start) == 0 {
720		start++
721	}
722	for end < N && (*pageBits)(b).get(N-end-1) == 0 {
723		end++
724	}
725	run := uint(0)
726	for i := uint(0); i < N; i++ {
727		if (*pageBits)(b).get(i) == 0 {
728			run++
729		} else {
730			run = 0
731		}
732		most = max(most, run)
733	}
734	return PackPallocSum(start, most, end)
735}
736
737// Expose non-trivial helpers for testing.
738func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
739
740// Given two PallocBits, returns a set of bit ranges where
741// they differ.
742func DiffPallocBits(a, b *PallocBits) []BitRange {
743	ba := (*pageBits)(a)
744	bb := (*pageBits)(b)
745
746	var d []BitRange
747	base, size := uint(0), uint(0)
748	for i := uint(0); i < uint(len(ba))*64; i++ {
749		if ba.get(i) != bb.get(i) {
750			if size == 0 {
751				base = i
752			}
753			size++
754		} else {
755			if size != 0 {
756				d = append(d, BitRange{base, size})
757			}
758			size = 0
759		}
760	}
761	if size != 0 {
762		d = append(d, BitRange{base, size})
763	}
764	return d
765}
766
767// StringifyPallocBits gets the bits in the bit range r from b,
768// and returns a string containing the bits as ASCII 0 and 1
769// characters.
770func StringifyPallocBits(b *PallocBits, r BitRange) string {
771	str := ""
772	for j := r.I; j < r.I+r.N; j++ {
773		if (*pageBits)(b).get(j) != 0 {
774			str += "1"
775		} else {
776			str += "0"
777		}
778	}
779	return str
780}
781
782// Expose pallocData for testing.
783type PallocData pallocData
784
785func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
786	return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
787}
788func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
789func (d *PallocData) ScavengedSetRange(i, n uint) {
790	(*pallocData)(d).scavenged.setRange(i, n)
791}
792func (d *PallocData) PallocBits() *PallocBits {
793	return (*PallocBits)(&(*pallocData)(d).pallocBits)
794}
795func (d *PallocData) Scavenged() *PallocBits {
796	return (*PallocBits)(&(*pallocData)(d).scavenged)
797}
798
799// Expose fillAligned for testing.
800func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
801
802// Expose pageCache for testing.
803type PageCache pageCache
804
805const PageCachePages = pageCachePages
806
807func NewPageCache(base uintptr, cache, scav uint64) PageCache {
808	return PageCache(pageCache{base: base, cache: cache, scav: scav})
809}
810func (c *PageCache) Empty() bool   { return (*pageCache)(c).empty() }
811func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
812func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
813func (c *PageCache) Scav() uint64  { return (*pageCache)(c).scav }
814func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
815	return (*pageCache)(c).alloc(npages)
816}
817func (c *PageCache) Flush(s *PageAlloc) {
818	cp := (*pageCache)(c)
819	sp := (*pageAlloc)(s)
820
821	systemstack(func() {
822		// None of the tests need any higher-level locking, so we just
823		// take the lock internally.
824		lock(sp.mheapLock)
825		cp.flush(sp)
826		unlock(sp.mheapLock)
827	})
828}
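
// A minimal sketch of exercising a PageCache directly (hypothetical test
// code; a set bit in cache means the corresponding page is free):
//
//	base := runtime.PageBase(runtime.BaseChunkIdx, 0)
//	c := runtime.NewPageCache(base, ^uint64(0), 0) // 64 free pages, none scavenged
//	addr, scav := c.Alloc(1)                       // addr == base, scav == 0
//	_, _ = addr, scav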
829
830// Expose chunk index type.
831type ChunkIdx chunkIdx
832
// Expose pageAlloc for testing. Note that because pageAlloc is
// not in the heap, neither is PageAlloc.
835type PageAlloc pageAlloc
836
837func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
838	pp := (*pageAlloc)(p)
839
840	var addr, scav uintptr
841	systemstack(func() {
842		// None of the tests need any higher-level locking, so we just
843		// take the lock internally.
844		lock(pp.mheapLock)
845		addr, scav = pp.alloc(npages)
846		unlock(pp.mheapLock)
847	})
848	return addr, scav
849}
850func (p *PageAlloc) AllocToCache() PageCache {
851	pp := (*pageAlloc)(p)
852
853	var c PageCache
854	systemstack(func() {
855		// None of the tests need any higher-level locking, so we just
856		// take the lock internally.
857		lock(pp.mheapLock)
858		c = PageCache(pp.allocToCache())
859		unlock(pp.mheapLock)
860	})
861	return c
862}
863func (p *PageAlloc) Free(base, npages uintptr) {
864	pp := (*pageAlloc)(p)
865
866	systemstack(func() {
867		// None of the tests need any higher-level locking, so we just
868		// take the lock internally.
869		lock(pp.mheapLock)
870		pp.free(base, npages)
871		unlock(pp.mheapLock)
872	})
873}
874func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
875	return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
876}
877func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
878	pp := (*pageAlloc)(p)
879	systemstack(func() {
880		r = pp.scavenge(nbytes, nil, true)
881	})
882	return
883}
884func (p *PageAlloc) InUse() []AddrRange {
885	ranges := make([]AddrRange, 0, len(p.inUse.ranges))
886	for _, r := range p.inUse.ranges {
887		ranges = append(ranges, AddrRange{r})
888	}
889	return ranges
890}
891
892// Returns nil if the PallocData's L2 is missing.
893func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
894	ci := chunkIdx(i)
895	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
896}
897
898// AddrRange is a wrapper around addrRange for testing.
899type AddrRange struct {
900	addrRange
901}
902
903// MakeAddrRange creates a new address range.
904func MakeAddrRange(base, limit uintptr) AddrRange {
905	return AddrRange{makeAddrRange(base, limit)}
906}
907
908// Base returns the virtual base address of the address range.
909func (a AddrRange) Base() uintptr {
910	return a.addrRange.base.addr()
911}
912
// Limit returns the virtual address of the limit of the address range.
914func (a AddrRange) Limit() uintptr {
915	return a.addrRange.limit.addr()
916}
917
918// Equals returns true if the two address ranges are exactly equal.
919func (a AddrRange) Equals(b AddrRange) bool {
920	return a == b
921}
922
923// Size returns the size in bytes of the address range.
924func (a AddrRange) Size() uintptr {
925	return a.addrRange.size()
926}
927
928// testSysStat is the sysStat passed to test versions of various
// runtime structures. We have to keep track of this because otherwise
// memstats.mappedReady won't line up with other stats in the runtime
// during tests.
932var testSysStat = &memstats.other_sys
933
934// AddrRanges is a wrapper around addrRanges for testing.
935type AddrRanges struct {
936	addrRanges
937	mutable bool
938}
939
940// NewAddrRanges creates a new empty addrRanges.
941//
942// Note that this initializes addrRanges just like in the
943// runtime, so its memory is persistentalloc'd. Call this
944// function sparingly since the memory it allocates is
945// leaked.
946//
947// This AddrRanges is mutable, so we can test methods like
948// Add.
949func NewAddrRanges() AddrRanges {
950	r := addrRanges{}
951	r.init(testSysStat)
952	return AddrRanges{r, true}
953}
954
955// MakeAddrRanges creates a new addrRanges populated with
956// the ranges in a.
957//
958// The returned AddrRanges is immutable, so methods like
959// Add will fail.
960func MakeAddrRanges(a ...AddrRange) AddrRanges {
961	// Methods that manipulate the backing store of addrRanges.ranges should
962	// not be used on the result from this function (e.g. add) since they may
963	// trigger reallocation. That would normally be fine, except the new
964	// backing store won't come from the heap, but from persistentalloc, so
965	// we'll leak some memory implicitly.
966	ranges := make([]addrRange, 0, len(a))
967	total := uintptr(0)
968	for _, r := range a {
969		ranges = append(ranges, r.addrRange)
970		total += r.Size()
971	}
972	return AddrRanges{addrRanges{
973		ranges:     ranges,
974		totalBytes: total,
975		sysStat:    testSysStat,
976	}, false}
977}
978
979// Ranges returns a copy of the ranges described by the
980// addrRanges.
981func (a *AddrRanges) Ranges() []AddrRange {
982	result := make([]AddrRange, 0, len(a.addrRanges.ranges))
983	for _, r := range a.addrRanges.ranges {
984		result = append(result, AddrRange{r})
985	}
986	return result
987}
988
989// FindSucc returns the successor to base. See addrRanges.findSucc
990// for more details.
991func (a *AddrRanges) FindSucc(base uintptr) int {
992	return a.findSucc(base)
993}
994
995// Add adds a new AddrRange to the AddrRanges.
996//
997// The AddrRange must be mutable (i.e. created by NewAddrRanges),
998// otherwise this method will throw.
999func (a *AddrRanges) Add(r AddrRange) {
1000	if !a.mutable {
1001		throw("attempt to mutate immutable AddrRanges")
1002	}
1003	a.add(r.addrRange)
1004}
1005
1006// TotalBytes returns the totalBytes field of the addrRanges.
1007func (a *AddrRanges) TotalBytes() uintptr {
1008	return a.addrRanges.totalBytes
1009}
1010
1011// BitRange represents a range over a bitmap.
1012type BitRange struct {
1013	I, N uint // bit index and length in bits
1014}
1015
1016// NewPageAlloc creates a new page allocator for testing and
1017// initializes it with the scav and chunks maps. Each key in these maps
1018// represents a chunk index and each value is a series of bit ranges to
1019// set within each bitmap's chunk.
1020//
1021// The initialization of the pageAlloc preserves the invariant that if a
1022// scavenged bit is set the alloc bit is necessarily unset, so some
1023// of the bits described by scav may be cleared in the final bitmap if
1024// ranges in chunks overlap with them.
1025//
1026// scav is optional, and if nil, the scavenged bitmap will be cleared
1027// (as opposed to all 1s, which it usually is). Furthermore, every
1028// chunk index in scav must appear in chunks; ones that do not are
1029// ignored.
1030func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
1031	p := new(pageAlloc)
1032
1033	// We've got an entry, so initialize the pageAlloc.
1034	p.init(new(mutex), testSysStat, true)
1035	lockInit(p.mheapLock, lockRankMheap)
1036	for i, init := range chunks {
1037		addr := chunkBase(chunkIdx(i))
1038
1039		// Mark the chunk's existence in the pageAlloc.
1040		systemstack(func() {
1041			lock(p.mheapLock)
1042			p.grow(addr, pallocChunkBytes)
1043			unlock(p.mheapLock)
1044		})
1045
1046		// Initialize the bitmap and update pageAlloc metadata.
1047		ci := chunkIndex(addr)
1048		chunk := p.chunkOf(ci)
1049
		// Clear all the scavenged bits, which p.grow sets.
1051		chunk.scavenged.clearRange(0, pallocChunkPages)
1052
		// Simulate the allocation and subsequent free of all pages in
		// the chunk for the scavenge index. This leaves the scavenge index
		// in the same state as if every page in the chunk were free.
1056		p.scav.index.alloc(ci, pallocChunkPages)
1057		p.scav.index.free(ci, 0, pallocChunkPages)
1058
1059		// Apply scavenge state if applicable.
1060		if scav != nil {
1061			if scvg, ok := scav[i]; ok {
1062				for _, s := range scvg {
1063					// Ignore the case of s.N == 0. setRange doesn't handle
1064					// it and it's a no-op anyway.
1065					if s.N != 0 {
1066						chunk.scavenged.setRange(s.I, s.N)
1067					}
1068				}
1069			}
1070		}
1071
1072		// Apply alloc state.
1073		for _, s := range init {
1074			// Ignore the case of s.N == 0. allocRange doesn't handle
1075			// it and it's a no-op anyway.
1076			if s.N != 0 {
1077				chunk.allocRange(s.I, s.N)
1078
1079				// Make sure the scavenge index is updated.
1080				p.scav.index.alloc(ci, s.N)
1081			}
1082		}
1083
1084		// Update heap metadata for the allocRange calls above.
1085		systemstack(func() {
1086			lock(p.mheapLock)
1087			p.update(addr, pallocChunkPages, false, false)
1088			unlock(p.mheapLock)
1089		})
1090	}
1091
1092	return (*PageAlloc)(p)
1093}
1094
1095// FreePageAlloc releases hard OS resources owned by the pageAlloc. Once this
1096// is called the pageAlloc may no longer be used. The object itself will be
1097// collected by the garbage collector once it is no longer live.
1098func FreePageAlloc(pp *PageAlloc) {
1099	p := (*pageAlloc)(pp)
1100
1101	// Free all the mapped space for the summary levels.
1102	if pageAlloc64Bit != 0 {
1103		for l := 0; l < summaryLevels; l++ {
1104			sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
1105		}
1106	} else {
1107		resSize := uintptr(0)
1108		for _, s := range p.summary {
1109			resSize += uintptr(cap(s)) * pallocSumBytes
1110		}
1111		sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
1112	}
1113
1114	// Free extra data structures.
1115	sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
1116
1117	// Subtract back out whatever we mapped for the summaries.
1118	// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
1119	// (and in anger should actually be accounted for), and there's no other
1120	// way to figure out how much we actually mapped.
1121	gcController.mappedReady.Add(-int64(p.summaryMappedReady))
1122	testSysStat.add(-int64(p.summaryMappedReady))
1123
1124	// Free the mapped space for chunks.
1125	for i := range p.chunks {
1126		if x := p.chunks[i]; x != nil {
1127			p.chunks[i] = nil
1128			// This memory comes from sysAlloc and will always be page-aligned.
1129			sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
1130		}
1131	}
1132}
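
// A minimal sketch of how page-allocator tests typically build and tear down
// a PageAlloc (hypothetical test code; BaseChunkIdx is defined below):
//
//	chunks := map[runtime.ChunkIdx][]runtime.BitRange{
//		runtime.BaseChunkIdx: {{I: 0, N: 1}}, // first page pre-allocated
//	}
//	p := runtime.NewPageAlloc(chunks, nil)
//	defer runtime.FreePageAlloc(p)
//
//	addr, scav := p.Alloc(1) // grabs the next free page in the chunk
//	_ = scav
//	p.Free(addr, 1)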
1133
// BaseChunkIdx is a convenient chunkIdx value which works on both
// 64-bit and 32-bit platforms, allowing the tests to share code
1136// between the two.
1137//
1138// This should not be higher than 0x100*pallocChunkBytes to support
1139// mips and mipsle, which only have 31-bit address spaces.
1140var BaseChunkIdx = func() ChunkIdx {
1141	var prefix uintptr
1142	if pageAlloc64Bit != 0 {
1143		prefix = 0xc000
1144	} else {
1145		prefix = 0x100
1146	}
1147	baseAddr := prefix * pallocChunkBytes
1148	if goos.IsAix != 0 {
1149		baseAddr += arenaBaseOffset
1150	}
1151	return ChunkIdx(chunkIndex(baseAddr))
1152}()
1153
1154// PageBase returns an address given a chunk index and a page index
1155// relative to that chunk.
1156func PageBase(c ChunkIdx, pageIdx uint) uintptr {
1157	return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
1158}
1159
1160type BitsMismatch struct {
1161	Base      uintptr
1162	Got, Want uint64
1163}
1164
1165func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
1166	ok = true
1167
1168	// Run on the system stack to avoid stack growth allocation.
1169	systemstack(func() {
1170		getg().m.mallocing++
1171
1172		// Lock so that we can safely access the bitmap.
1173		lock(&mheap_.lock)
1174	chunkLoop:
1175		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
1176			chunk := mheap_.pages.tryChunkOf(i)
1177			if chunk == nil {
1178				continue
1179			}
1180			for j := 0; j < pallocChunkPages/64; j++ {
1181				// Run over each 64-bit bitmap section and ensure
1182				// scavenged is being cleared properly on allocation.
1183				// If a used bit and scavenged bit are both set, that's
1184				// an error, and could indicate a larger problem, or
1185				// an accounting problem.
1186				want := chunk.scavenged[j] &^ chunk.pallocBits[j]
1187				got := chunk.scavenged[j]
1188				if want != got {
1189					ok = false
1190					if n >= len(mismatches) {
1191						break chunkLoop
1192					}
1193					mismatches[n] = BitsMismatch{
1194						Base: chunkBase(i) + uintptr(j)*64*pageSize,
1195						Got:  got,
1196						Want: want,
1197					}
1198					n++
1199				}
1200			}
1201		}
1202		unlock(&mheap_.lock)
1203
1204		getg().m.mallocing--
1205	})
1206	return
1207}
1208
1209func PageCachePagesLeaked() (leaked uintptr) {
1210	stw := stopTheWorld(stwForTestPageCachePagesLeaked)
1211
1212	// Walk over destroyed Ps and look for unflushed caches.
1213	deadp := allp[len(allp):cap(allp)]
1214	for _, p := range deadp {
1215		// Since we're going past len(allp) we may see nil Ps.
1216		// Just ignore them.
1217		if p != nil {
1218			leaked += uintptr(sys.OnesCount64(p.pcache.cache))
1219		}
1220	}
1221
1222	startTheWorld(stw)
1223	return
1224}
1225
1226type Mutex = mutex
1227
1228var Lock = lock
1229var Unlock = unlock
1230
1231var MutexContended = mutexContended
1232
1233func SemRootLock(addr *uint32) *mutex {
1234	root := semtable.rootFor(addr)
1235	return &root.lock
1236}
1237
1238var Semacquire = semacquire
1239var Semrelease1 = semrelease1
1240
1241func SemNwait(addr *uint32) uint32 {
1242	root := semtable.rootFor(addr)
1243	return root.nwait.Load()
1244}
1245
1246const SemTableSize = semTabSize
1247
1248// SemTable is a wrapper around semTable exported for testing.
1249type SemTable struct {
1250	semTable
1251}
1252
1253// Enqueue simulates enqueuing a waiter for a semaphore (or lock) at addr.
1254func (t *SemTable) Enqueue(addr *uint32) {
1255	s := acquireSudog()
1256	s.releasetime = 0
1257	s.acquiretime = 0
1258	s.ticket = 0
1259	t.semTable.rootFor(addr).queue(addr, s, false)
1260}
1261
1262// Dequeue simulates dequeuing a waiter for a semaphore (or lock) at addr.
1263//
1264// Returns true if there actually was a waiter to be dequeued.
1265func (t *SemTable) Dequeue(addr *uint32) bool {
1266	s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
1267	if s != nil {
1268		releaseSudog(s)
1269		return true
1270	}
1271	return false
1272}
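
// A minimal sketch of exercising the semaphore-table wrapper (hypothetical
// test code):
//
//	var sema uint32
//	tab := runtime.Escape(new(runtime.SemTable))
//	tab.Enqueue(&sema)
//	if !tab.Dequeue(&sema) {
//		t.Fatal("expected a waiter for sema")
//	}
//	if tab.Dequeue(&sema) {
//		t.Fatal("expected no more waiters for sema")
//	}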
1273
1274// mspan wrapper for testing.
1275type MSpan mspan
1276
1277// Allocate an mspan for testing.
1278func AllocMSpan() *MSpan {
1279	var s *mspan
1280	systemstack(func() {
1281		lock(&mheap_.lock)
1282		s = (*mspan)(mheap_.spanalloc.alloc())
1283		unlock(&mheap_.lock)
1284	})
1285	return (*MSpan)(s)
1286}
1287
1288// Free an allocated mspan.
1289func FreeMSpan(s *MSpan) {
1290	systemstack(func() {
1291		lock(&mheap_.lock)
1292		mheap_.spanalloc.free(unsafe.Pointer(s))
1293		unlock(&mheap_.lock)
1294	})
1295}
1296
1297func MSpanCountAlloc(ms *MSpan, bits []byte) int {
1298	s := (*mspan)(ms)
1299	s.nelems = uint16(len(bits) * 8)
1300	s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
1301	result := s.countAlloc()
1302	s.gcmarkBits = nil
1303	return result
1304}
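
// A minimal sketch of using these mspan helpers together (hypothetical test
// code; each byte of bits covers 8 object slots):
//
//	s := runtime.AllocMSpan()
//	defer runtime.FreeMSpan(s)
//	bits := make([]byte, 8) // a 64-object span
//	bits[0] = 0x0f          // mark the first 4 objects as allocated
//	if n := runtime.MSpanCountAlloc(s, bits); n != 4 {
//		t.Fatalf("got %d allocated objects, want 4", n)
//	}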
1305
1306const (
1307	TimeHistSubBucketBits = timeHistSubBucketBits
1308	TimeHistNumSubBuckets = timeHistNumSubBuckets
1309	TimeHistNumBuckets    = timeHistNumBuckets
1310	TimeHistMinBucketBits = timeHistMinBucketBits
1311	TimeHistMaxBucketBits = timeHistMaxBucketBits
1312)
1313
1314type TimeHistogram timeHistogram
1315
1316// Counts returns the counts for the given bucket, subBucket indices.
1317// Returns true if the bucket was valid, otherwise returns the counts
1318// for the overflow bucket if bucket > 0 or the underflow bucket if
1319// bucket < 0, and false.
1320func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
1321	t := (*timeHistogram)(th)
1322	if bucket < 0 {
1323		return t.underflow.Load(), false
1324	}
1325	i := bucket*TimeHistNumSubBuckets + subBucket
1326	if i >= len(t.counts) {
1327		return t.overflow.Load(), false
1328	}
1329	return t.counts[i].Load(), true
1330}
1331
1332func (th *TimeHistogram) Record(duration int64) {
1333	(*timeHistogram)(th).record(duration)
1334}
1335
1336var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
1337
1338func SetIntArgRegs(a int) int {
1339	lock(&finlock)
1340	old := intArgRegs
1341	if a >= 0 {
1342		intArgRegs = a
1343	}
1344	unlock(&finlock)
1345	return old
1346}
1347
1348func FinalizerGAsleep() bool {
1349	return fingStatus.Load()&fingWait != 0
1350}
1351
1352// For GCTestMoveStackOnNextCall, it's important not to introduce an
1353// extra layer of call, since then there's a return before the "real"
1354// next call.
1355var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
1356
1357// For GCTestIsReachable, it's important that we do this as a call so
1358// escape analysis can see through it.
1359func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
1360	return gcTestIsReachable(ptrs...)
1361}
1362
1363// For GCTestPointerClass, it's important that we do this as a call so
1364// escape analysis can see through it.
1365//
1366// This is nosplit because gcTestPointerClass is.
1367//
1368//go:nosplit
1369func GCTestPointerClass(p unsafe.Pointer) string {
1370	return gcTestPointerClass(p)
1371}
1372
1373const Raceenabled = raceenabled
1374
1375const (
1376	GCBackgroundUtilization            = gcBackgroundUtilization
1377	GCGoalUtilization                  = gcGoalUtilization
1378	DefaultHeapMinimum                 = defaultHeapMinimum
1379	MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
1380	MemoryLimitMinHeapGoalHeadroom     = memoryLimitMinHeapGoalHeadroom
1381)
1382
1383type GCController struct {
1384	gcControllerState
1385}
1386
1387func NewGCController(gcPercent int, memoryLimit int64) *GCController {
1388	// Force the controller to escape. We're going to
1389	// do 64-bit atomics on it, and if it gets stack-allocated
1390	// on a 32-bit architecture, it may get allocated unaligned
1391	// space.
1392	g := Escape(new(GCController))
1393	g.gcControllerState.test = true // Mark it as a test copy.
1394	g.init(int32(gcPercent), memoryLimit)
1395	return g
1396}
1397
1398func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
1399	trigger, _ := c.trigger()
1400	if c.heapMarked > trigger {
1401		trigger = c.heapMarked
1402	}
1403	c.maxStackScan.Store(stackSize)
1404	c.globalsScan.Store(globalsSize)
1405	c.heapLive.Store(trigger)
1406	c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
1407	c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
1408}
1409
1410func (c *GCController) AssistWorkPerByte() float64 {
1411	return c.assistWorkPerByte.Load()
1412}
1413
1414func (c *GCController) HeapGoal() uint64 {
1415	return c.heapGoal()
1416}
1417
1418func (c *GCController) HeapLive() uint64 {
1419	return c.heapLive.Load()
1420}
1421
1422func (c *GCController) HeapMarked() uint64 {
1423	return c.heapMarked
1424}
1425
1426func (c *GCController) Triggered() uint64 {
1427	return c.triggered
1428}
1429
1430type GCControllerReviseDelta struct {
1431	HeapLive        int64
1432	HeapScan        int64
1433	HeapScanWork    int64
1434	StackScanWork   int64
1435	GlobalsScanWork int64
1436}
1437
1438func (c *GCController) Revise(d GCControllerReviseDelta) {
1439	c.heapLive.Add(d.HeapLive)
1440	c.heapScan.Add(d.HeapScan)
1441	c.heapScanWork.Add(d.HeapScanWork)
1442	c.stackScanWork.Add(d.StackScanWork)
1443	c.globalsScanWork.Add(d.GlobalsScanWork)
1444	c.revise()
1445}
1446
1447func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
1448	c.assistTime.Store(assistTime)
1449	c.endCycle(elapsed, gomaxprocs, false)
1450	c.resetLive(bytesMarked)
1451	c.commit(false)
1452}
1453
1454func (c *GCController) AddIdleMarkWorker() bool {
1455	return c.addIdleMarkWorker()
1456}
1457
1458func (c *GCController) NeedIdleMarkWorker() bool {
1459	return c.needIdleMarkWorker()
1460}
1461
1462func (c *GCController) RemoveIdleMarkWorker() {
1463	c.removeIdleMarkWorker()
1464}
1465
1466func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
1467	c.setMaxIdleMarkWorkers(max)
1468}
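
// A minimal sketch of driving a test GCController through one GC cycle
// (hypothetical test code; the sizes, fractions, and times are arbitrary):
//
//	c := runtime.NewGCController(100, math.MaxInt64) // GOGC=100, no memory limit
//	c.StartCycle(
//		1<<20, // stack bytes to scan
//		1<<10, // global bytes to scan
//		0.5,   // scannable fraction of the heap
//		8,     // GOMAXPROCS
//	)
//	c.Revise(runtime.GCControllerReviseDelta{HeapLive: 1 << 20, HeapScanWork: 1 << 18})
//	c.EndCycle(1<<20, 0, 1e6, 8) // bytesMarked, assistTime, elapsed ns, GOMAXPROCS
//	_ = c.HeapGoal()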
1469
1470var alwaysFalse bool
1471var escapeSink any
1472
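// Escape forces x to escape to the heap and then returns it unchanged.
// Tests use it to defeat escape analysis, e.g. so that a slice of Gs
// stays put while guintptrs into it are held.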
1473func Escape[T any](x T) T {
1474	if alwaysFalse {
1475		escapeSink = x
1476	}
1477	return x
1478}
1479
1480// Acquirem blocks preemption.
1481func Acquirem() {
1482	acquirem()
1483}
1484
1485func Releasem() {
1486	releasem(getg().m)
1487}
1488
1489var Timediv = timediv
1490
1491type PIController struct {
1492	piController
1493}
1494
1495func NewPIController(kp, ti, tt, min, max float64) *PIController {
1496	return &PIController{piController{
1497		kp:  kp,
1498		ti:  ti,
1499		tt:  tt,
1500		min: min,
1501		max: max,
1502	}}
1503}
1504
1505func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
1506	return c.piController.next(input, setpoint, period)
1507}
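
// A minimal sketch of stepping the PI controller (hypothetical test code;
// the gains and bounds are arbitrary):
//
//	c := runtime.NewPIController(
//		0.5,  // kp: proportional gain
//		1.0,  // ti: integral time
//		1.0,  // tt: reset time, for anti-windup
//		0,    // min output
//		1000, // max output
//	)
//	out, ok := c.Next(5, 10, 1) // input, setpoint, period
//	_ = out
//	_ = ok // false if the controller hit a non-finite internal state and reset itself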
1508
1509const (
1510	CapacityPerProc          = capacityPerProc
1511	GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
1512)
1513
1514type GCCPULimiter struct {
1515	limiter gcCPULimiterState
1516}
1517
1518func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
1519	// Force the controller to escape. We're going to
1520	// do 64-bit atomics on it, and if it gets stack-allocated
1521	// on a 32-bit architecture, it may get allocated unaligned
1522	// space.
1523	l := Escape(new(GCCPULimiter))
1524	l.limiter.test = true
1525	l.limiter.resetCapacity(now, gomaxprocs)
1526	return l
1527}
1528
1529func (l *GCCPULimiter) Fill() uint64 {
1530	return l.limiter.bucket.fill
1531}
1532
1533func (l *GCCPULimiter) Capacity() uint64 {
1534	return l.limiter.bucket.capacity
1535}
1536
1537func (l *GCCPULimiter) Overflow() uint64 {
1538	return l.limiter.overflow
1539}
1540
1541func (l *GCCPULimiter) Limiting() bool {
1542	return l.limiter.limiting()
1543}
1544
1545func (l *GCCPULimiter) NeedUpdate(now int64) bool {
1546	return l.limiter.needUpdate(now)
1547}
1548
1549func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
1550	l.limiter.startGCTransition(enableGC, now)
1551}
1552
1553func (l *GCCPULimiter) FinishGCTransition(now int64) {
1554	l.limiter.finishGCTransition(now)
1555}
1556
1557func (l *GCCPULimiter) Update(now int64) {
1558	l.limiter.update(now)
1559}
1560
1561func (l *GCCPULimiter) AddAssistTime(t int64) {
1562	l.limiter.addAssistTime(t)
1563}
1564
1565func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
1566	l.limiter.resetCapacity(now, nprocs)
1567}
1568
1569const ScavengePercent = scavengePercent
1570
1571type Scavenger struct {
1572	Sleep      func(int64) int64
1573	Scavenge   func(uintptr) (uintptr, int64)
1574	ShouldStop func() bool
1575	GoMaxProcs func() int32
1576
1577	released  atomic.Uintptr
1578	scavenger scavengerState
1579	stop      chan<- struct{}
1580	done      <-chan struct{}
1581}
1582
1583func (s *Scavenger) Start() {
1584	if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
1585		panic("must populate all stubs")
1586	}
1587
1588	// Install hooks.
1589	s.scavenger.sleepStub = s.Sleep
1590	s.scavenger.scavenge = s.Scavenge
1591	s.scavenger.shouldStop = s.ShouldStop
1592	s.scavenger.gomaxprocs = s.GoMaxProcs
1593
1594	// Start up scavenger goroutine, and wait for it to be ready.
1595	stop := make(chan struct{})
1596	s.stop = stop
1597	done := make(chan struct{})
1598	s.done = done
1599	go func() {
1600		// This should match bgscavenge, loosely.
1601		s.scavenger.init()
1602		s.scavenger.park()
1603		for {
1604			select {
1605			case <-stop:
1606				close(done)
1607				return
1608			default:
1609			}
1610			released, workTime := s.scavenger.run()
1611			if released == 0 {
1612				s.scavenger.park()
1613				continue
1614			}
1615			s.released.Add(released)
1616			s.scavenger.sleep(workTime)
1617		}
1618	}()
1619	if !s.BlockUntilParked(1e9 /* 1 second */) {
1620		panic("timed out waiting for scavenger to get ready")
1621	}
1622}
1623
1624// BlockUntilParked blocks until the scavenger parks, or until
1625// timeout is exceeded. Returns true if the scavenger parked.
1626//
1627// Note that in testing, parked means something slightly different.
1628// In anger, the scavenger parks to sleep, too, but in testing,
1629// it only parks when it actually has no work to do.
1630func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1631	// Just spin, waiting for it to park.
1632	//
1633	// The actual parking process is racy with respect to
1634	// wakeups, which is fine, but for testing we need something
1635	// a bit more robust.
1636	start := nanotime()
1637	for nanotime()-start < timeout {
1638		lock(&s.scavenger.lock)
1639		parked := s.scavenger.parked
1640		unlock(&s.scavenger.lock)
1641		if parked {
1642			return true
1643		}
1644		Gosched()
1645	}
1646	return false
1647}
1648
1649// Released returns how many bytes the scavenger released.
1650func (s *Scavenger) Released() uintptr {
1651	return s.released.Load()
1652}
1653
1654// Wake wakes up a parked scavenger to keep running.
1655func (s *Scavenger) Wake() {
1656	s.scavenger.wake()
1657}
1658
1659// Stop cleans up the scavenger's resources. The scavenger
1660// must be parked for this to work.
1661func (s *Scavenger) Stop() {
1662	lock(&s.scavenger.lock)
1663	parked := s.scavenger.parked
1664	unlock(&s.scavenger.lock)
1665	if !parked {
1666		panic("tried to clean up scavenger that is not parked")
1667	}
1668	close(s.stop)
1669	s.Wake()
1670	<-s.done
1671}
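
// A minimal sketch of wiring up a stubbed Scavenger in a test (hypothetical
// test code; the stubs hand out a fixed fake budget of pages and never ask
// the scavenger to stop early):
//
//	var remaining uintptr = 64 * runtime.PageSize
//	s := &runtime.Scavenger{
//		Sleep: func(ns int64) int64 { return ns },
//		Scavenge: func(n uintptr) (uintptr, int64) {
//			if remaining == 0 {
//				return 0, 0
//			}
//			if n > remaining {
//				n = remaining
//			}
//			remaining -= n
//			return n, 10
//		},
//		ShouldStop: func() bool { return false },
//		GoMaxProcs: func() int32 { return 1 },
//	}
//	s.Start()               // panics if the scavenger doesn't park within 1s
//	s.Wake()                // run until the fake budget is exhausted
//	if !s.BlockUntilParked(1e9) {
//		t.Fatal("scavenger didn't park")
//	}
//	t.Log("released", s.Released(), "bytes")
//	s.Stop()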
1672
1673type ScavengeIndex struct {
1674	i scavengeIndex
1675}
1676
1677func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
1678	s := new(ScavengeIndex)
1679	// This is a bit lazy but we easily guarantee we'll be able
1680	// to reference all the relevant chunks. The worst-case
1681	// memory usage here is 512 MiB, but tests generally use
1682	// small offsets from BaseChunkIdx, which results in ~100s
1683	// of KiB in memory use.
1684	//
1685	// This may still be worth making better, at least by sharing
1686	// this fairly large array across calls with a sync.Pool or
1687	// something. Currently, when the tests are run serially,
1688	// it takes around 0.5s. Not all that much, but if we have
1689	// a lot of tests like this it could add up.
1690	s.i.chunks = make([]atomicScavChunkData, max)
1691	s.i.min.Store(uintptr(min))
1692	s.i.max.Store(uintptr(max))
1693	s.i.minHeapIdx.Store(uintptr(min))
1694	s.i.test = true
1695	return s
1696}
1697
1698func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
1699	ci, off := s.i.find(force)
1700	return ChunkIdx(ci), off
1701}
1702
1703func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1704	sc, ec := chunkIndex(base), chunkIndex(limit-1)
1705	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1706
1707	if sc == ec {
1708		// The range doesn't cross any chunk boundaries.
1709		s.i.alloc(sc, ei+1-si)
1710	} else {
1711		// The range crosses at least one chunk boundary.
1712		s.i.alloc(sc, pallocChunkPages-si)
1713		for c := sc + 1; c < ec; c++ {
1714			s.i.alloc(c, pallocChunkPages)
1715		}
1716		s.i.alloc(ec, ei+1)
1717	}
1718}
1719
1720func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1721	sc, ec := chunkIndex(base), chunkIndex(limit-1)
1722	si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1723
1724	if sc == ec {
1725		// The range doesn't cross any chunk boundaries.
1726		s.i.free(sc, si, ei+1-si)
1727	} else {
1728		// The range crosses at least one chunk boundary.
1729		s.i.free(sc, si, pallocChunkPages-si)
1730		for c := sc + 1; c < ec; c++ {
1731			s.i.free(c, 0, pallocChunkPages)
1732		}
1733		s.i.free(ec, 0, ei+1)
1734	}
1735}
1736
1737func (s *ScavengeIndex) ResetSearchAddrs() {
1738	for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
1739		addr, marked := a.Load()
1740		if marked {
1741			a.StoreUnmark(addr, addr)
1742		}
1743		a.Clear()
1744	}
1745	s.i.freeHWM = minOffAddr
1746}
1747
1748func (s *ScavengeIndex) NextGen() {
1749	s.i.nextGen()
1750}
1751
1752func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
1753	s.i.setEmpty(chunkIdx(ci))
1754}
1755
1756func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1757	sc0 := scavChunkData{
1758		gen:            gen,
1759		inUse:          inUse,
1760		lastInUse:      lastInUse,
1761		scavChunkFlags: scavChunkFlags(flags),
1762	}
1763	scp := sc0.pack()
1764	sc1 := unpackScavChunkData(scp)
1765	return sc0 == sc1
1766}
1767
1768const GTrackingPeriod = gTrackingPeriod
1769
1770var ZeroBase = unsafe.Pointer(&zerobase)
1771
1772const UserArenaChunkBytes = userArenaChunkBytes
1773
1774type UserArena struct {
1775	arena *userArena
1776}
1777
1778func NewUserArena() *UserArena {
1779	return &UserArena{newUserArena()}
1780}
1781
1782func (a *UserArena) New(out *any) {
1783	i := efaceOf(out)
1784	typ := i._type
1785	if typ.Kind_&abi.KindMask != abi.Pointer {
1786		panic("new result of non-ptr type")
1787	}
1788	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
1789	i.data = a.arena.new(typ)
1790}
1791
1792func (a *UserArena) Slice(sl any, cap int) {
1793	a.arena.slice(sl, cap)
1794}
1795
1796func (a *UserArena) Free() {
1797	a.arena.free()
1798}
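
// A minimal sketch of allocating from a user arena via these wrappers
// (hypothetical test code):
//
//	a := runtime.NewUserArena()
//	var out any = (*int)(nil) // the dynamic pointer type tells New what to allocate
//	a.New(&out)
//	p := out.(*int) // now points into the arena
//	*p = 42
//
//	var sl []byte
//	a.Slice(&sl, 64) // back a 64-element slice with arena memory
//	a.Free()         // release the arena; p and sl must not be used after this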
1799
1800func GlobalWaitingArenaChunks() int {
1801	n := 0
1802	systemstack(func() {
1803		lock(&mheap_.lock)
1804		for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1805			n++
1806		}
1807		unlock(&mheap_.lock)
1808	})
1809	return n
1810}
1811
1812func UserArenaClone[T any](s T) T {
1813	return arena_heapify(s).(T)
1814}
1815
1816var AlignUp = alignUp
1817
1818func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
1819	return blockUntilEmptyFinalizerQueue(timeout)
1820}
1821
1822func FrameStartLine(f *Frame) int {
1823	return f.startLine
1824}
1825
1826// PersistentAlloc allocates some memory that lives outside the Go heap.
1827// This memory will never be freed; use sparingly.
1828func PersistentAlloc(n uintptr) unsafe.Pointer {
1829	return persistentalloc(n, 0, &memstats.other_sys)
1830}
1831
1832// FPCallers works like Callers and uses frame pointer unwinding to populate
1833// pcBuf with the return addresses of the physical frames on the stack.
1834func FPCallers(pcBuf []uintptr) int {
1835	return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
1836}
1837
1838const FramePointerEnabled = framepointer_enabled
1839
1840var (
1841	IsPinned      = isPinned
1842	GetPinCounter = pinnerGetPinCounter
1843)
1844
1845func SetPinnerLeakPanic(f func()) {
1846	pinnerLeakPanic = f
1847}
1848func GetPinnerLeakPanic() func() {
1849	return pinnerLeakPanic
1850}
1851
1852var testUintptr uintptr
1853
1854func MyGenericFunc[T any]() {
1855	systemstack(func() {
1856		testUintptr = 4
1857	})
1858}
1859
1860func UnsafePoint(pc uintptr) bool {
1861	fi := findfunc(pc)
1862	v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
1863	switch v {
1864	case abi.UnsafePointUnsafe:
1865		return true
1866	case abi.UnsafePointSafe:
1867		return false
1868	case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
1869		// These are all interruptible, they just encode a nonstandard
1870		// way of recovering when interrupted.
1871		return false
1872	default:
1873		var buf [20]byte
1874		panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
1875	}
1876}
1877
1878type TraceMap struct {
1879	traceMap
1880}
1881
1882func (m *TraceMap) PutString(s string) (uint64, bool) {
1883	return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
1884}
1885
1886func (m *TraceMap) Reset() {
1887	m.traceMap.reset()
1888}
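
// A minimal sketch of exercising the trace map wrapper (hypothetical test
// code; the bool reports whether the entry was newly added):
//
//	var m runtime.TraceMap
//	id1, inserted := m.PutString("hello") // inserted == true on first insert
//	id2, _ := m.PutString("hello")        // same string, so id2 == id1
//	_, _, _ = id1, id2, inserted
//	m.Reset() // release the map's memory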
1889