// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/runtime/atomic"
	"internal/stringslite"
	"runtime/internal/sys"
	"unsafe"
)

// throwType indicates the current type of ongoing throw, which affects the
// amount of detail printed to stderr. Higher values include more detail.
type throwType uint32

const (
	// throwTypeNone means that we are not throwing.
	throwTypeNone throwType = iota

	// throwTypeUser is a throw due to a problem with the application.
	//
	// These throws do not include runtime frames, system goroutines, or
	// frame metadata.
	throwTypeUser

	// throwTypeRuntime is a throw due to a problem with Go itself.
	//
	// These throws include as much information as possible to aid in
	// debugging the runtime, including runtime frames, system goroutines,
	// and frame metadata.
	throwTypeRuntime
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
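//
// For example, a function like the following (g1, g2, and cond are
// placeholder names) is lowered roughly as sketched below. This is an
// illustrative sketch, not actual compiler output:
//
//	func f() {
//		defer g1()
//		if cond {
//			defer g2()
//		}
//		// ... body ...
//	}
//
// becomes, schematically:
//
//	func f() {
//		var deferBits uint8 // bitmask in a stack slot
//		slot0 := g1         // defer fn stored in a stack slot
//		deferBits |= 1 << 0
//		if cond {
//			slot1 := g2
//			deferBits |= 1 << 1
//		}
//		// ... body ...
//		// inline exit sequence, most recent defer first
//		if deferBits&(1<<1) != 0 {
//			deferBits &^= 1 << 1
//			slot1()
//		}
//		if deferBits&(1<<0) != 0 {
//			deferBits &^= 1 << 0
//			slot0()
//		}
//	}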

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if goarch.IsWasm == 0 && stringslite.HasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by a negative amount.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.
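//
// For example, for an index expression s[i] the compiler emits,
// schematically (a sketch, not the literal generated code):
//
//	if uint(i) >= uint(len(s)) {
//		panicIndex(i, len(s)) // tail calls goPanicIndex
//	}
//	// ... use s[i] ...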

// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}

//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}

//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}

//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}

//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(getcallerpc(), "slice length too short to convert to array or pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

//go:yeswritebarrierrec
func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

//go:yeswritebarrierrec
func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

// Create a new deferred function fn, which has no arguments and results.
// The compiler turns a defer statement into a call to this.
func deferproc(fn func()) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = getcallerpc()
	// We must not be preempted between calling getcallersp and
	// storing it to d.sp because getcallersp's result is a
	// uintptr stack pointer.
	d.sp = getcallersp()

	// deferproc returns 0 normally.
	// A deferred func that stops a panic
	// makes the deferproc return 1.
	// The code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
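	//
	// Schematically, a call site therefore behaves like this (a sketch
	// of the generated control flow, not literal output):
	//
	//	if deferproc(fn) != 0 {
	//		// a recover happened; run the function epilogue
	//		goto deferreturnAndRet
	//	}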
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

var rangeDoneError = error(errorString("range function continued iteration after function for loop body returned false"))
var rangePanicError = error(errorString("range function continued iteration after loop body panic"))
var rangeExhaustedError = error(errorString("range function continued iteration after whole loop exit"))
var rangeMissingPanicError = error(errorString("range function recovered a loop body panic and did not resume panicking"))

//go:noinline
func panicrangestate(state int) {
	switch abi.RF_State(state) {
	case abi.RF_DONE:
		panic(rangeDoneError)
	case abi.RF_PANIC:
		panic(rangePanicError)
	case abi.RF_EXHAUSTED:
		panic(rangeExhaustedError)
	case abi.RF_MISSING_PANIC:
		panic(rangeMissingPanicError)
	}
	throw("unexpected state passed to panicrangestate")
}

// deferrangefunc is called by functions that are about to
// execute a range-over-function loop in which the loop body
// may execute a defer statement. That defer needs to add to
// the chain for the current function, not the func literal synthesized
// to represent the loop body. To do that, the original function
// calls deferrangefunc to obtain an opaque token representing
// the current frame, and then the loop body uses deferprocat
// instead of deferproc to add to that frame's defer lists.
//
// The token is an 'any' with underlying type *atomic.Pointer[_defer].
// It is the atomically-updated head of a linked list of _defer structs
// representing deferred calls. At the same time, we create a _defer
// struct on the main g._defer list with d.head set to this head pointer.
//
// The g._defer list is now a linked list of deferred calls,
// but an atomic list hanging off:
//
//		g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
//	                             | .head
//	                             |
//	                             +--> dY -> dX -> nil
//
// with each -> indicating a d.link pointer, and where drangefunc
// has the d.rangefunc = true bit set.
// Note that the function being ranged over may have added
// its own defers (d4 and d3), so drangefunc need not be at the
// top of the list when deferprocat is used. This is why we pass
// the atomic head explicitly.
//
// To keep misbehaving programs from crashing the runtime,
// deferprocat pushes new defers onto the .head list atomically.
// The fact that it is a separate list from the main goroutine
// defer list means that the main goroutine's defers can still
// be handled non-atomically.
//
// In the diagram, dY and dX are meant to be processed when
// drangefunc would be processed, which is to say the defer order
// should be d4, d3, dY, dX, d2, d1. To make that happen,
// when defer processing reaches a d with rangefunc=true,
// it calls deferconvert to atomically take the extras
// away from d.head and then adds them to the main list.
//
// That is, deferconvert changes this list:
//
//		g._defer => drangefunc -> d2 -> d1 -> nil
//	                 | .head
//	                 |
//	                 +--> dY -> dX -> nil
//
// into this list:
//
//	g._defer => dY -> dX -> d2 -> d1 -> nil
//
// It also poisons *drangefunc.head so that any future
// deferprocat using that head will throw.
// (The atomic head is ordinary garbage collected memory so that
// it's not a problem if user code holds onto it beyond
// the lifetime of drangefunc.)
//
// TODO: We could arrange for the compiler to call into the
// runtime after the loop finishes normally, to do an eager
// deferconvert, which would catch calling the loop body
// and having it defer after the loop is done. If we have a
// more general catch of loop body misuse, though, this
// might not be worth worrying about in addition.
//
// See also ../cmd/compile/internal/rangefunc/rewrite.go.
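//
// For example, this machinery supports user code like the following
// (an illustrative sketch; seq and f are placeholder names):
//
//	for x := range seq { // seq is a func(yield func(T) bool)
//		defer f(x) // must run when the enclosing function returns
//	}
//
// The loop body is compiled into a synthesized func literal, so its
// defer statement becomes a call to deferprocat with the token that
// deferrangefunc returned at entry to the enclosing function.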
func deferrangefunc() any {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	d := newdefer()
	d.link = gp._defer
	gp._defer = d
	d.pc = getcallerpc()
	// We must not be preempted between calling getcallersp and
	// storing it to d.sp because getcallersp's result is a
	// uintptr stack pointer.
	d.sp = getcallersp()

	d.rangefunc = true
	d.head = new(atomic.Pointer[_defer])

	return d.head
}

// badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer {
	return (*_defer)(unsafe.Pointer(uintptr(1)))
}

// deferprocat is like deferproc but adds to the atomic list represented by frame.
// See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any) {
	head := frame.(*atomic.Pointer[_defer])
	if raceenabled {
		racewritepc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferprocat))
	}
	d1 := newdefer()
	d1.fn = fn
	for {
		d1.link = head.Load()
		if d1.link == badDefer() {
			throw("defer after range func returned")
		}
		if head.CompareAndSwap(d1.link, d1) {
			break
		}
	}

	// Must be last - see deferproc above.
	return0()
}

// deferconvert converts the rangefunc defer list of d0 into an ordinary list
// following d0.
// See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer) {
	head := d0.head
	if raceenabled {
		racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert))
	}
	tail := d0.link
	d0.rangefunc = false

	var d *_defer
	for {
		d = head.Load()
		if head.CompareAndSwap(d, badDefer()) {
			break
		}
	}
	if d == nil {
		return
	}
	for d1 := d; ; d1 = d1.link {
		d1.sp = d0.sp
		d1.pc = d0.pc
		if d1.link == nil {
			d1.link = tail
			break
		}
	}
	d0.link = d
	return
}

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized.
// All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack.
//
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// fn is already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.heap = false
	d.rangefunc = false
	d.sp = getcallersp()
	d.pc = getcallerpc()
	// The lines below implement:
	//   d.link = gp._defer
	//   d.head = nil
	//   gp._defer = d
	// But without write barriers. The first two are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The third write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&d.head)) = 0
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Each P holds a pool for defers.

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with popDefer. The defer is not
// added to any defer chain yet.
func newdefer() *_defer {
	var d *_defer
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == 0 && sched.deferpool != nil {
		lock(&sched.deferlock)
		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
			d := sched.deferpool
			sched.deferpool = d.link
			d.link = nil
			pp.deferpool = append(pp.deferpool, d)
		}
		unlock(&sched.deferlock)
	}
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	releasem(mp)
	mp, pp = nil, nil

	if d == nil {
		// Allocate new defer.
		d = new(_defer)
	}
	d.heap = true
	return d
}

// popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g) {
	d := gp._defer
	d.fn = nil // Can in theory point to the stack
	// We must not copy the stack between updating gp._defer and setting
	// d.link to nil. Between these two steps, d is not on any defer list, so
	// stack copying won't adjust stack pointers in it (namely, d.link). Hence,
	// if we were to copy the stack, d could then contain a stale pointer.
	gp._defer = d.link
	d.link = nil
	// After this point we can copy the stack.

	if !d.heap {
		return
	}

	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.deferpool) == cap(pp.deferpool) {
		// Transfer half of local cache to the central cache.
		var first, last *_defer
		for len(pp.deferpool) > cap(pp.deferpool)/2 {
			n := len(pp.deferpool)
			d := pp.deferpool[n-1]
			pp.deferpool[n-1] = nil
			pp.deferpool = pp.deferpool[:n-1]
			if first == nil {
				first = d
			} else {
				last.link = d
			}
			last = d
		}
		lock(&sched.deferlock)
		last.link = sched.deferpool
		sched.deferpool = first
		unlock(&sched.deferlock)
	}

	*d = _defer{}

	pp.deferpool = append(pp.deferpool, d)

	releasem(mp)
	mp, pp = nil, nil
}

// deferreturn runs deferred functions for the caller's frame.
// The compiler inserts a call to this at the end of any
// function which calls defer.
func deferreturn() {
	var p _panic
	p.deferreturn = true

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
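//
// For example (a minimal sketch):
//
//	go func() {
//		defer println("still runs") // deferred calls are executed
//		runtime.Goexit()            // terminates only this goroutine
//		println("not reached")
//	}()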
func Goexit() {
	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	goexit1()
}

// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
	defer func() {
		text := "panic while printing panic value"
		switch r := recover().(type) {
		case nil:
			// nothing to do
		case string:
			throw(text + ": " + r)
		default:
			throw(text + ": type " + toRType(efaceOf(&r)._type).string())
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printpanicval(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// The implementation is the same as runtime.readvarint, except that this function
// uses unsafe.Pointer for speed.
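//
// For example, the two bytes 0xAC 0x02 decode to (0xAC&0x7F) + 0x02<<7 =
// 44 + 256 = 300.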
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += uint32(b&0x7F) << (shift & 31)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}

// A PanicNilError happens when code calls panic(nil).
//
// Before Go 1.21, programs that called panic(nil) observed recover returning nil.
// Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError.
// Programs can change back to the old behavior by setting GODEBUG=panicnil=1.
type PanicNilError struct {
	// This field makes PanicNilError structurally different from
	// any other struct in this package, and the _ makes it different
	// from any struct in other packages too.
	// This avoids any accidental conversions being possible
	// between this struct and some other struct sharing the same fields,
	// like happened in go.dev/issue/56603.
	_ [0]*PanicNilError
}

func (*PanicNilError) Error() string { return "panic called with nil argument" }
func (*PanicNilError) RuntimeError() {}
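
// For example, user code can detect a nil panic like this (a minimal
// sketch):
//
//	defer func() {
//		if r := recover(); r != nil {
//			if _, ok := r.(*runtime.PanicNilError); ok {
//				println("panic(nil) was called")
//			}
//		}
//	}()
//	panic(nil)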

var panicnil = &godebugInc{name: "panicnil"}

// The implementation of the predeclared function panic.
// The compiler emits calls to this function.
//
// gopanic should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - go.undefinedlabs.com/scopeagent
//   - github.com/goplus/igop
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname gopanic
func gopanic(e any) {
	if e == nil {
		if debug.panicnil.Load() != 1 {
			e = new(PanicNilError)
		} else {
			panicnil.IncNonDefault()
		}
	}

	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printpanicval(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printpanicval(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e

	runningPanicDefers.Add(1)

	p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		fn, ok := p.nextDefer()
		if !ok {
			break
		}
		fn()
	}

	// If we're tracing, flush the current generation to make the trace more
	// readable.
	//
	// TODO(aktau): Handle a panic from within traceAdvance more gracefully.
	// Currently it would hang. Not handled now because it is very unlikely, and
	// already unrecoverable.
	if traceEnabled() {
		traceAdvance(false)
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(&p)

	fatalpanic(&p)   // should not return
	*(*int)(nil) = 0 // not reached
}

// start initializes a panic to start unwinding the stack.
//
// If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer) {
	gp := getg()

	// Record the caller's PC and SP, so recovery can identify panics
	// that have been recovered. Also, so that if p is from Goexit, we
	// can restart its defer processing loop if a recovered panic tries
	// to jump past it.
	p.startPC = getcallerpc()
	p.startSP = unsafe.Pointer(getcallersp())

	if p.deferreturn {
		p.sp = sp

		if s := (*savedOpenDeferState)(gp.param); s != nil {
			// recovery saved some state for us, so that we can resume
			// calling open-coded defers without unwinding the stack.

			gp.param = nil

			p.retpc = s.retpc
			p.deferBitsPtr = (*byte)(add(sp, s.deferBitsOffset))
			p.slotsPtr = add(sp, s.slotsOffset)
		}

		return
	}

	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(p)))

	// Initialize state machine, and find the first frame with a defer.
	//
	// Note: We could use startPC and startSP here, but callers will
	// never have defer statements themselves. By starting at their
	// caller instead, we avoid needing to unwind through an extra
	// frame. It also somewhat simplifies the terminating condition for
	// deferreturn.
	p.lr, p.fp = pc, sp
	p.nextFrame()
}

// nextDefer returns the next deferred function to invoke, if any.
//
// Note: The "ok bool" result is necessary to correctly handle when
// the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool) {
	gp := getg()

	if !p.deferreturn {
		if gp._panic != p {
			throw("bad panic stack")
		}

		if p.recovered {
			mcall(recovery) // does not return
			throw("recovery failed")
		}
	}

	// The assembler adjusts p.argp in wrapper functions that shouldn't
	// be visible to recover(), so we need to restore it each iteration.
	p.argp = add(p.startSP, sys.MinFrameSize)

	for {
		for p.deferBitsPtr != nil {
			bits := *p.deferBitsPtr

			// Check whether any open-coded defers are still pending.
			//
			// Note: We need to check this upfront (rather than after
			// clearing the top bit) because it's possible that Goexit
			// invokes a deferred call, and there were still more pending
			// open-coded defers in the frame; but then the deferred call
			// panicked and invoked the remaining defers in the frame, before
			// recovering and restarting the Goexit loop.
			if bits == 0 {
				p.deferBitsPtr = nil
				break
			}

			// Find index of top bit set.
			i := 7 - uintptr(sys.LeadingZeros8(bits))

			// Clear bit and store it back.
			bits &^= 1 << i
			*p.deferBitsPtr = bits

			return *(*func())(add(p.slotsPtr, i*goarch.PtrSize)), true
		}

	Recheck:
		if d := gp._defer; d != nil && d.sp == uintptr(p.sp) {
			if d.rangefunc {
				deferconvert(d)
				popDefer(gp)
				goto Recheck
			}

			fn := d.fn

			// TODO(mdempsky): Instead of having each deferproc call have
			// its own "deferreturn(); return" sequence, we should just make
			// them reuse the one we emit for open-coded defers.
			p.retpc = d.pc

			// Unlink and free.
			popDefer(gp)

			return fn, true
		}

		if !p.nextFrame() {
			return nil, false
		}
	}
}

// nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool) {
	if p.lr == 0 {
		return false
	}

	gp := getg()
	systemstack(func() {
		var limit uintptr
		if d := gp._defer; d != nil {
			limit = d.sp
		}

		var u unwinder
		u.initAt(p.lr, uintptr(p.fp), 0, gp, 0)
		for {
			if !u.valid() {
				p.lr = 0
				return // ok == false
			}

			// TODO(mdempsky): If we populate u.frame.fn.deferreturn for
			// every frame containing a defer (not just open-coded defers),
			// then we can simply loop until we find the next frame where
			// it's non-zero.

			if u.frame.sp == limit {
				break // found a frame with linked defers
			}

			if p.initOpenCodedDefers(u.frame.fn, unsafe.Pointer(u.frame.varp)) {
				break // found a frame with open-coded defers
			}

			u.next()
		}

		p.lr = u.frame.lr
		p.sp = unsafe.Pointer(u.frame.sp)
		p.fp = unsafe.Pointer(u.frame.fp)

		ok = true
	})

	return
}

func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
	fd := funcdata(fn, abi.FUNCDATA_OpenCodedDeferInfo)
	if fd == nil {
		return false
	}

	if fn.deferreturn == 0 {
		throw("missing deferreturn")
	}

	deferBitsOffset, fd := readvarintUnsafe(fd)
	deferBitsPtr := (*uint8)(add(varp, -uintptr(deferBitsOffset)))
	if *deferBitsPtr == 0 {
		return false // has open-coded defers, but none pending
	}

	slotsOffset, fd := readvarintUnsafe(fd)

	p.retpc = fn.entry() + uintptr(fn.deferreturn)
	p.deferBitsPtr = deferBitsPtr
	p.slotsPtr = add(varp, -uintptr(slotsOffset))

	return true
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//
//go:nosplit
func gorecover(argp uintptr) any {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:linkname sync_fatal sync.fatal
func sync_fatal(s string) {
	fatal(s)
}

// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
// NOTE: temporarily marked "go:noinline" pending investigation/fix of
// issue #67274, so as to fix longtest builders.
//
// throw should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/cockroachdb/pebble
//   - github.com/dgraph-io/ristretto
//   - github.com/outcaste-io/ristretto
//   - github.com/pingcap/br
//   - gvisor.dev/gvisor
//   - github.com/sagernet/gvisor
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname throw
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeRuntime)
}

// fatal triggers a fatal error that dumps a stack trace and exits.
//
// fatal is equivalent to throw, but is used when user code is expected to be
// at fault for the failure, such as racing map writes.
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
	// Everything fatal does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ")
		printindented(s) // logically printpanicval(s), but avoids convTstring write barrier
		print("\n")
	})

	fatalthrow(throwTypeUser)
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
var panicking atomic.Uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
//
// However, if unwinding the stack would skip over a Goexit call, we
// return into the Goexit loop instead, so it can continue processing
// defers.
func recovery(gp *g) {
	p := gp._panic
	pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp)
	p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0

	// Unwind the panic stack.
	for ; p != nil && uintptr(p.startSP) < sp; p = p.link {
		// Don't allow jumping past a pending Goexit.
		// Instead, have its _panic.start() call return again.
		//
		// TODO(mdempsky): In this case, Goexit will resume walking the
		// stack where it left off, which means it will need to rewalk
		// frames that we've already processed.
		//
		// There's a similar issue with nested panics, when the inner
		// panic supersedes the outer panic. Again, we end up needing to
		// walk the same stack frames.
		//
		// These are probably pretty rare occurrences in practice, and
		// they don't seem any worse than the existing logic. But if we
		// move the unwinding state into _panic, we could detect when we
		// run into where the last panic started, and then just pick up
		// where it left off instead.
		//
		// With how subtle defer handling is, this might not actually be
		// worthwhile though.
		if p.goexit {
			pc, sp = p.startPC, uintptr(p.startSP)
			saveOpenDeferState = false // goexit is unwinding the stack anyway
			break
		}

		runningPanicDefers.Add(-1)
	}
	gp._panic = p

	if p == nil { // must be done with signal
		gp.sig = 0
	}

	if gp.param != nil {
		throw("unexpected gp.param")
	}
	if saveOpenDeferState {
		// If we're returning to deferreturn and there are more open-coded
		// defers for it to call, save enough state for it to be able to
		// pick up where p0 left off.
		gp.param = unsafe.Pointer(&savedOpenDeferState{
			retpc: p0.retpc,

			// We need to save deferBitsPtr and slotsPtr too, but those are
			// stack pointers. To avoid issues around heap objects pointing
			// to the stack, save them as offsets from SP.
			deferBitsOffset: uintptr(unsafe.Pointer(p0.deferBitsPtr)) - uintptr(p0.sp),
			slotsOffset:     uintptr(p0.slotsPtr) - uintptr(p0.sp),
		})
	}

	// TODO(mdempsky): Currently, we rely on frames containing "defer"
	// to end with "CALL deferreturn; RET". This allows deferreturn to
	// finish running any pending defers in the frame.
	//
	// But we should be able to tell whether there are still pending
	// defers here. If there aren't, we can just jump directly to the
	// "RET" instruction. And if there are, we don't need an actual
	// "CALL deferreturn" instruction; we can simulate it with something
	// like:
	//
	//	if usesLR {
	//		lr = pc
	//	} else {
	//		sp -= sizeof(pc)
	//		*(*uintptr)(sp) = pc
	//	}
	//	pc = funcPC(deferreturn)
	//
	// So that we effectively tail call into deferreturn, such that it
	// then returns to the simple "RET" epilogue. That would save the
	// overhead of the "deferreturn" call when there aren't actually any
	// pending defers left, and shrink the TEXT size of compiled
	// binaries. (Admittedly, both of these are modest savings.)

	// Ensure we're recovering within the appropriate stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	// Restore the bp on platforms that support frame pointers.
	// N.B. It's fine to not set anything for platforms that don't
	// support frame pointers, since nothing consumes them.
	switch {
	case goarch.IsAmd64 != 0:
		// on x86, fp actually points one word higher than the top of
		// the frame since the return address is saved on the stack by
		// the caller
		gp.sched.bp = fp - 2*goarch.PtrSize
	case goarch.IsArm64 != 0:
		// on arm64, the architectural bp points one word higher
		// than the sp. fp is totally useless to us here, because it
		// only gets us to the caller's fp.
		gp.sched.bp = sp - goarch.PtrSize
	}
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
		if isSecureMode() {
			exit(2)
		}

		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			runningPanicDefers.Add(-1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	gp := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	gp.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if gp.m.locks < 0 {
		gp.m.locks = 1
	}

	switch gp.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		gp.m.dying = 1
		panicking.Add(1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		gp.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		gp.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

// gp is the crashing g running on this M, but may be a user G, while getg() is
// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if panicking.Add(-1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic() bool {
	gp := getg()
	mp := acquirem()

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp != mp.curg {
		releasem(mp)
		return false
	}
	// N.B. mp.locks != 1 instead of 0 to account for acquirem.
	if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
		releasem(mp)
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		releasem(mp)
		return false
	}
	if GOOS == "windows" && mp.libcallsp != 0 {
		releasem(mp)
		return false
	}
	releasem(mp)
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == abi.FuncID_abort
}