// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Cgo call and callback support.
//
// To call into the C function f from Go, the cgo-generated code calls
// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
// gcc-compiled function written by cgo.
//
// runtime.cgocall (below) calls entersyscall so as not to block
// other goroutines or the garbage collector, and then calls
// runtime.asmcgocall(_cgo_Cfunc_f, frame).
//
// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
// (assumed to be an operating system-allocated stack, so safe to run
// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
//
// _cgo_Cfunc_f invokes the actual C function f with arguments
// taken from the frame structure, records the results in the frame,
// and returns to runtime.asmcgocall.
//
// After it regains control, runtime.asmcgocall switches back to the
// original g (m->curg)'s stack and returns to runtime.cgocall.
//
// After it regains control, runtime.cgocall calls exitsyscall, which blocks
// until this m can run Go code without violating the $GOMAXPROCS limit,
// and then unlocks g from m.
//
// The above description skipped over the possibility of the gcc-compiled
// function f calling back into Go. If that happens, we continue down
// the rabbit hole during the execution of f.
//
// To make it possible for gcc-compiled C code to call a Go function p.GoF,
// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
// know about packages).  The gcc-compiled C function f calls GoF.
//
// GoF initializes "frame", a structure containing all of its
// arguments and slots for p.GoF's results. It calls
// crosscall2(_cgoexp_GoF, frame, framesize, ctxt) using the gcc ABI.
//
// crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from
// the gcc function call ABI to the gc function call ABI. At this
// point we're in the Go runtime, but we're still running on m.g0's
// stack and outside the $GOMAXPROCS limit. crosscall2 calls
// runtime.cgocallback(_cgoexp_GoF, frame, ctxt) using the gc ABI.
// (crosscall2's framesize argument is no longer used, but there's one
// case where SWIG calls crosscall2 directly and expects to pass this
// argument. See _cgo_panic.)
//
// runtime.cgocallback (in asm_$GOARCH.s) switches from m.g0's stack
// to the original g (m.curg)'s stack, on which it calls
// runtime.cgocallbackg(_cgoexp_GoF, frame, ctxt). As part of the
// stack switch, runtime.cgocallback saves the current SP as
// m.g0.sched.sp, so that any use of m.g0's stack during the execution
// of the callback will be done below the existing stack frames.
// Before overwriting m.g0.sched.sp, it pushes the old value on the
// m.g0 stack, so that it can be restored later.
//
// runtime.cgocallbackg (below) is now running on a real goroutine
// stack (not an m.g0 stack).  First it calls runtime.exitsyscall, which will
// block until the $GOMAXPROCS limit allows running this goroutine.
// Once exitsyscall has returned, it is safe to do things like call the memory
// allocator or invoke the Go callback function.  runtime.cgocallbackg
// first defers a function to unwind m.g0.sched.sp, so that if p.GoF
// panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack
// and the m.curg stack will be unwound in lock step.
// Then it calls _cgoexp_GoF(frame).
//
// _cgoexp_GoF, which was generated by cmd/cgo, unpacks the arguments
// from frame, calls p.GoF, writes the results back to frame, and
// returns. Now we start unwinding this whole process.
//
// runtime.cgocallbackg pops but does not execute the deferred
// function to unwind m.g0.sched.sp, calls runtime.entersyscall, and
// returns to runtime.cgocallback.
//
// After it regains control, runtime.cgocallback switches back to
// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old
// m.g0.sched.sp value from the stack, and returns to crosscall2.
//
// crosscall2 restores the callee-save registers for gcc and returns
// to GoF, which unpacks any result values and returns to f.
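//
// For illustration only (not part of the runtime, and the exact shape of
// cgo-generated wrappers varies by Go version): a user-level call C.f(&x)
// expands into a Go-side wrapper that builds the argument frame and hands it
// to cgocall along with a pointer to the gcc-compiled _cgo_Cfunc_f. The
// wrapper name _Cfunc_f below is an assumption about cgo's naming; treat this
// as a sketch of the Go-to-C direction described above:
//
//	// Hypothetical sketch of a cgo-generated wrapper for C.f.
//	func _Cfunc_f(p unsafe.Pointer) {
//		var frame struct{ p unsafe.Pointer } // argument and result slots
//		frame.p = p
//		cgocall(_cgo_Cfunc_f, unsafe.Pointer(&frame)) // enter "system call", then asmcgocall
//	}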

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"runtime/internal/sys"
	"unsafe"
)

// Addresses collected in a cgo backtrace when crashing.
// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
type cgoCallers [32]uintptr

// argset matches runtime/cgo/linux_syscall.c:argset_t
type argset struct {
	args   unsafe.Pointer
	retval uintptr
}

// wrapper for syscall package to call cgocall for libc (cgo) calls.
//
//go:linkname syscall_cgocaller syscall.cgocaller
//go:nosplit
//go:uintptrescapes
func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
	as := argset{args: unsafe.Pointer(&args[0])}
	cgocall(fn, unsafe.Pointer(&as))
	return as.retval
}
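
// For illustration only (an assumption about the caller, not part of this
// file): the go:linkname directive above exposes this function to the syscall
// package under the name syscall.cgocaller. On that side, a matching bodyless
// declaration would look roughly like this, with fn pointing at a
// gcc-compiled libc wrapper whose arguments are packed into an argset:
//
//	func cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr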

var ncgocall uint64 // number of cgo calls in total for dead m

// Call from Go to C.
//
// This must be nosplit because it's used for syscalls on some
// platforms. Syscalls may have untyped arguments on the stack, so
// it's not safe to grow or scan the stack.
//
// cgocall should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/ebitengine/purego
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname cgocall
//go:nosplit
func cgocall(fn, arg unsafe.Pointer) int32 {
	if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
		throw("cgocall unavailable")
	}

	if fn == nil {
		throw("cgocall nil")
	}

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	mp := getg().m
	mp.ncgocall++

	// Reset traceback.
	mp.cgoCallers[0] = 0

	// Announce we are entering a system call
	// so that the scheduler knows to create another
	// M to run goroutines while we are in the
	// foreign code.
	//
	// The call to asmcgocall is guaranteed not to
	// grow the stack and does not allocate memory,
	// so it is safe to call while "in a system call", outside
	// the $GOMAXPROCS accounting.
	//
	// fn may call back into Go code, in which case we'll exit the
	// "system call", run the Go code (which may grow the stack),
	// and then re-enter the "system call" reusing the PC and SP
	// saved by entersyscall here.
	entersyscall()

	// Tell asynchronous preemption that we're entering external
	// code. We do this after entersyscall because this may block
	// and cause an async preemption to fail, but at this point a
	// sync preemption will succeed (though this is not a matter
	// of correctness).
	osPreemptExtEnter(mp)

	mp.incgo = true
	// We use ncgo as a check during execution tracing for whether there is
	// any C on the call stack, which there will be after this point. If
	// there isn't, we can use frame pointer unwinding to collect call
	// stacks efficiently. This will be the case for the first Go-to-C call
	// on a stack, so it's preferable to update it here, after we emit a
	// trace event in entersyscall above.
	mp.ncgo++

	errno := asmcgocall(fn, arg)

	// Update accounting before exitsyscall because exitsyscall may
	// reschedule us on to a different M.
	mp.incgo = false
	mp.ncgo--

	osPreemptExtExit(mp)

	// Save the current syscall parameters, so m.winsyscall can be
	// used again if the callback decides to make a syscall.
	winsyscall := mp.winsyscall

	exitsyscall()

	getg().m.winsyscall = winsyscall

	// Note that raceacquire must be called only after exitsyscall has
	// wired this M to a P.
	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	// From the garbage collector's perspective, time can move
	// backwards in the sequence above. If there's a callback into
	// Go code, GC will see this function at the call to
	// asmcgocall. When the Go call later returns to C, the
	// syscall PC/SP is rolled back and the GC sees this function
	// back at the call to entersyscall. Normally, fn and arg
	// would be live at entersyscall and dead at asmcgocall, so if
	// time moved backwards, GC would see these arguments as dead
	// and then live. Prevent these undead arguments from crashing
	// GC by forcing them to stay live across this time warp.
	KeepAlive(fn)
	KeepAlive(arg)
	KeepAlive(mp)

	return errno
}
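
// For illustration only (not part of the runtime): the "hall of shame" note
// above refers to out-of-tree packages such as github.com/ebitengine/purego
// reaching into the runtime via linkname. A hypothetical external declaration
// pulling in cgocall would look roughly like the following, which is exactly
// why the signature above must not change:
//
//	//go:linkname cgocall runtime.cgocall
//	func cgocall(fn, arg unsafe.Pointer) int32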

// Set or reset the system stack bounds for a callback on sp.
//
// Must be nosplit because it is called by needm prior to fully initializing
// the M.
//
//go:nosplit
func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
	g0 := mp.g0

	inBound := sp > g0.stack.lo && sp <= g0.stack.hi
	if mp.ncgo > 0 && !inBound {
		// ncgo > 0 indicates that this M was in Go further up the stack
		// (it called C and is now receiving a callback).
		//
		// !inBound indicates that we were called with SP outside the
		// expected system stack bounds (C changed the stack out from
		// under us between the cgocall and cgocallback?).
		//
		// It is not safe for the C call to change the stack out from
		// under us, so throw.

		// Note that this case isn't possible for signal == true, as
		// that is always passing a new M from needm.

		// Stack is bogus, but reset the bounds anyway so we can print.
		hi := g0.stack.hi
		lo := g0.stack.lo
		g0.stack.hi = sp + 1024
		g0.stack.lo = sp - 32*1024
		g0.stackguard0 = g0.stack.lo + stackGuard
		g0.stackguard1 = g0.stackguard0

		print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]")
		print("\n")
		exit(2)
	}

	if !mp.isextra {
		// We allocated the stack for standard Ms. Don't replace the
		// stack bounds with estimated ones when we already initialized
		// with the exact ones.
		return
	}

	// This M does not have Go further up the stack. However, it may have
	// previously called into Go, initializing the stack bounds. Between
	// that call returning and now the stack may have changed (perhaps the
	// C thread is running a coroutine library). We need to update the
	// stack bounds for this case.
	//
	// N.B. we need to update the stack bounds even if SP appears to
	// already be in bounds. Our "bounds" may actually be estimated dummy
	// bounds (below). The actual stack bounds could have shifted but still
	// have partial overlap with our dummy bounds. If we failed to update
	// in that case, we could find ourselves seemingly called near the
	// bottom of the stack bounds, where we quickly run out of space.

	// Set the stack bounds to match the current stack. We don't
	// actually know how big the stack is (just as we don't know how big
	// any scheduling stack is), but we assume there's at least 32 kB.
	// If we can get a more accurate stack bound from pthread, use that,
	// provided it actually contains SP.
	g0.stack.hi = sp + 1024
	g0.stack.lo = sp - 32*1024
	if !signal && _cgo_getstackbound != nil {
		// Don't adjust if called from the signal handler.
		// We are on the signal stack, not the pthread stack.
		// (We could get the stack bounds from sigaltstack, but
		// we're getting out of the signal handler very soon
		// anyway. Not worth it.)
		var bounds [2]uintptr
		asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
		// getstackbound is an unsupported no-op on Windows.
		//
		// Don't use these bounds if they don't contain SP. Perhaps we
		// were called by something not using the standard thread
		// stack.
		if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
			g0.stack.lo = bounds[0]
			g0.stack.hi = bounds[1]
		}
	}
	g0.stackguard0 = g0.stack.lo + stackGuard
	g0.stackguard1 = g0.stackguard0
}

// Call from C back to Go. fn must point to an ABIInternal Go entry-point.
//
//go:nosplit
func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		println("runtime: bad g in cgocallback")
		exit(2)
	}

	sp := gp.m.g0.sched.sp // system sp saved by cgocallback.
	callbackUpdateSystemStack(gp.m, sp, false)

	// The call from C is on gp.m's g0 stack, so we must ensure
	// that we stay on that M. We have to do this before calling
	// exitsyscall, since it would otherwise be free to move us to
	// a different M. The call to unlockOSThread is in this function
	// after cgocallbackg1, or in the case of panicking, in unwindm.
	lockOSThread()

	checkm := gp.m

	// Save the current syscall parameters, so m.winsyscall can be
	// used again if the callback decides to make a syscall.
	winsyscall := gp.m.winsyscall

	// entersyscall saves the caller's SP to allow the GC to trace the Go
	// stack. However, since we're returning to an earlier stack frame and
	// need to pair with the entersyscall() call made by cgocall, we must
	// save syscall* and let reentersyscall restore them.
	//
	// Note: savedsp and savedbp MUST be held in locals as an unsafe.Pointer.
	// When we call into Go, the stack is free to be moved. If these locals
	// aren't visible in the stack maps, they won't get updated properly,
	// and will end up being stale when restored by reentersyscall.
	savedsp := unsafe.Pointer(gp.syscallsp)
	savedpc := gp.syscallpc
	savedbp := unsafe.Pointer(gp.syscallbp)
	exitsyscall() // coming out of cgo call
	gp.m.incgo = false
	if gp.m.isextra {
		gp.m.isExtraInC = false
	}

	osPreemptExtExit(gp.m)

	if gp.nocgocallback {
		panic("runtime: function marked with #cgo nocallback called back into Go")
	}

	cgocallbackg1(fn, frame, ctxt)

	// At this point we're about to call unlockOSThread.
	// The following code must not change to a different m.
	// This is enforced by checking incgo in the schedule function.
	gp.m.incgo = true
	unlockOSThread()

	if gp.m.isextra {
		gp.m.isExtraInC = true
	}

	if gp.m != checkm {
		throw("m changed unexpectedly in cgocallbackg")
	}

	osPreemptExtEnter(gp.m)

	// going back to cgo call
	reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp))

	gp.m.winsyscall = winsyscall
}

func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()

	if gp.m.needextram || extraMWaiters.Load() > 0 {
		gp.m.needextram = false
		systemstack(newextram)
	}

	if ctxt != 0 {
		s := append(gp.cgoCtxt, ctxt)

		// Now we need to set gp.cgoCtxt = s, but we could get
		// a SIGPROF signal while manipulating the slice, and
		// the SIGPROF handler could pick up gp.cgoCtxt while
		// tracing up the stack.  We need to ensure that the
		// handler always sees a valid slice, so set the
		// values in an order such that it always does.
		p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
		atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
		p.cap = cap(s)
		p.len = len(s)

		defer func(gp *g) {
			// Decrease the length of the slice by one, safely.
			p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
			p.len--
		}(gp)
	}

	if gp.m.ncgo == 0 {
		// The C call to Go came from a thread not currently running
		// any Go. In the case of -buildmode=c-archive or c-shared,
		// this call may be coming in before package initialization
		// is complete. Wait until it is.
		<-main_init_done
	}

	// Check whether the profiler needs to be turned on or off; this route to
	// run Go code does not use runtime.execute, so bypasses the check there.
	hz := sched.profilehz
	if gp.m.profilehz != hz {
		setThreadCPUProfiler(hz)
	}

	// Add entry to defer stack in case of panic.
	restore := true
	defer unwindm(&restore)

	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	// Invoke callback. This function is generated by cmd/cgo and
	// will unpack the argument frame and call the Go function.
	var cb func(frame unsafe.Pointer)
	cbFV := funcval{uintptr(fn)}
	*(*unsafe.Pointer)(unsafe.Pointer(&cb)) = noescape(unsafe.Pointer(&cbFV))
	cb(frame)

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	// Do not unwind m->g0->sched.sp.
	// Our caller, cgocallback, will do that.
	restore = false
}
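
// For illustration only (not part of the runtime; GoAdd is a hypothetical
// name): a user package exports a Go function to C with an //export comment,
// and cmd/cgo emits the _cgoexp_* wrapper that cgocallbackg1 invokes through
// cb above. A minimal sketch of the user side:
//
//	//export GoAdd
//	func GoAdd(a, b C.int) C.int { return a + b }
//
// A C caller of GoAdd then follows the path described at the top of this
// file: crosscall2 -> cgocallback -> cgocallbackg -> cgocallbackg1 -> the
// generated _cgoexp_GoAdd -> GoAdd.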

func unwindm(restore *bool) {
	if *restore {
		// Restore sp saved by cgocallback during
		// unwind of g's stack (see comment at top of file).
		mp := acquirem()
		sched := &mp.g0.sched
		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))

		// Do the accounting that cgocall will not have a chance to do
		// during an unwind.
		//
		// In the case where a Go call originates from C, ncgo is 0
		// and there is no matching cgocall to end.
		if mp.ncgo > 0 {
			mp.incgo = false
			mp.ncgo--
			osPreemptExtExit(mp)
		}

		// Undo the call to lockOSThread in cgocallbackg, only on the
		// panicking path. In normal return case cgocallbackg will call
		// unlockOSThread, ensuring no preemption point after the unlock.
		// Here we don't need to worry about preemption, because we're
		// panicking out of the callback and unwinding the g0 stack,
		// instead of reentering cgo (which requires the same thread).
		unlockOSThread()

		releasem(mp)
	}
}

// called from assembly.
func badcgocallback() {
	throw("misaligned stack in cgocallback")
}

// called from (incomplete) assembly.
func cgounimpl() {
	throw("cgo not implemented")
}

var racecgosync uint64 // represents possible synchronization in C code

// Pointer checking for cgo code.

// We want to detect all cases where a program that does not use
// unsafe makes a cgo call passing a Go pointer to memory that
// contains an unpinned Go pointer. Here a Go pointer is defined as a
// pointer to memory allocated by the Go runtime. Programs that use
// unsafe can evade this restriction easily, so we don't try to catch
// them. The cgo program will rewrite all possibly bad pointer
// arguments to call cgoCheckPointer, where we can catch cases of a Go
// pointer pointing to an unpinned Go pointer.

// Complicating matters, taking the address of a slice or array
// element permits the C program to access all elements of the slice
// or array. In that case we will see a pointer to a single element,
// but we need to check the entire data structure.

// The cgoCheckPointer call takes additional arguments indicating that
// it was called on an address expression. An additional argument of
// true means that it only needs to check a single element. An
// additional argument of a slice or array means that it needs to
// check the entire slice/array, but nothing else. Otherwise, the
// pointer could be anything, and we check the entire heap object,
// which is conservative but safe.

// When and if we implement a moving garbage collector,
// cgoCheckPointer will pin the pointer for the duration of the cgo
// call.  (This is necessary but not sufficient; the cgo program will
// also have to change to pin Go pointers that cannot point to Go
// pointers.)
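
// For illustration only (not part of the runtime; C.take and the type names
// are hypothetical): in a cgo-using package, cmd/cgo rewrites pointer
// arguments so they pass through cgoCheckPointer. Under the rules above,
// and with pointer checking enabled (see the debug.cgocheck/CgoCheck2 test
// below), the first call is fine because the pointed-to memory contains no
// Go pointers, while the second panics with cgoCheckPointerFail because the
// argument points to memory holding an unpinned Go pointer:
//
//	type flat struct{ x, y int32 }
//	type nested struct{ p *int }
//
//	f := &flat{}
//	C.take(unsafe.Pointer(f)) // ok: *f contains no Go pointers
//
//	n := &nested{p: new(int)}
//	C.take(unsafe.Pointer(n)) // panics: Go pointer to unpinned Go pointer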

// cgoCheckPointer checks if the argument contains a Go pointer that
// points to an unpinned Go pointer, and panics if it does.
func cgoCheckPointer(ptr any, arg any) {
	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&ptr)
	t := ep._type

	top := true
	if arg != nil && (t.Kind_&abi.KindMask == abi.Pointer || t.Kind_&abi.KindMask == abi.UnsafePointer) {
		p := ep.data
		if t.Kind_&abi.KindDirectIface == 0 {
			p = *(*unsafe.Pointer)(p)
		}
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		aep := efaceOf(&arg)
		switch aep._type.Kind_ & abi.KindMask {
		case abi.Bool:
			if t.Kind_&abi.KindMask == abi.UnsafePointer {
				// We don't know the type of the element.
				break
			}
			pt := (*ptrtype)(unsafe.Pointer(t))
			cgoCheckArg(pt.Elem, p, true, false, cgoCheckPointerFail)
			return
		case abi.Slice:
			// Check the slice rather than the pointer.
			ep = aep
			t = ep._type
		case abi.Array:
			// Check the array rather than the pointer.
			// Pass top as false since we have a pointer
			// to the array.
			ep = aep
			t = ep._type
			top = false
		default:
			throw("can't happen")
		}
	}

	cgoCheckArg(t, ep.data, t.Kind_&abi.KindDirectIface == 0, top, cgoCheckPointerFail)
}

const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer"
const cgoResultFail = "cgo result is unpinned Go pointer or points to unpinned Go pointer"

// cgoCheckArg is the real work of cgoCheckPointer. The argument p
// is either a pointer to the value (of type t), or the value itself,
// depending on indir. The top parameter is whether we are at the top
// level, where Go pointers are allowed. Go pointers to pinned objects are
// allowed as long as they don't reference other unpinned pointers.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
	if !t.Pointers() || p == nil {
		// If the type has no pointers there is nothing to do.
		return
	}

	switch t.Kind_ & abi.KindMask {
	default:
		throw("can't happen")
	case abi.Array:
		at := (*arraytype)(unsafe.Pointer(t))
		if !indir {
			if at.Len != 1 {
				throw("can't happen")
			}
			cgoCheckArg(at.Elem, p, at.Elem.Kind_&abi.KindDirectIface == 0, top, msg)
			return
		}
		for i := uintptr(0); i < at.Len; i++ {
			cgoCheckArg(at.Elem, p, true, top, msg)
			p = add(p, at.Elem.Size_)
		}
	case abi.Chan, abi.Map:
		// These types contain internal pointers that will
		// always be allocated in the Go heap. It's never OK
		// to pass them to C.
		panic(errorString(msg))
	case abi.Func:
		if indir {
			p = *(*unsafe.Pointer)(p)
		}
		if !cgoIsGoPointer(p) {
			return
		}
		panic(errorString(msg))
	case abi.Interface:
		it := *(**_type)(p)
		if it == nil {
			return
		}
		// A type known at compile time is OK since it's
		// constant. A type not known at compile time will be
		// in the heap and will not be OK.
		if inheap(uintptr(unsafe.Pointer(it))) {
			panic(errorString(msg))
		}
		p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
		if !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(errorString(msg))
		}
		cgoCheckArg(it, p, it.Kind_&abi.KindDirectIface == 0, false, msg)
	case abi.Slice:
		st := (*slicetype)(unsafe.Pointer(t))
		s := (*slice)(p)
		p = s.array
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(errorString(msg))
		}
		if !st.Elem.Pointers() {
			return
		}
		for i := 0; i < s.cap; i++ {
			cgoCheckArg(st.Elem, p, true, false, msg)
			p = add(p, st.Elem.Size_)
		}
	case abi.String:
		ss := (*stringStruct)(p)
		if !cgoIsGoPointer(ss.str) {
			return
		}
		if !top && !isPinned(ss.str) {
			panic(errorString(msg))
		}
	case abi.Struct:
		st := (*structtype)(unsafe.Pointer(t))
		if !indir {
			if len(st.Fields) != 1 {
				throw("can't happen")
			}
			cgoCheckArg(st.Fields[0].Typ, p, st.Fields[0].Typ.Kind_&abi.KindDirectIface == 0, top, msg)
			return
		}
		for _, f := range st.Fields {
			if !f.Typ.Pointers() {
				continue
			}
			cgoCheckArg(f.Typ, add(p, f.Offset), true, top, msg)
		}
	case abi.Pointer, abi.UnsafePointer:
		if indir {
			p = *(*unsafe.Pointer)(p)
			if p == nil {
				return
			}
		}

		if !cgoIsGoPointer(p) {
			return
		}
		if !top && !isPinned(p) {
			panic(errorString(msg))
		}

		cgoCheckUnknownPointer(p, msg)
	}
}

// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
// memory. It checks whether that Go memory contains any other
// pointer into unpinned Go memory. If it does, we panic.
// The return values are unused but useful to see in panic tracebacks.
func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
	if inheap(uintptr(p)) {
		b, span, _ := findObject(uintptr(p), 0, 0)
		base = b
		if base == 0 {
			return
		}
		tp := span.typePointersOfUnchecked(base)
		for {
			var addr uintptr
			if tp, addr = tp.next(base + span.elemsize); addr == 0 {
				break
			}
			pp := *(*unsafe.Pointer)(unsafe.Pointer(addr))
			if cgoIsGoPointer(pp) && !isPinned(pp) {
				panic(errorString(msg))
			}
		}
		return
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			// We have no way to know the size of the object.
			// We have to assume that it might contain a pointer.
			panic(errorString(msg))
		}
		// In the text or noptr sections, we know that the
		// pointer does not point to a Go pointer.
	}

	return
}

// cgoIsGoPointer reports whether the pointer is a Go pointer--a
// pointer to Go memory. We only care about Go memory that might
// contain pointers.
//
//go:nosplit
//go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool {
	if p == nil {
		return false
	}

	if inHeapOrStack(uintptr(p)) {
		return true
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			return true
		}
	}

	return false
}

// cgoInRange reports whether p is between start and end.
//
//go:nosplit
//go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
	return start <= uintptr(p) && uintptr(p) < end
}

// cgoCheckResult is called to check the result parameter of an
// exported Go function. It panics if the result is or contains any
// other pointer into unpinned Go memory.
func cgoCheckResult(val any) {
	if !goexperiment.CgoCheck2 && debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&val)
	t := ep._type
	cgoCheckArg(t, ep.data, t.Kind_&abi.KindDirectIface == 0, false, cgoResultFail)
}

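// For illustration only (not part of the runtime; NewBuf is a hypothetical
// exported name): cmd/cgo routes the results of exported functions through
// cgoCheckResult, so an export that hands unpinned Go memory back to C
// panics with cgoResultFail when pointer checking is enabled:
//
//	//export NewBuf
//	func NewBuf() *byte { return new(byte) } // caught: result is an unpinned Go pointer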