// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Though the debug call function feature is not enabled on ppc64,
// ppc64 is included here to avoid a "missing Go declaration" error
// for debugCallPanicked when building runtime.test.
//go:build amd64 || arm64 || ppc64le || ppc64

package runtime

import (
	"internal/abi"
	"unsafe"
)

const (
	debugCallSystemStack = "executing on Go runtime stack"
	debugCallUnknownFunc = "call from unknown function"
	debugCallRuntime     = "call from within the Go runtime"
	debugCallUnsafePoint = "call not at safe point"
)

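// debugCallV2 and debugCallPanicked have no Go bodies; they are
// implemented in per-architecture assembly. These declarations
// provide their Go signatures.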
func debugCallV2()
func debugCallPanicked(val any)

// debugCallCheck checks whether it is safe to inject a debugger
// function call with return PC pc. If not, it returns a string
// explaining why.
//
//go:nosplit
func debugCallCheck(pc uintptr) string {
	// No user calls from the system stack.
	if getg() != getg().m.curg {
		return debugCallSystemStack
	}
	if sp := getcallersp(); !(getg().stack.lo < sp && sp <= getg().stack.hi) {
		// Fast syscalls (nanotime) and racecall switch to the
		// g0 stack without switching g. We can't safely make
		// a call in this state. (We can't even safely
		// systemstack.)
		return debugCallSystemStack
	}

	// Switch to the system stack to avoid overflowing the user
	// stack.
	var ret string
	systemstack(func() {
		f := findfunc(pc)
		if !f.valid() {
			ret = debugCallUnknownFunc
			return
		}

		name := funcname(f)

		switch name {
		case "debugCall32",
			"debugCall64",
			"debugCall128",
			"debugCall256",
			"debugCall512",
			"debugCall1024",
			"debugCall2048",
			"debugCall4096",
			"debugCall8192",
			"debugCall16384",
			"debugCall32768",
			"debugCall65536":
			// These functions are allowed so that the debugger can initiate multiple function calls.
			// See: https://golang.org/cl/161137/
			return
		}

		// Disallow calls from the runtime. We could
		// potentially make this condition tighter (e.g., not
		// when locks are held), but there are enough tightly
		// coded sequences (e.g., defer handling) that it's
		// better to play it safe.
		if pfx := "runtime."; len(name) > len(pfx) && name[:len(pfx)] == pfx {
			ret = debugCallRuntime
			return
		}

		// Check that this isn't an unsafe-point.
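		// pc is a return address, so unless it is the function's
		// entry point it points to the instruction after the call;
		// back up one byte so the PCDATA lookup describes the call
		// instruction itself.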
		if pc != f.entry() {
			pc--
		}
		up := pcdatavalue(f, abi.PCDATA_UnsafePoint, pc)
		if up != abi.UnsafePointSafe {
			// Not at a safe point.
			ret = debugCallUnsafePoint
		}
	})
	return ret
}

// debugCallWrap starts a new goroutine to run a debug call and blocks
// the calling goroutine. On the new goroutine, it prepares to recover
// panics from the debug call, and then calls the call-dispatching
// function at PC dispatch.
//
// This must be deeply nosplit because there are untyped values on the
// stack from debugCallV2.
//
//go:nosplit
func debugCallWrap(dispatch uintptr) {
	var lockedExt uint32
	callerpc := getcallerpc()
	gp := getg()

	// Lock ourselves to the OS thread.
	//
	// Debuggers rely on us running on the same thread until we get to
	// dispatch the function they asked us to call.
	//
	// We're going to transfer this to the new G we create below.
	lockOSThread()

	// Create a new goroutine to execute the call on. Run this on
	// the system stack to avoid growing our stack.
	systemstack(func() {
		// TODO(mknyszek): It would be nice to wrap these arguments in an allocated
		// closure and start the goroutine with that closure, but the compiler disallows
		// implicit closure allocation in the runtime.
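		// fn is a func value, represented as a pointer to a funcval;
		// reinterpret &fn as a **funcval to recover the funcval
		// pointer that newproc1 expects.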
		fn := debugCallWrap1
		newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc, false, waitReasonZero)
		args := &debugCallWrapArgs{
			dispatch: dispatch,
			callingG: gp,
		}
		newg.param = unsafe.Pointer(args)

		// Transfer locked-ness to the new goroutine.
		// Save lock state to restore later.
		mp := gp.m
		if mp != gp.lockedm.ptr() {
			throw("inconsistent lockedm")
		}
		// Save the external lock count and clear it so
		// that it can't be unlocked from the debug call.
		// Note: we already locked internally to the thread,
		// so if we were locked before we're still locked now.
		lockedExt = mp.lockedExt
		mp.lockedExt = 0

		mp.lockedg.set(newg)
		newg.lockedm.set(mp)
		gp.lockedm = 0

		// Mark the calling goroutine as being at an async
		// safe-point, since it has a few conservative frames
		// at the bottom of the stack. This also prevents
		// stack shrinks.
		gp.asyncSafePoint = true

		// Stash newg away so we can execute it below (mcall's
		// closure can't capture anything).
		gp.schedlink.set(newg)
	})

	// Switch to the new goroutine.
	mcall(func(gp *g) {
		// Get newg.
		newg := gp.schedlink.ptr()
		gp.schedlink = 0

		// Park the calling goroutine.
		trace := traceAcquire()
		if trace.ok() {
			// Trace the event before the transition. It may take a
			// stack trace, but we won't own the stack after the
			// transition anymore.
			trace.GoPark(traceBlockDebugCall, 1)
		}
		casGToWaiting(gp, _Grunning, waitReasonDebugCall)
		if trace.ok() {
			traceRelease(trace)
		}
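		// Disassociate gp from this M; execute will install newg
		// as the M's current goroutine.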
		dropg()

		// Directly execute the new goroutine. The debug
		// protocol will continue on the new goroutine, so
		// it's important we not just let the scheduler do
		// this or it may resume a different goroutine.
		execute(newg, true)
	})

	// We'll resume here when the call returns.

	// Restore locked state.
	mp := gp.m
	mp.lockedExt = lockedExt
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)

	// Undo the lockOSThread we did earlier.
	unlockOSThread()

	gp.asyncSafePoint = false
}

type debugCallWrapArgs struct {
	dispatch uintptr
	callingG *g
}

// debugCallWrap1 is the continuation of debugCallWrap on the callee
// goroutine.
func debugCallWrap1() {
	gp := getg()
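	// debugCallWrap stashed our arguments in gp.param; retrieve them
	// and clear the field so the args may be collected.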
	args := (*debugCallWrapArgs)(gp.param)
	dispatch, callingG := args.dispatch, args.callingG
	gp.param = nil

	// Dispatch call and trap panics.
	debugCallWrap2(dispatch)

	// Resume the caller goroutine.
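	// As in debugCallWrap, stash callingG in schedlink because
	// mcall's closure can't capture anything.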
	getg().schedlink.set(callingG)
	mcall(func(gp *g) {
		callingG := gp.schedlink.ptr()
		gp.schedlink = 0

		// Unlock this goroutine from the M if necessary. The
		// calling G will relock.
		if gp.lockedm != 0 {
			gp.lockedm = 0
			gp.m.lockedg = 0
		}

		// Switch back to the calling goroutine. At some point
		// the scheduler will schedule us again and we'll
		// finish exiting.
		trace := traceAcquire()
		if trace.ok() {
			// Trace the event before the transition. It may take a
			// stack trace, but we won't own the stack after the
			// transition anymore.
			trace.GoSched()
		}
		casgstatus(gp, _Grunning, _Grunnable)
		if trace.ok() {
			traceRelease(trace)
		}
		dropg()
		lock(&sched.lock)
		globrunqput(gp)
		unlock(&sched.lock)

		trace = traceAcquire()
		casgstatus(callingG, _Gwaiting, _Grunnable)
		if trace.ok() {
			trace.GoUnpark(callingG, 0)
			traceRelease(trace)
		}
		execute(callingG, true)
	})
}

func debugCallWrap2(dispatch uintptr) {
	// Call the dispatch function and trap panics.
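	// Construct a callable func value from the raw dispatch PC: wrap
	// the PC in a funcval and point dispatchF's header at it.
	// noescape hides the stack address from escape analysis.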
	var dispatchF func()
	dispatchFV := funcval{dispatch}
	*(*unsafe.Pointer)(unsafe.Pointer(&dispatchF)) = noescape(unsafe.Pointer(&dispatchFV))

	var ok bool
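	// If dispatchF panics, ok is never set to true; recover the panic
	// value and report it to the debugger via debugCallPanicked.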
	defer func() {
		if !ok {
			err := recover()
			debugCallPanicked(err)
		}
	}()
	dispatchF()
	ok = true
}