// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix

// When cross-compiling with clang to linux/armv5, atomics are emulated
// and cause a compiler warning. This results in a build failure since
// cgo uses -Werror. See #65290.
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Watomic-alignment"

#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> // strerror
#include <time.h>
#include "libcgo.h"
#include "libcgo_unix.h"

static pthread_cond_t runtime_init_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t runtime_init_mu = PTHREAD_MUTEX_INITIALIZER;
static int runtime_init_done;

// pthread_g is a pthread-specific key used to store the g bound to a C thread.
// The registered pthread_key_destructor calls dropm when a C thread exits,
// if the thread's pthread-specific value g is not NULL.
static pthread_key_t pthread_g;
static void pthread_key_destructor(void* g);
uintptr_t x_cgo_pthread_key_created;
void (*x_crosscall2_ptr)(void (*fn)(void *), void *, int, size_t);
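// x_cgo_pthread_key_created records whether pthread_g has been created, so the
// key is created at most once for the whole program. x_crosscall2_ptr is set by
// the Go runtime to point at crosscall2; calling it with fn == NULL asks the
// runtime to dropm (see pthread_key_destructor below).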

// The context function, used when tracing back C calls into Go.
static void (*cgo_context_function)(struct context_arg*);

void
x_cgo_sys_thread_create(void* (*func)(void*), void* arg) {
	pthread_t p;
	int err = _cgo_try_pthread_create(&p, NULL, func, arg);
	if (err != 0) {
		fprintf(stderr, "pthread_create failed: %s", strerror(err));
		abort();
	}
}

uintptr_t
_cgo_wait_runtime_init_done(void) {
	void (*pfn)(struct context_arg*);
	pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);

	int done = 2;
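	// runtime_init_done is 0 until x_cgo_notify_runtime_init_done stores 1.
	// Callers that observe a value other than 2 take the slow path below,
	// create the pthread key if needed, and store 2 so that later callers
	// can skip the mutex entirely.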
	if (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) != done) {
		pthread_mutex_lock(&runtime_init_mu);
		while (__atomic_load_n(&runtime_init_done, __ATOMIC_CONSUME) == 0) {
			pthread_cond_wait(&runtime_init_cond, &runtime_init_mu);
		}

		// The key and x_cgo_pthread_key_created are for the whole program,
		// whereas the thread-specific value and the destructor are per thread.
		if (x_cgo_pthread_key_created == 0 && pthread_key_create(&pthread_g, pthread_key_destructor) == 0) {
			x_cgo_pthread_key_created = 1;
		}

		// TODO(iant): For the case of a new C thread calling into Go, such
		// as when using -buildmode=c-archive, we know that Go runtime
		// initialization is complete but we do not know that all Go init
		// functions have been run. We should not fetch cgo_context_function
		// until they have been, because that is where a call to
		// SetCgoTraceback is likely to occur. We are going to wait for Go
		// initialization to be complete anyhow, later, by waiting for
		// main_init_done to be closed in cgocallbackg1. We should wait here
		// instead. See also issue #15943.
		pfn = __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);

		__atomic_store_n(&runtime_init_done, done, __ATOMIC_RELEASE);
		pthread_mutex_unlock(&runtime_init_mu);
	}

	if (pfn != nil) {
		struct context_arg arg;

		arg.Context = 0;
		(*pfn)(&arg);
		return arg.Context;
	}
	return 0;
}

// _cgo_set_stacklo sets g->stacklo based on the stack size.
// This is common code called from x_cgo_init, which is itself
// called by rt0_go in the runtime package.
void _cgo_set_stacklo(G *g, uintptr *pbounds)
{
	uintptr bounds[2];

	// pbounds can be passed in by the caller; see gcc_linux_amd64.c.
	if (pbounds == NULL) {
		pbounds = &bounds[0];
	}

	x_cgo_getstackbound(pbounds);
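	// x_cgo_getstackbound fills pbounds[0] with the stack's low bound (and
	// pbounds[1] with its high bound); only the low bound is needed here.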

	g->stacklo = *pbounds;

	// Sanity check the results now, rather than crashing later
	// with a morestack on g0.
	if (g->stacklo >= g->stackhi) {
		fprintf(stderr, "runtime/cgo: bad stack bounds: lo=%p hi=%p\n", (void*)(g->stacklo), (void*)(g->stackhi));
		abort();
	}
}

// Store g in the thread-specific value associated with the pthread key pthread_g;
// the registered pthread_key_destructor will dropm when the thread exits.
void x_cgo_bindm(void* g) {
	// We assume this always succeeds; otherwise an extra M may leak
	// when a C thread exits after a cgo call.
	// This function is invoked only once per thread, in runtime.needAndBindM;
	// subsequent calls simply reuse the already-bound m.
	pthread_setspecific(pthread_g, g);
}

void
x_cgo_notify_runtime_init_done(void* dummy __attribute__ ((unused))) {
	pthread_mutex_lock(&runtime_init_mu);
	__atomic_store_n(&runtime_init_done, 1, __ATOMIC_RELEASE);
	pthread_cond_broadcast(&runtime_init_cond);
	pthread_mutex_unlock(&runtime_init_mu);
}

// Sets the context function to call to record the traceback context
// when calling a Go function from C code. Called from runtime.SetCgoTraceback.
void x_cgo_set_context_function(void (*context)(struct context_arg*)) {
	__atomic_store_n(&cgo_context_function, context, __ATOMIC_RELEASE);
}

// Gets the context function.
void (*(_cgo_get_context_function(void)))(struct context_arg*) {
	return __atomic_load_n(&cgo_context_function, __ATOMIC_CONSUME);
}
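
// An illustrative sketch (kept out of the build with #if 0) of roughly what a
// context function registered from Go via runtime.SetCgoTraceback might look
// like; the helper names are hypothetical. When arg->Context is 0 the function
// records the current C context and stores a nonzero cookie; when it is
// nonzero, a previously returned context is being released.
#if 0
static void
example_cgo_context(struct context_arg *arg) {
	if (arg->Context == 0) {
		// Record the current C execution context and return a cookie.
		arg->Context = (uintptr)example_acquire_context(); // hypothetical helper
	} else {
		// The context is no longer needed; release any associated resources.
		example_release_context((void*)arg->Context); // hypothetical helper
	}
}
#endif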

// _cgo_try_pthread_create retries pthread_create if it fails with
// EAGAIN.
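// It makes up to 20 attempts, sleeping 1ms, 2ms, ..., 20ms between failures
// (roughly 210ms in total) before giving up and returning EAGAIN.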
int
_cgo_try_pthread_create(pthread_t* thread, const pthread_attr_t* attr, void* (*pfn)(void*), void* arg) {
	int tries;
	int err;
	struct timespec ts;

	for (tries = 0; tries < 20; tries++) {
		err = pthread_create(thread, attr, pfn, arg);
		if (err == 0) {
			pthread_detach(*thread);
			return 0;
		}
		if (err != EAGAIN) {
			return err;
		}
		ts.tv_sec = 0;
		ts.tv_nsec = (tries + 1) * 1000 * 1000; // Milliseconds.
		nanosleep(&ts, nil);
	}
	return EAGAIN;
}

static void
pthread_key_destructor(void* g) {
	if (x_crosscall2_ptr != NULL) {
		// fn == NULL means dropm.
		// Restore g from the value stored here before dropm runs in
		// runtime.cgocallback, since on some platforms the g stored in TLS
		// by Go may already be cleared before this destructor is invoked.
		x_crosscall2_ptr(NULL, g, 0, 0);
	}
}