/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <ruby/ruby.h>

#include "rb_channel.h"

#include <ruby/thread.h>

#include "rb_byte_buffer.h"
#include "rb_call.h"
#include "rb_channel_args.h"
#include "rb_channel_credentials.h"
#include "rb_completion_queue.h"
#include "rb_grpc.h"
#include "rb_grpc_imports.generated.h"
#include "rb_server.h"
#include "rb_xds_channel_credentials.h"

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>

/* id_channel is the name of the hidden ivar that preserves a reference to the
 * channel on a call, so that calls are not GCed before their channel. */
static ID id_channel;

/* id_target is the name of the hidden ivar that preserves a reference to the
 * target string used to create the call, preserved so that it does not get
 * GCed before the channel. */
static ID id_target;

/* Hidden ivar that synchronizes post-fork channel re-creation. */
static ID id_channel_recreation_mu;

/* id_insecure_channel is the symbol used to indicate that a channel is
 * insecure. */
static VALUE id_insecure_channel;

/* grpc_rb_cChannel is the ruby class that proxies grpc_channel. */
static VALUE grpc_rb_cChannel = Qnil;

/* Used during the conversion of a hash to channel args during channel setup. */
static VALUE grpc_rb_cChannelArgs;

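/* A bg_watched_channel holds one reference for the Ruby-level channel wrapper
 * and one extra reference while the background polling thread has a
 * connectivity watch pending on it; the node is removed from the list and
 * freed only once the refcount drops to zero and the underlying grpc_channel
 * has been destroyed. */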
typedef struct bg_watched_channel {
  grpc_channel* channel;
  // these fields must only be accessed under global_connection_polling_mu
  struct bg_watched_channel* next;
  int channel_destroyed;
  int refcount;
} bg_watched_channel;

/* grpc_rb_channel wraps a grpc_channel. */
typedef struct grpc_rb_channel {
  VALUE credentials;
  grpc_channel_args args;
  /* The actual channel (protected in a wrapper to tell when it's safe to
   * destroy) */
  bg_watched_channel* bg_wrapped;
} grpc_rb_channel;

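/* A watch_state_op is the tag attached to every connectivity watch pushed
 * onto g_channel_polling_cq. CONTINUOUS_WATCH ops belong to the background
 * polling thread and are re-armed each time they complete; WATCH_STATE_API
 * ops belong to a Ruby-level watch_connectivity_state call that is blocked
 * waiting for the result. */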
typedef enum { CONTINUOUS_WATCH, WATCH_STATE_API } watch_state_op_type;

typedef struct watch_state_op {
  watch_state_op_type op_type;
  // from event.success
  union {
    struct {
      int success;
      // has been called back due to a cq next call
      int called_back;
    } api_callback_args;
    struct {
      bg_watched_channel* bg;
    } continuous_watch_callback_args;
  } op;
} watch_state_op;

static bg_watched_channel* bg_watched_channel_list_head = NULL;

static void grpc_rb_channel_try_register_connection_polling(
    bg_watched_channel* bg);
static void* channel_init_try_register_connection_polling_without_gil(
    void* arg);

typedef struct channel_init_try_register_stack {
  grpc_channel* channel;
  grpc_rb_channel* wrapper;
} channel_init_try_register_stack;

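/* State shared with the background connectivity-polling thread. Unless noted
 * otherwise, this is guarded by global_connection_polling_mu. */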
static grpc_completion_queue* g_channel_polling_cq;
static gpr_mu global_connection_polling_mu;
static gpr_cv global_connection_polling_cv;
static int g_abort_channel_polling = 0;
static gpr_once g_once_init = GPR_ONCE_INIT;
static VALUE g_channel_polling_thread = Qnil;

static int bg_watched_channel_list_lookup(bg_watched_channel* bg);
static bg_watched_channel* bg_watched_channel_list_create_and_add(
    grpc_channel* channel);
static void bg_watched_channel_list_free_and_remove(bg_watched_channel* bg);
static void run_poll_channels_loop_unblocking_func(void* arg);
static void* run_poll_channels_loop_unblocking_func_wrapper(void* arg);

// Needs to be called under global_connection_polling_mu
static void grpc_rb_channel_watch_connection_state_op_complete(
    watch_state_op* op, int success) {
  GPR_ASSERT(!op->op.api_callback_args.called_back);
  op->op.api_callback_args.called_back = 1;
  op->op.api_callback_args.success = success;
  // wake up the watch API call that's waiting on this op
  gpr_cv_broadcast(&global_connection_polling_cv);
}

/* Avoids destroying a channel twice. */
static void grpc_rb_channel_safe_destroy(bg_watched_channel* bg) {
  gpr_mu_lock(&global_connection_polling_mu);
  GPR_ASSERT(bg_watched_channel_list_lookup(bg));
  if (!bg->channel_destroyed) {
    grpc_channel_destroy(bg->channel);
    bg->channel_destroyed = 1;
  }
  bg->refcount--;
  if (bg->refcount == 0) {
    bg_watched_channel_list_free_and_remove(bg);
  }
  gpr_mu_unlock(&global_connection_polling_mu);
}

static void* channel_safe_destroy_without_gil(void* arg) {
  grpc_rb_channel_safe_destroy((bg_watched_channel*)arg);
  return NULL;
}

static void grpc_rb_channel_free_internal(void* p) {
  grpc_rb_channel* ch = NULL;
  if (p == NULL) {
    return;
  }
  ch = (grpc_rb_channel*)p;
  if (ch->bg_wrapped != NULL) {
    /* assumption made here: it's ok to directly gpr_mu_lock the global
     * connection polling mutex because we're in a finalizer,
     * and we can count on this thread to not be interrupted or
     * yield the gil. */
    grpc_rb_channel_safe_destroy(ch->bg_wrapped);
    grpc_rb_channel_args_destroy(&ch->args);
  }
  xfree(p);
}

/* Destroys Channel instances. */
static void grpc_rb_channel_free(void* p) { grpc_rb_channel_free_internal(p); }

/* Protects the mark object from GC */
static void grpc_rb_channel_mark(void* p) {
  grpc_rb_channel* channel = NULL;
  if (p == NULL) {
    return;
  }
  channel = (grpc_rb_channel*)p;
  if (channel->credentials != Qnil) {
    rb_gc_mark(channel->credentials);
  }
}

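/* RUBY_TYPED_FREE_IMMEDIATELY lets the GC invoke grpc_rb_channel_free during
 * sweep rather than deferring it, which is safe here because the free
 * function does not call back into Ruby. */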
static rb_data_type_t grpc_channel_data_type = {"grpc_channel",
                                                {grpc_rb_channel_mark,
                                                 grpc_rb_channel_free,
                                                 GRPC_RB_MEMSIZE_UNAVAILABLE,
                                                 {NULL, NULL}},
                                                NULL,
                                                NULL,
#ifdef RUBY_TYPED_FREE_IMMEDIATELY
                                                RUBY_TYPED_FREE_IMMEDIATELY
#endif
};

/* Allocates grpc_rb_channel instances. */
static VALUE grpc_rb_channel_alloc(VALUE cls) {
  grpc_ruby_init();
  grpc_rb_channel* wrapper = ALLOC(grpc_rb_channel);
  wrapper->bg_wrapped = NULL;
  wrapper->credentials = Qnil;
  MEMZERO(&wrapper->args, grpc_channel_args, 1);
  return TypedData_Wrap_Struct(cls, &grpc_channel_data_type, wrapper);
}

/*
  call-seq:
    insecure_channel = Channel.new("myhost:8080", {'arg1': 'value1'},
                                   :this_channel_is_insecure)
    creds = ...
    secure_channel = Channel.new("myhost:443", {'arg1': 'value1'}, creds)

  Creates channel instances. */
static VALUE grpc_rb_channel_init(int argc, VALUE* argv, VALUE self) {
  VALUE channel_args = Qnil;
  VALUE credentials = Qnil;
  VALUE target = Qnil;
  grpc_rb_channel* wrapper = NULL;
  grpc_channel* ch = NULL;
  grpc_channel_credentials* creds = NULL;
  char* target_chars = NULL;
  channel_init_try_register_stack stack;

  grpc_ruby_fork_guard();
  /* "3" == 3 mandatory args */
  rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  target_chars = StringValueCStr(target);
  grpc_rb_hash_convert_to_channel_args(channel_args, &wrapper->args);
  if (TYPE(credentials) == T_SYMBOL) {
    if (id_insecure_channel != SYM2ID(credentials)) {
      rb_raise(rb_eTypeError,
               "bad creds symbol, want :this_channel_is_insecure");
      return Qnil;
    }
    grpc_channel_credentials* insecure_creds =
        grpc_insecure_credentials_create();
    ch = grpc_channel_create(target_chars, insecure_creds, &wrapper->args);
    grpc_channel_credentials_release(insecure_creds);
  } else {
    wrapper->credentials = credentials;
    if (grpc_rb_is_channel_credentials(credentials)) {
      creds = grpc_rb_get_wrapped_channel_credentials(credentials);
    } else if (grpc_rb_is_xds_channel_credentials(credentials)) {
      creds = grpc_rb_get_wrapped_xds_channel_credentials(credentials);
    } else {
      rb_raise(rb_eTypeError,
               "bad creds, want ChannelCredentials or XdsChannelCredentials");
      return Qnil;
    }
    ch = grpc_channel_create(target_chars, creds, &wrapper->args);
  }

  GPR_ASSERT(ch);
  stack.channel = ch;
  stack.wrapper = wrapper;
  rb_thread_call_without_gvl(
      channel_init_try_register_connection_polling_without_gil, &stack, NULL,
      NULL);
  if (ch == NULL) {
    rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
             target_chars);
    return Qnil;
  }
  rb_ivar_set(self, id_target, target);
  rb_ivar_set(self, id_channel_recreation_mu, rb_mutex_new());
  return self;
}

typedef struct get_state_stack {
  bg_watched_channel* bg;
  int try_to_connect;
  int out;
} get_state_stack;

static void* get_state_without_gil(void* arg) {
  get_state_stack* stack = (get_state_stack*)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  if (stack->bg->channel_destroyed) {
    stack->out = GRPC_CHANNEL_SHUTDOWN;
  } else {
    stack->out = grpc_channel_check_connectivity_state(stack->bg->channel,
                                                       stack->try_to_connect);
  }
  gpr_mu_unlock(&global_connection_polling_mu);

  return NULL;
}

/*
  call-seq:
    ch.connectivity_state       -> state
    ch.connectivity_state(true) -> state

  Indicates the current state of the channel, whose value is one of the
  constants defined in GRPC::Core::ConnectivityStates.

  In the second form, it also tries to connect if the channel is idle. */
static VALUE grpc_rb_channel_get_connectivity_state(int argc, VALUE* argv,
                                                    VALUE self) {
  VALUE try_to_connect_param = Qfalse;
  grpc_rb_channel* wrapper = NULL;
  get_state_stack stack;

  /* "01" == 0 mandatory args, 1 (try_to_connect) is optional */
  rb_scan_args(argc, argv, "01", &try_to_connect_param);

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  if (wrapper->bg_wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "closed!");
    return Qnil;
  }

  stack.bg = wrapper->bg_wrapped;
  stack.try_to_connect = RTEST(try_to_connect_param) ? 1 : 0;
  rb_thread_call_without_gvl(get_state_without_gil, &stack, NULL, NULL);

  return LONG2NUM(stack.out);
}

typedef struct watch_state_stack {
  bg_watched_channel* bg_wrapped;
  gpr_timespec deadline;
  int last_state;
} watch_state_stack;

static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
  watch_state_stack* stack = (watch_state_stack*)arg;
  watch_state_op* op = NULL;
  void* success = (void*)0;

  gpr_mu_lock(&global_connection_polling_mu);
  // it's unsafe to do a "watch" after "channel polling abort" because the cq
  // has been shut down.
  if (g_abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return (void*)0;
  }
  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = WATCH_STATE_API;
  grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
                                        stack->last_state, stack->deadline,
                                        g_channel_polling_cq, op);

  while (!op->op.api_callback_args.called_back) {
    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (op->op.api_callback_args.success) {
    success = (void*)1;
  }
  gpr_free(op);
  gpr_mu_unlock(&global_connection_polling_mu);

  return success;
}
static void wait_for_watch_state_op_complete_unblocking_func(void* arg) {
  bg_watched_channel* bg = (bg_watched_channel*)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  if (!bg->channel_destroyed) {
    grpc_channel_destroy(bg->channel);
    bg->channel_destroyed = 1;
  }
  gpr_mu_unlock(&global_connection_polling_mu);
}

/* Wait until the channel's connectivity state becomes different from
 * "last_state", or "deadline" expires.
 * Returns true if the channel's connectivity state becomes different
 * from "last_state" within "deadline".
 * Returns false if "deadline" expires before the channel's connectivity
 * state changes from "last_state". */
static VALUE grpc_rb_channel_watch_connectivity_state(VALUE self,
                                                      VALUE last_state,
                                                      VALUE deadline) {
  grpc_rb_channel* wrapper = NULL;
  watch_state_stack stack;
  void* op_success = 0;

  grpc_ruby_fork_guard();
  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);

  if (wrapper->bg_wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "closed!");
    return Qnil;
  }

  if (!FIXNUM_P(last_state)) {
    rb_raise(
        rb_eTypeError,
        "bad type for last_state. want a GRPC::Core::ChannelState constant");
    return Qnil;
  }

  stack.bg_wrapped = wrapper->bg_wrapped;
  stack.deadline = grpc_rb_time_timeval(deadline, 0);
  stack.last_state = NUM2LONG(last_state);

  op_success = rb_thread_call_without_gvl(
      wait_for_watch_state_op_complete_without_gvl, &stack,
      wait_for_watch_state_op_complete_unblocking_func, wrapper->bg_wrapped);

  return op_success ? Qtrue : Qfalse;
}
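
/* Illustrative Ruby usage of the two connectivity APIs above (a sketch, not
 * part of this file; assumes `ch` is an open GRPC::Core::Channel):
 *
 *   state = ch.connectivity_state(true)  # also request a connection attempt
 *   # block up to 5 seconds for the state to move away from `state`
 *   if ch.watch_connectivity_state(state, Time.now + 5)
 *     state = ch.connectivity_state
 *   end
 */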

static void grpc_rb_channel_maybe_recreate_channel_after_fork(
    grpc_rb_channel* wrapper, VALUE target) {
  // TODO(apolcyn): maybe check if fork support is enabled here.
  // The only way we can get bg->channel_destroyed without bg itself being
  // NULL is if we destroyed the channel during GRPC::prefork.
  bg_watched_channel* bg = wrapper->bg_wrapped;
  if (bg->channel_destroyed) {
    // There must be one ref at this point, held by the ruby-level channel
    // object, drop this one here.
    GPR_ASSERT(bg->refcount == 1);
    rb_thread_call_without_gvl(channel_safe_destroy_without_gil, bg, NULL,
                               NULL);
    // re-create C-core channel
    const char* target_str = StringValueCStr(target);
    grpc_channel* channel;
    if (wrapper->credentials == Qnil) {
      grpc_channel_credentials* insecure_creds =
          grpc_insecure_credentials_create();
      channel = grpc_channel_create(target_str, insecure_creds, &wrapper->args);
      grpc_channel_credentials_release(insecure_creds);
    } else {
      grpc_channel_credentials* creds;
      if (grpc_rb_is_channel_credentials(wrapper->credentials)) {
        creds = grpc_rb_get_wrapped_channel_credentials(wrapper->credentials);
      } else if (grpc_rb_is_xds_channel_credentials(wrapper->credentials)) {
        creds =
            grpc_rb_get_wrapped_xds_channel_credentials(wrapper->credentials);
      } else {
        rb_raise(rb_eTypeError,
                 "failed to re-create channel after fork: bad creds, want "
                 "ChannelCredentials or XdsChannelCredentials");
        return;
      }
      channel = grpc_channel_create(target_str, creds, &wrapper->args);
    }
    // re-register with channel polling thread
    channel_init_try_register_stack stack;
    stack.channel = channel;
    stack.wrapper = wrapper;
    rb_thread_call_without_gvl(
        channel_init_try_register_connection_polling_without_gil, &stack, NULL,
        NULL);
  }
}

/* Create a call given a grpc_channel, in order to call method. The request
   is not sent until grpc_call_invoke is called. */
static VALUE grpc_rb_channel_create_call(VALUE self, VALUE parent, VALUE mask,
                                         VALUE method, VALUE host,
                                         VALUE deadline) {
  VALUE res = Qnil;
  grpc_rb_channel* wrapper = NULL;
  grpc_call* call = NULL;
  grpc_call* parent_call = NULL;
  grpc_completion_queue* cq = NULL;
  int flags = GRPC_PROPAGATE_DEFAULTS;
  grpc_slice method_slice;
  grpc_slice host_slice;
  grpc_slice* host_slice_ptr = NULL;
  char* tmp_str = NULL;

  grpc_ruby_fork_guard();
  if (host != Qnil) {
    host_slice =
        grpc_slice_from_copied_buffer(RSTRING_PTR(host), RSTRING_LEN(host));
    host_slice_ptr = &host_slice;
  }
  if (mask != Qnil) {
    flags = NUM2UINT(mask);
  }
  if (parent != Qnil) {
    parent_call = grpc_rb_get_wrapped_call(parent);
  }

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  if (wrapper->bg_wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "closed!");
    return Qnil;
  }
  // TODO(apolcyn): only do this check if fork support is enabled
  rb_mutex_lock(rb_ivar_get(self, id_channel_recreation_mu));
  grpc_rb_channel_maybe_recreate_channel_after_fork(
      wrapper, rb_ivar_get(self, id_target));
  rb_mutex_unlock(rb_ivar_get(self, id_channel_recreation_mu));

  cq = grpc_completion_queue_create_for_pluck(NULL);
  method_slice =
      grpc_slice_from_copied_buffer(RSTRING_PTR(method), RSTRING_LEN(method));
  call = grpc_channel_create_call(wrapper->bg_wrapped->channel, parent_call,
                                  flags, cq, method_slice, host_slice_ptr,
                                  grpc_rb_time_timeval(deadline,
                                                       /* absolute time */ 0),
                                  NULL);

  if (call == NULL) {
    tmp_str = grpc_slice_to_c_string(method_slice);
    rb_raise(rb_eRuntimeError, "cannot create call with method %s", tmp_str);
    return Qnil;
  }

  grpc_slice_unref(method_slice);
  if (host_slice_ptr != NULL) {
    grpc_slice_unref(host_slice);
  }

  res = grpc_rb_wrap_call(call, cq);

  /* Make this channel an instance attribute of the call so that it is not GCed
   * before the call. */
  rb_ivar_set(res, id_channel, self);
  return res;
}
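
/* Illustrative Ruby usage (a sketch; assumes `ch` is an open
 * GRPC::Core::Channel and the server exposes a /Service/Method handler):
 *
 *   call = ch.create_call(nil,   # no parent call
 *                         nil,   # default propagation mask
 *                         '/Service/Method',
 *                         nil,   # default host
 *                         Time.now + 10)  # absolute deadline
 */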

/* Closes the channel, calling its destroy method. */
/* Note this is an API-level call; a wrapped channel's finalizer doesn't call
 * this. */
static VALUE grpc_rb_channel_destroy(VALUE self) {
  grpc_rb_channel* wrapper = NULL;

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  if (wrapper->bg_wrapped != NULL) {
    rb_thread_call_without_gvl(channel_safe_destroy_without_gil,
                               wrapper->bg_wrapped, NULL, NULL);
    wrapper->bg_wrapped = NULL;
  }

  return Qnil;
}

/* Called to obtain the target that this channel accesses. */
static VALUE grpc_rb_channel_get_target(VALUE self) {
  grpc_rb_channel* wrapper = NULL;
  VALUE res = Qnil;
  char* target = NULL;

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  target = grpc_channel_get_target(wrapper->bg_wrapped->channel);
  res = rb_str_new2(target);
  gpr_free(target);

  return res;
}

/* Needs to be called under global_connection_polling_mu */
static int bg_watched_channel_list_lookup(bg_watched_channel* target) {
  bg_watched_channel* cur = bg_watched_channel_list_head;

  while (cur != NULL) {
    if (cur == target) {
      return 1;
    }
    cur = cur->next;
  }

  return 0;
}

/* Needs to be called under global_connection_polling_mu */
static bg_watched_channel* bg_watched_channel_list_create_and_add(
    grpc_channel* channel) {
  bg_watched_channel* watched = gpr_zalloc(sizeof(bg_watched_channel));

  watched->channel = channel;
  watched->next = bg_watched_channel_list_head;
  watched->refcount = 1;
  bg_watched_channel_list_head = watched;
  return watched;
}

/* Needs to be called under global_connection_polling_mu */
static void bg_watched_channel_list_free_and_remove(
    bg_watched_channel* target) {
  bg_watched_channel* bg = NULL;

  GPR_ASSERT(bg_watched_channel_list_lookup(target));
  GPR_ASSERT(target->channel_destroyed && target->refcount == 0);
  if (bg_watched_channel_list_head == target) {
    bg_watched_channel_list_head = target->next;
    gpr_free(target);
    return;
  }
  bg = bg_watched_channel_list_head;
  while (bg != NULL && bg->next != NULL) {
    if (bg->next == target) {
      bg->next = bg->next->next;
      gpr_free(target);
      return;
    }
    bg = bg->next;
  }
  GPR_ASSERT(0);
}

/* Initialize a grpc_rb_channel's "protected grpc_channel" and try to push
 * it onto the background thread for constant watches. */
static void* channel_init_try_register_connection_polling_without_gil(
    void* arg) {
  channel_init_try_register_stack* stack =
      (channel_init_try_register_stack*)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  stack->wrapper->bg_wrapped =
      bg_watched_channel_list_create_and_add(stack->channel);
  grpc_rb_channel_try_register_connection_polling(stack->wrapper->bg_wrapped);
  gpr_mu_unlock(&global_connection_polling_mu);
  return NULL;
}

// Needs to be called under global_connection_polling_mu
static void grpc_rb_channel_try_register_connection_polling(
    bg_watched_channel* bg) {
  grpc_connectivity_state conn_state;
  watch_state_op* op = NULL;
  if (bg->refcount == 0) {
    GPR_ASSERT(bg->channel_destroyed);
    bg_watched_channel_list_free_and_remove(bg);
    return;
  }
  GPR_ASSERT(bg->refcount == 1);
  if (bg->channel_destroyed || g_abort_channel_polling) {
    return;
  }
  conn_state = grpc_channel_check_connectivity_state(bg->channel, 0);
  if (conn_state == GRPC_CHANNEL_SHUTDOWN) {
    return;
  }
  GPR_ASSERT(bg_watched_channel_list_lookup(bg));
  // prevent bg from being free'd by GC while background thread is watching it
  bg->refcount++;
  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = CONTINUOUS_WATCH;
  op->op.continuous_watch_callback_args.bg = bg;
  grpc_channel_watch_connectivity_state(bg->channel, conn_state,
                                        gpr_inf_future(GPR_CLOCK_REALTIME),
                                        g_channel_polling_cq, op);
}

// Note this loop breaks out with a single call of
// "run_poll_channels_loop_no_gil".
// This assumes that a Ruby call to the unblocking func
// indicates process shutdown.
// In the worst case, this stops polling channel connectivity
// early and falls back to current behavior.
static void* run_poll_channels_loop_no_gil(void* arg) {
  grpc_event event;
  watch_state_op* op = NULL;
  bg_watched_channel* bg = NULL;
  (void)arg;
  gpr_log(GPR_DEBUG, "GRPC_RUBY: run_poll_channels_loop_no_gil - begin");

  gpr_mu_lock(&global_connection_polling_mu);
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);

  for (;;) {
    event = grpc_completion_queue_next(
        g_channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (event.type == GRPC_QUEUE_SHUTDOWN) {
      break;
    }
    gpr_mu_lock(&global_connection_polling_mu);
    if (event.type == GRPC_OP_COMPLETE) {
      op = (watch_state_op*)event.tag;
      if (op->op_type == CONTINUOUS_WATCH) {
        bg = (bg_watched_channel*)op->op.continuous_watch_callback_args.bg;
        bg->refcount--;
        grpc_rb_channel_try_register_connection_polling(bg);
        gpr_free(op);
      } else if (op->op_type == WATCH_STATE_API) {
        grpc_rb_channel_watch_connection_state_op_complete(
            (watch_state_op*)event.tag, event.success);
      } else {
        GPR_ASSERT(0);
      }
    }
    gpr_mu_unlock(&global_connection_polling_mu);
  }
  grpc_completion_queue_destroy(g_channel_polling_cq);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
          "loop");
  return NULL;
}

static void run_poll_channels_loop_unblocking_func(void* arg) {
  run_poll_channels_loop_unblocking_func_wrapper(arg);
}

// Notify the channel polling loop to cleanup and shutdown.
static void* run_poll_channels_loop_unblocking_func_wrapper(void* arg) {
  bg_watched_channel* bg = NULL;
  (void)arg;

  gpr_mu_lock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_unblocking_func - begin aborting "
          "connection polling");
  // early out after first time through
  if (g_abort_channel_polling) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return NULL;
  }
  g_abort_channel_polling = 1;

  // force pending watches to end by switching to shutdown state
  bg = bg_watched_channel_list_head;
  while (bg != NULL) {
    if (!bg->channel_destroyed) {
      grpc_channel_destroy(bg->channel);
      bg->channel_destroyed = 1;
    }
    bg = bg->next;
  }

  gpr_log(GPR_DEBUG, "GRPC_RUBY: cq shutdown on global polling cq. pid: %d",
          getpid());
  grpc_completion_queue_shutdown(g_channel_polling_cq);
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_unblocking_func - end aborting "
          "connection polling");
  return NULL;
}

// Poll channel connectivity states in background thread without the GIL.
static VALUE run_poll_channels_loop(void* arg) {
  (void)arg;
  gpr_log(
      GPR_DEBUG,
      "GRPC_RUBY: run_poll_channels_loop - create connection polling thread");
  rb_thread_call_without_gvl(run_poll_channels_loop_no_gil, NULL,
                             run_poll_channels_loop_unblocking_func, NULL);
  return Qnil;
}

static void* set_abort_channel_polling_without_gil(void* arg) {
  (void)arg;
  gpr_mu_lock(&global_connection_polling_mu);
  g_abort_channel_polling = 1;
  gpr_cv_broadcast(&global_connection_polling_cv);
  gpr_mu_unlock(&global_connection_polling_mu);
  return NULL;
}

static void do_basic_init() {
  gpr_mu_init(&global_connection_polling_mu);
  gpr_cv_init(&global_connection_polling_cv);
}

/* Temporary fix for
 * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
 * Transports in idle channels can get destroyed. Normally c-core re-connects,
 * but in grpc-ruby core never gets a thread until an RPC is made, because ruby
 * only calls c-core's "completion_queue_pluck" API.
 * This uses a global background thread that calls
 * "completion_queue_next" on registered "watch_channel_connectivity_state"
 * calls - so that c-core can reconnect if needed, when there aren't any RPCs.
 * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
 */
void grpc_rb_channel_polling_thread_start() {
  gpr_once_init(&g_once_init, do_basic_init);
  GPR_ASSERT(!RTEST(g_channel_polling_thread));
  GPR_ASSERT(!g_abort_channel_polling);
  GPR_ASSERT(g_channel_polling_cq == NULL);

  g_channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
  g_channel_polling_thread = rb_thread_create(run_poll_channels_loop, NULL);

  if (!RTEST(g_channel_polling_thread)) {
    gpr_log(GPR_ERROR, "GRPC_RUBY: failed to spawn channel polling thread");
    rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
                               NULL, NULL);
    return;
  }
}

void grpc_rb_channel_polling_thread_stop() {
  if (!RTEST(g_channel_polling_thread)) {
    gpr_log(GPR_ERROR,
            "GRPC_RUBY: channel polling thread stop: thread was not started");
    return;
  }
  rb_thread_call_without_gvl(run_poll_channels_loop_unblocking_func_wrapper,
                             NULL, NULL, NULL);
  rb_funcall(g_channel_polling_thread, rb_intern("join"), 0);
  // state associated with the channel polling thread is destroyed, reset so
  // we can start again later
  g_channel_polling_thread = Qnil;
  g_abort_channel_polling = 0;
  g_channel_polling_cq = NULL;
}

static void Init_grpc_propagate_masks() {
  /* Constants representing call propagation masks in grpc.h */
  VALUE grpc_rb_mPropagateMasks =
      rb_define_module_under(grpc_rb_mGrpcCore, "PropagateMasks");
  rb_define_const(grpc_rb_mPropagateMasks, "DEADLINE",
                  UINT2NUM(GRPC_PROPAGATE_DEADLINE));
  rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_STATS_CONTEXT",
                  UINT2NUM(GRPC_PROPAGATE_CENSUS_STATS_CONTEXT));
  rb_define_const(grpc_rb_mPropagateMasks, "CENSUS_TRACING_CONTEXT",
                  UINT2NUM(GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT));
  rb_define_const(grpc_rb_mPropagateMasks, "CANCELLATION",
                  UINT2NUM(GRPC_PROPAGATE_CANCELLATION));
  rb_define_const(grpc_rb_mPropagateMasks, "DEFAULTS",
                  UINT2NUM(GRPC_PROPAGATE_DEFAULTS));
}

static void Init_grpc_connectivity_states() {
  /* Constants representing channel connectivity states in grpc.h */
  VALUE grpc_rb_mConnectivityStates =
      rb_define_module_under(grpc_rb_mGrpcCore, "ConnectivityStates");
  rb_define_const(grpc_rb_mConnectivityStates, "IDLE",
                  LONG2NUM(GRPC_CHANNEL_IDLE));
  rb_define_const(grpc_rb_mConnectivityStates, "CONNECTING",
                  LONG2NUM(GRPC_CHANNEL_CONNECTING));
  rb_define_const(grpc_rb_mConnectivityStates, "READY",
                  LONG2NUM(GRPC_CHANNEL_READY));
  rb_define_const(grpc_rb_mConnectivityStates, "TRANSIENT_FAILURE",
                  LONG2NUM(GRPC_CHANNEL_TRANSIENT_FAILURE));
  rb_define_const(grpc_rb_mConnectivityStates, "FATAL_FAILURE",
                  LONG2NUM(GRPC_CHANNEL_SHUTDOWN));
}

void Init_grpc_channel() {
  rb_global_variable(&g_channel_polling_thread);
  grpc_rb_cChannelArgs = rb_define_class("TmpChannelArgs", rb_cObject);
  rb_undef_alloc_func(grpc_rb_cChannelArgs);
  grpc_rb_cChannel =
      rb_define_class_under(grpc_rb_mGrpcCore, "Channel", rb_cObject);

  /* Allocates an object managed by the ruby runtime */
  rb_define_alloc_func(grpc_rb_cChannel, grpc_rb_channel_alloc);

  /* Provides a ruby constructor and support for dup/clone. */
  rb_define_method(grpc_rb_cChannel, "initialize", grpc_rb_channel_init, -1);
  rb_define_method(grpc_rb_cChannel, "initialize_copy",
                   grpc_rb_cannot_init_copy, 1);

  /* Add ruby analogues of the Channel methods. */
  rb_define_method(grpc_rb_cChannel, "connectivity_state",
                   grpc_rb_channel_get_connectivity_state, -1);
  rb_define_method(grpc_rb_cChannel, "watch_connectivity_state",
                   grpc_rb_channel_watch_connectivity_state, 2);
  rb_define_method(grpc_rb_cChannel, "create_call", grpc_rb_channel_create_call,
                   5);
  rb_define_method(grpc_rb_cChannel, "target", grpc_rb_channel_get_target, 0);
  rb_define_method(grpc_rb_cChannel, "destroy", grpc_rb_channel_destroy, 0);
  rb_define_alias(grpc_rb_cChannel, "close", "destroy");

  id_channel = rb_intern("__channel");
  id_target = rb_intern("__target");
  id_channel_recreation_mu = rb_intern("__channel_recreation_mu");
  rb_define_const(grpc_rb_cChannel, "SSL_TARGET",
                  ID2SYM(rb_intern(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)));
  rb_define_const(grpc_rb_cChannel, "ENABLE_CENSUS",
                  ID2SYM(rb_intern(GRPC_ARG_ENABLE_CENSUS)));
  rb_define_const(grpc_rb_cChannel, "MAX_CONCURRENT_STREAMS",
                  ID2SYM(rb_intern(GRPC_ARG_MAX_CONCURRENT_STREAMS)));
  rb_define_const(grpc_rb_cChannel, "MAX_MESSAGE_LENGTH",
                  ID2SYM(rb_intern(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH)));
  id_insecure_channel = rb_intern("this_channel_is_insecure");
  Init_grpc_propagate_masks();
  Init_grpc_connectivity_states();
}

/* Gets the wrapped channel from the ruby wrapper */
grpc_channel* grpc_rb_get_wrapped_channel(VALUE v) {
  grpc_rb_channel* wrapper = NULL;
  TypedData_Get_Struct(v, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  return wrapper->bg_wrapped->channel;
}