//
//
// Copyright 2016 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/combiner.h"

#include <assert.h>
#include <inttypes.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/mpscq.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"

grpc_core::DebugOnlyTraceFlag grpc_combiner_trace(false, "combiner");

#define GRPC_COMBINER_TRACE(fn)          \
  do {                                   \
    if (grpc_combiner_trace.enabled()) { \
      fn;                                \
    }                                    \
  } while (0)

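// The combiner's `state` word packs two pieces of information:
//   - bit 0 (STATE_UNORPHANED): set while the combiner still holds its
//     initial reference; cleared once the last ref is dropped.
//   - the remaining bits: a count of queued work items, incremented and
//     decremented in units of STATE_ELEM_COUNT_LOW_BIT (2).
// For example, a state of 5 (binary 101) means the combiner is unorphaned
// and has two items pending; a state of 0 means it is both idle and
// orphaned and may be deleted.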
#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2

static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* closure,
                          grpc_error_handle error);
static void combiner_finally_exec(grpc_core::Combiner* lock,
                                  grpc_closure* closure,
                                  grpc_error_handle error);

grpc_core::Combiner* grpc_combiner_create(
    std::shared_ptr<grpc_event_engine::experimental::EventEngine>
        event_engine) {
  grpc_core::Combiner* lock = new grpc_core::Combiner();
  lock->event_engine = event_engine;
  gpr_ref_init(&lock->refs, 1);
  gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
  grpc_closure_list_init(&lock->final_list);
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock));
  return lock;
}
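
// Illustrative usage sketch (not part of this file): it assumes an
// EventEngine shared_ptr named `engine` is available, `my_cb`/`my_arg` are
// hypothetical, and the exact spelling of the ref-counting macro lives in
// combiner.h.
//
//   grpc_core::Combiner* combiner = grpc_combiner_create(engine);
//   combiner->Run(GRPC_CLOSURE_CREATE(my_cb, my_arg, nullptr),
//                 absl::OkStatus());
//   // ... once no further closures will be scheduled on it:
//   GRPC_COMBINER_UNREF(combiner, "done");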

static void really_destroy(grpc_core::Combiner* lock) {
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock));
  GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
  delete lock;
}

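// Dropping the last external ref does not delete the combiner immediately:
// start_destroy only clears the STATE_UNORPHANED bit. If no work is queued
// (old_state == 1) the combiner is destroyed right away; otherwise the drain
// loop in grpc_combiner_continue_exec_ctx performs the deletion once the last
// queued item has run.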
static void start_destroy(grpc_core::Combiner* lock) {
  gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
  if (old_state == 1) {
    really_destroy(lock);
  }
}

#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)                                \
  if (grpc_combiner_trace.enabled()) {                                     \
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,                            \
            "C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op),        \
            gpr_atm_no_barrier_load(&lock->refs.count),                    \
            gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason); \
  }
#else
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
#endif

void grpc_combiner_unref(grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
  GRPC_COMBINER_DEBUG_SPAM("UNREF", -1);
  if (gpr_unref(&lock->refs)) {
    start_destroy(lock);
  }
}

grpc_core::Combiner* grpc_combiner_ref(
    grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
  GRPC_COMBINER_DEBUG_SPAM("  REF", 1);
  gpr_ref(&lock->refs);
  return lock;
}

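// Each ExecCtx keeps a singly linked list of combiners that have work ready
// to run on this thread, threaded through next_combiner_on_this_exec_ctx.
// push_last_on_exec_ctx appends a combiner to that list (used when new work
// arrives), while push_first_on_exec_ctx re-inserts the currently draining
// combiner at the head so it keeps priority between closures.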
static void push_last_on_exec_ctx(grpc_core::Combiner* lock) {
  lock->next_combiner_on_this_exec_ctx = nullptr;
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
        grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  } else {
    grpc_core::ExecCtx::Get()
        ->combiner_data()
        ->last_combiner->next_combiner_on_this_exec_ctx = lock;
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  }
}

static void push_first_on_exec_ctx(grpc_core::Combiner* lock) {
  lock->next_combiner_on_this_exec_ctx =
      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
  grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = lock;
  if (lock->next_combiner_on_this_exec_ctx == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
  }
}

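// Queues a closure on the combiner. The fetch_add both publishes the new item
// (in units of STATE_ELEM_COUNT_LOW_BIT) and tells us whether we were first:
// a previous state of 1 means the combiner was idle, so it must be registered
// with the current exec_ctx; otherwise the item is simply pushed and whoever
// is already draining the queue will pick it up.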
static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* cl,
                          grpc_error_handle error) {
  gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
                              "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
                              lock, cl, last));
  if (last == 1) {
    gpr_atm_no_barrier_store(
        &lock->initiating_exec_ctx_or_null,
        reinterpret_cast<gpr_atm>(grpc_core::ExecCtx::Get()));
    // first element on this list: add it to the list of combiner locks
    // executing within this exec_ctx
    push_last_on_exec_ctx(lock);
  } else {
    // there may be a race when setting this field: if that happens, we may
    // delay offloading for an action or two, and that's fine
    gpr_atm initiator =
        gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
    if (initiator != 0 &&
        initiator != reinterpret_cast<gpr_atm>(grpc_core::ExecCtx::Get())) {
      gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
    }
  }
  GPR_ASSERT(last & STATE_UNORPHANED);  // ensure lock has not been destroyed
  assert(cl->cb);
  cl->error_data.error = grpc_core::internal::StatusAllocHeapPtr(error);
  lock->queue.Push(cl->next_data.mpscq_node.get());
}

static void move_next() {
  grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
      grpc_core::ExecCtx::Get()
          ->combiner_data()
          ->active_combiner->next_combiner_on_this_exec_ctx;
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = nullptr;
  }
}

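// Hands the rest of this combiner's work to the EventEngine: the combiner is
// unhooked from the current exec_ctx and a callback is scheduled that will
// re-register it on a fresh ExecCtx and flush it there. This keeps a busy
// combiner from pinning the thread that happened to touch it first.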
static void queue_offload(grpc_core::Combiner* lock) {
  move_next();
  // Make the combiner look uncontended by storing a non-null value here, so
  // that we don't immediately offload again.
  gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 1);
  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock));
  lock->event_engine->Run([lock] {
    grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
    grpc_core::ExecCtx exec_ctx(0);
    push_last_on_exec_ctx(lock);
    exec_ctx.Flush();
  });
}

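// Runs one step of the active combiner on this exec_ctx, typically invoked
// from ExecCtx::Flush. Returns false when there is no active combiner (the
// caller can stop), and true when it did some work and should be called
// again. Each call executes either one queued closure or one batch of
// "finally" closures, then updates the state word to decide what comes next.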
bool grpc_combiner_continue_exec_ctx() {
  grpc_core::Combiner* lock =
      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
  if (lock == nullptr) {
    return false;
  }

  bool contended =
      gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;

  GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
                              "C:%p grpc_combiner_continue_exec_ctx "
                              "contended=%d "
                              "exec_ctx_ready_to_finish=%d "
                              "time_to_execute_final_list=%d",
                              lock, contended,
                              grpc_core::ExecCtx::Get()->IsReadyToFinish(),
                              lock->time_to_execute_final_list));

  // offload only if both (1) the combiner is contended (a different exec_ctx
  // has queued work on it), and (2) the current execution context needs to
  // finish as soon as possible
  if (contended && grpc_core::ExecCtx::Get()->IsReadyToFinish()) {
    // this execution context wants to move on: schedule remaining work to be
    // picked up on the executor
    queue_offload(lock);
    return true;
  }

  if (!lock->time_to_execute_final_list ||
      // peek to see if something new has shown up, and execute that with
      // priority
      (gpr_atm_acq_load(&lock->state) >> 1) > 1) {
    grpc_core::MultiProducerSingleConsumerQueue::Node* n = lock->queue.Pop();
    GRPC_COMBINER_TRACE(
        gpr_log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n));
    if (n == nullptr) {
      // queue is in an inconsistent state: use this as a cue that we should
      // go off and do something else for a while (and come back later)
      queue_offload(lock);
      return true;
    }
    grpc_closure* cl = reinterpret_cast<grpc_closure*>(n);
#ifndef NDEBUG
    cl->scheduled = false;
#endif
    grpc_error_handle cl_err =
        grpc_core::internal::StatusMoveFromHeapPtr(cl->error_data.error);
    cl->error_data.error = 0;
    cl->cb(cl->cb_arg, std::move(cl_err));
  } else {
    grpc_closure* c = lock->final_list.head;
    GPR_ASSERT(c != nullptr);
    grpc_closure_list_init(&lock->final_list);
    int loops = 0;
    while (c != nullptr) {
      GRPC_COMBINER_TRACE(
          gpr_log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock, loops, c));
      grpc_closure* next = c->next_data.next;
#ifndef NDEBUG
      c->scheduled = false;
#endif
      grpc_error_handle error =
          grpc_core::internal::StatusMoveFromHeapPtr(c->error_data.error);
      c->error_data.error = 0;
      c->cb(c->cb_arg, std::move(error));
      c = next;
    }
  }

  move_next();
  lock->time_to_execute_final_list = false;
  gpr_atm old_state =
      gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(
      gpr_log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
  (((orphaned) ? 0 : STATE_UNORPHANED) |    \
   ((elem_count) * STATE_ELEM_COUNT_LOW_BIT))
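// For example, OLD_STATE_WAS(false, 2) == (1 | 2 * 2) == 5: the combiner was
// unorphaned with two queued items before this pass decremented the count.
// OLD_STATE_WAS(true, 1) == (0 | 1 * 2) == 2: orphaned with exactly one item,
// i.e. the closure just executed was the last one.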
  // Depending on what the previous state was, we need to perform different
  // actions.
  switch (old_state) {
    default:
      // we have multiple queued work items: just continue executing them
      break;
    case OLD_STATE_WAS(false, 2):
    case OLD_STATE_WAS(true, 2):
      // we're down to one queued item: if the final list has closures pending,
      // run those on the next pass
      if (!grpc_closure_list_empty(lock->final_list)) {
        lock->time_to_execute_final_list = true;
      }
      break;
    case OLD_STATE_WAS(false, 1):
      // had exactly one queued item and still unorphaned --> now unlocked and
      // unorphaned
      return true;
    case OLD_STATE_WAS(true, 1):
      // had exactly one queued item and already orphaned --> now unlocked and
      // orphaned, so destroy
      really_destroy(lock);
      return true;
    case OLD_STATE_WAS(false, 0):
    case OLD_STATE_WAS(true, 0):
      // these values are illegal - representing an already unlocked or
      // deleted lock
      GPR_UNREACHABLE_CODE(return true);
  }
  push_first_on_exec_ctx(lock);
  return true;
}

static void enqueue_finally(void* closure, grpc_error_handle error);

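// "Finally" closures run only after the combiner's regular queue has drained
// for the current holder. If the caller is already running inside this
// combiner, the closure is appended to final_list directly (bumping the
// element count the first time so the state machine knows a final batch is
// pending). Otherwise the request is bounced through enqueue_finally, which
// re-runs this function from inside the combiner.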
static void combiner_finally_exec(grpc_core::Combiner* lock,
                                  grpc_closure* closure,
                                  grpc_error_handle error) {
  GPR_ASSERT(lock != nullptr);
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, closure,
      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
    // Using error_data.scratch to store the combiner so that it can be
    // accessed in enqueue_finally.
    closure->error_data.scratch = reinterpret_cast<uintptr_t>(lock);
    lock->Run(GRPC_CLOSURE_CREATE(enqueue_finally, closure, nullptr), error);
    return;
  }

  if (grpc_closure_list_empty(lock->final_list)) {
    gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  }
  grpc_closure_list_append(&lock->final_list, closure, error);
}

static void enqueue_finally(void* closure, grpc_error_handle error) {
  grpc_closure* cl = static_cast<grpc_closure*>(closure);
  grpc_core::Combiner* lock =
      reinterpret_cast<grpc_core::Combiner*>(cl->error_data.scratch);
  cl->error_data.scratch = 0;
  combiner_finally_exec(lock, cl, error);
}

namespace grpc_core {
void Combiner::Run(grpc_closure* closure, grpc_error_handle error) {
  combiner_exec(this, closure, error);
}

void Combiner::FinallyRun(grpc_closure* closure, grpc_error_handle error) {
  combiner_finally_exec(this, closure, error);
}

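// Marks the combiner as contended and the current exec_ctx as ready to
// finish, so the next grpc_combiner_continue_exec_ctx call takes the
// queue_offload path and moves the remaining work onto the EventEngine.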
void Combiner::ForceOffload() {
  gpr_atm_no_barrier_store(&initiating_exec_ctx_or_null, 0);
  ExecCtx::Get()->SetReadyToFinishFlag();
}

}  // namespace grpc_core