1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 *
5 * Copyright IBM Corporation, 2008
6 *
7 * Authors: Dipankar Sarma <[email protected]>
8 * Manfred Spraul <[email protected]>
9 * Paul E. McKenney <[email protected]>
10 *
11 * Based on the original work by Paul McKenney <[email protected]>
12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
13 *
14 * For detailed explanation of Read-Copy Update mechanism see -
15 * Documentation/RCU
16 */
17
18 #define pr_fmt(fmt) "rcu: " fmt
19
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/spinlock.h>
24 #include <linux/smp.h>
25 #include <linux/rcupdate_wait.h>
26 #include <linux/interrupt.h>
27 #include <linux/sched.h>
28 #include <linux/sched/debug.h>
29 #include <linux/nmi.h>
30 #include <linux/atomic.h>
31 #include <linux/bitops.h>
32 #include <linux/export.h>
33 #include <linux/completion.h>
34 #include <linux/kmemleak.h>
35 #include <linux/moduleparam.h>
36 #include <linux/panic.h>
37 #include <linux/panic_notifier.h>
38 #include <linux/percpu.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/mutex.h>
42 #include <linux/time.h>
43 #include <linux/kernel_stat.h>
44 #include <linux/wait.h>
45 #include <linux/kthread.h>
46 #include <uapi/linux/sched/types.h>
47 #include <linux/prefetch.h>
48 #include <linux/delay.h>
49 #include <linux/random.h>
50 #include <linux/trace_events.h>
51 #include <linux/suspend.h>
52 #include <linux/ftrace.h>
53 #include <linux/tick.h>
54 #include <linux/sysrq.h>
55 #include <linux/kprobes.h>
56 #include <linux/gfp.h>
57 #include <linux/oom.h>
58 #include <linux/smpboot.h>
59 #include <linux/jiffies.h>
60 #include <linux/slab.h>
61 #include <linux/sched/isolation.h>
62 #include <linux/sched/clock.h>
63 #include <linux/vmalloc.h>
64 #include <linux/mm.h>
65 #include <linux/kasan.h>
66 #include <linux/context_tracking.h>
67 #include "../time/tick-internal.h"
68
69 #include "tree.h"
70 #include "rcu.h"
71
72 #ifdef MODULE_PARAM_PREFIX
73 #undef MODULE_PARAM_PREFIX
74 #endif
75 #define MODULE_PARAM_PREFIX "rcutree."
76
77 /* Data structures. */
78 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);
79
80 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
81 .gpwrap = true,
82 };
83 static struct rcu_state rcu_state = {
84 .level = { &rcu_state.node[0] },
85 .gp_state = RCU_GP_IDLE,
86 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
87 .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
88 .barrier_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.barrier_lock),
89 .name = RCU_NAME,
90 .abbr = RCU_ABBR,
91 .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
92 .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
93 .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
94 .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
95 rcu_sr_normal_gp_cleanup_work),
96 .srs_cleanups_pending = ATOMIC_INIT(0),
97 #ifdef CONFIG_RCU_NOCB_CPU
98 .nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
99 #endif
100 };
101
102 /* Dump rcu_node combining tree at boot to verify correct setup. */
103 static bool dump_tree;
104 module_param(dump_tree, bool, 0444);
105 /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
106 static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
107 #ifndef CONFIG_PREEMPT_RT
108 module_param(use_softirq, bool, 0444);
109 #endif
110 /* Control rcu_node-tree auto-balancing at boot time. */
111 static bool rcu_fanout_exact;
112 module_param(rcu_fanout_exact, bool, 0444);
113 /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
114 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
115 module_param(rcu_fanout_leaf, int, 0444);
116 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
117 /* Number of rcu_nodes at specified level. */
118 int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
119 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
120
121 /*
122 * The rcu_scheduler_active variable is initialized to the value
123 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
124 * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
125 * RCU can assume that there is but one task, allowing RCU to (for example)
126 * optimize synchronize_rcu() to a simple barrier(). When this variable
127 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
128 * to detect real grace periods. This variable is also used to suppress
129 * boot-time false positives from lockdep-RCU error checking. Finally, it
130 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
131 * is fully initialized, including all of its kthreads having been spawned.
132 */
133 int rcu_scheduler_active __read_mostly;
134 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
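
/*
 * Illustrative sketch only (not part of the RCU implementation): early-boot
 * code that needs a grace period can rely on the single-task guarantee
 * implied by RCU_SCHEDULER_INACTIVE, roughly as follows:
 *
 *	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 *		barrier();		// Only one task, so no readers to wait for.
 *	else
 *		synchronize_rcu();	// Must wait for a real grace period.
 *
 * This merely restates the comment above in code form; the real decision is
 * made inside RCU's update-side primitives.
 */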
135
136 /*
137 * The rcu_scheduler_fully_active variable transitions from zero to one
138 * during the early_initcall() processing, which is after the scheduler
139 * is capable of creating new tasks. So RCU processing (for example,
140 * creating tasks for RCU priority boosting) must be delayed until after
141 * rcu_scheduler_fully_active transitions from zero to one. We also
142 * currently delay invocation of any RCU callbacks until after this point.
143 *
144 * It might later prove better for people registering RCU callbacks during
145 * early boot to take responsibility for these callbacks, but one step at
146 * a time.
147 */
148 static int rcu_scheduler_fully_active __read_mostly;
149
150 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
151 unsigned long gps, unsigned long flags);
152 static void invoke_rcu_core(void);
153 static void rcu_report_exp_rdp(struct rcu_data *rdp);
154 static void sync_sched_exp_online_cleanup(int cpu);
155 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
156 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
157 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
158 static bool rcu_init_invoked(void);
159 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
160 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
161
162 /*
163 * rcuc/rcub/rcuop kthread realtime priority. Whether the "rcuop"
164 * kthreads actually run at real-time priority is additionally controlled
165 * by the CONFIG_RCU_NOCB_CPU_CB_BOOST Kconfig option.
166 */
167 static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
168 module_param(kthread_prio, int, 0444);
169
170 /* Delay in jiffies for grace-period initialization delays, debug only. */
171
172 static int gp_preinit_delay;
173 module_param(gp_preinit_delay, int, 0444);
174 static int gp_init_delay;
175 module_param(gp_init_delay, int, 0444);
176 static int gp_cleanup_delay;
177 module_param(gp_cleanup_delay, int, 0444);
178 static int nohz_full_patience_delay;
179 module_param(nohz_full_patience_delay, int, 0444);
180 static int nohz_full_patience_delay_jiffies;
181
182 // Add delay to rcu_read_unlock() for strict grace periods.
183 static int rcu_unlock_delay;
184 #ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
185 module_param(rcu_unlock_delay, int, 0444);
186 #endif
187
188 /* Retrieve RCU kthreads priority for rcutorture */
189 int rcu_get_gp_kthreads_prio(void)
190 {
191 return kthread_prio;
192 }
193 EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
194
195 /*
196 * Number of grace periods between delays, normalized by the duration of
197 * the delay. The longer the delay, the more the grace periods between
198 * each delay. The reason for this normalization is that it means that,
199 * for non-zero delays, the overall slowdown of grace periods is constant
200 * regardless of the duration of the delay. This arrangement balances
201 * the need for long delays to increase some race probabilities with the
202 * need for fast grace periods to increase other race probabilities.
203 */
204 #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays for debugging. */
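
/*
 * Worked example of the normalization above (see rcu_gp_slow() later in this
 * file): a debug delay of d jiffies is applied only once every
 * rcu_num_nodes * PER_RCU_NODE_PERIOD * d grace periods.  With, say,
 * rcu_num_nodes == 3 and d == 5, one 5-jiffy sleep occurs every 45 grace
 * periods; doubling d to 10 halves how often the sleep happens, keeping the
 * overall grace-period slowdown roughly constant.
 */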
205
206 /*
207 * Return true if an RCU grace period is in progress. The READ_ONCE()s
208 * permit this function to be invoked without holding the root rcu_node
209 * structure's ->lock, but of course results can be subject to change.
210 */
211 static int rcu_gp_in_progress(void)
212 {
213 return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
214 }
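
/*
 * Reference sketch, relying on the gp_seq encoding defined in kernel/rcu/rcu.h:
 * the low-order RCU_SEQ_CTR_SHIFT bits of a ->gp_seq value hold the
 * grace-period state and the remaining bits count grace periods, so
 * rcu_seq_state() above is nonzero exactly when a grace period is in flight:
 *
 *	unsigned long s = rcu_seq_current(&rcu_state.gp_seq);
 *	bool gp_active = rcu_seq_state(s);	// Grace period in progress?
 *	unsigned long nr_gps = rcu_seq_ctr(s);	// Grace-period counter portion.
 */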
215
216 /*
217 * Return the number of callbacks queued on the specified CPU.
218 * Handles both the nocbs and normal cases.
219 */
220 static long rcu_get_n_cbs_cpu(int cpu)
221 {
222 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
223
224 if (rcu_segcblist_is_enabled(&rdp->cblist))
225 return rcu_segcblist_n_cbs(&rdp->cblist);
226 return 0;
227 }
228
229 /**
230 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
231 *
232 * Mark a quiescent state for RCU, Tasks RCU, and Tasks Trace RCU.
233 * This is a special-purpose function to be used in the softirq
234 * infrastructure and perhaps the occasional long-running softirq
235 * handler.
236 *
237 * Note that from RCU's viewpoint, a call to rcu_softirq_qs() is
238 * equivalent to momentarily completely enabling preemption. For
239 * example, given this code::
240 *
241 * local_bh_disable();
242 * do_something();
243 * rcu_softirq_qs(); // A
244 * do_something_else();
245 * local_bh_enable(); // B
246 *
247 * A call to synchronize_rcu() that began concurrently with the
248 * call to do_something() would be guaranteed to wait only until
249 * execution reached statement A. Without that rcu_softirq_qs(),
250 * that same synchronize_rcu() would instead be guaranteed to wait
251 * until execution reached statement B.
252 */
253 void rcu_softirq_qs(void)
254 {
255 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
256 lock_is_held(&rcu_lock_map) ||
257 lock_is_held(&rcu_sched_lock_map),
258 "Illegal rcu_softirq_qs() in RCU read-side critical section");
259 rcu_qs();
260 rcu_preempt_deferred_qs(current);
261 rcu_tasks_qs(current, false);
262 }
263
264 /*
265 * Reset the current CPU's RCU_WATCHING counter to indicate that the
266 * newly onlined CPU is no longer in an extended quiescent state.
267 * This will either leave the counter unchanged, or increment it
268 * to the next non-quiescent value.
269 *
270 * The non-atomic test/increment sequence works because the upper bits
271 * of the ->state variable are manipulated only by the corresponding CPU,
272 * or when the corresponding CPU is offline.
273 */
274 static void rcu_watching_online(void)
275 {
276 if (ct_rcu_watching() & CT_RCU_WATCHING)
277 return;
278 ct_state_inc(CT_RCU_WATCHING);
279 }
280
281 /*
282 * Return true if the snapshot returned from ct_rcu_watching()
283 * indicates that RCU is in an extended quiescent state.
284 */
285 static bool rcu_watching_snap_in_eqs(int snap)
286 {
287 return !(snap & CT_RCU_WATCHING);
288 }
289
290 /**
291 * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU
292 * since the specified @snap?
293 *
294 * @rdp: The rcu_data corresponding to the CPU for which to check EQS.
295 * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS.
296 *
297 * Returns true if the CPU corresponding to @rdp has spent some time in an
298 * extended quiescent state since @snap. Note that this doesn't check if it
299 * /still/ is in an EQS, just that it went through one since @snap.
300 *
301 * This is meant to be used in a loop waiting for a CPU to go through an EQS.
302 */
303 static bool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
304 {
305 /*
306 * The first failing snapshot is already ordered against the accesses
307 * performed by the remote CPU after it exits idle.
308 *
309 * The second snapshot therefore only needs to order against accesses
310 * performed by the remote CPU prior to entering idle and therefore can
311 * rely solely on acquire semantics.
312 */
313 if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap)))
314 return true;
315
316 return snap != ct_rcu_watching_cpu_acquire(rdp->cpu);
317 }
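
/*
 * Typical usage pattern (illustrative only; in this file the snapshot is
 * taken by rcu_watching_snap_save() and consumed by
 * rcu_watching_snap_recheck() below):
 *
 *	snap = ct_rcu_watching_cpu_acquire(rdp->cpu);	// CPU not in EQS here.
 *	...
 *	// Later, possibly repeatedly, while waiting on that CPU:
 *	if (rcu_watching_snap_stopped_since(rdp, snap))
 *		...;	// CPU passed through an EQS, credit it with a QS.
 */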
318
319 /*
320 * Return true if the referenced integer is zero while the specified
321 * CPU remains within a single extended quiescent state.
322 */
323 bool rcu_watching_zero_in_eqs(int cpu, int *vp)
324 {
325 int snap;
326
327 // If not quiescent, force back to earlier extended quiescent state.
328 snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
329 smp_rmb(); // Order CT state and *vp reads.
330 if (READ_ONCE(*vp))
331 return false; // Non-zero, so report failure.
332 smp_rmb(); // Order *vp read and CT state re-read.
333
334 // If still in the same extended quiescent state, we are good!
335 return snap == ct_rcu_watching_cpu(cpu);
336 }
337
338 /*
339 * Let the RCU core know that this CPU has gone through the scheduler,
340 * which is a quiescent state. This is called when the need for a
341 * quiescent state is urgent, so we burn an atomic operation and full
342 * memory barriers to let the RCU core know about it, regardless of what
343 * this CPU might (or might not) do in the near future.
344 *
345 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
346 *
347 * The caller must have disabled interrupts and must not be idle.
348 */
349 notrace void rcu_momentary_eqs(void)
350 {
351 int seq;
352
353 raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
354 seq = ct_state_inc(2 * CT_RCU_WATCHING);
355 /* It is illegal to call this from idle state. */
356 WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
357 rcu_preempt_deferred_qs(current);
358 }
359 EXPORT_SYMBOL_GPL(rcu_momentary_eqs);
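
/*
 * Explanatory note on the arithmetic above (a sketch, not authoritative):
 * incrementing the context-tracking state by 2 * CT_RCU_WATCHING leaves the
 * CT_RCU_WATCHING bit set (this CPU is still watching) while changing the
 * counter value, so a remote CPU comparing an earlier snapshot via
 * rcu_watching_snap_stopped_since() sees a difference, exactly as if this
 * CPU had entered and immediately exited an extended quiescent state.
 */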
360
361 /**
362 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
363 *
364 * If the current CPU is idle and running either at a first-level (not
365 * nested) interrupt or directly in the idle loop, return true.
366 *
367 * The caller must have at least disabled IRQs.
368 */
369 static int rcu_is_cpu_rrupt_from_idle(void)
370 {
371 long nesting;
372
373 /*
374 * Usually called from the tick; but also used from smp_call_function()
375 * for expedited grace periods. This latter can result in running from
376 * the idle task, instead of an actual IPI.
377 */
378 lockdep_assert_irqs_disabled();
379
380 /* Check for counter underflows */
381 RCU_LOCKDEP_WARN(ct_nesting() < 0,
382 "RCU nesting counter underflow!");
383 RCU_LOCKDEP_WARN(ct_nmi_nesting() <= 0,
384 "RCU nmi_nesting counter underflow/zero!");
385
386 /* Are we at first interrupt nesting level? */
387 nesting = ct_nmi_nesting();
388 if (nesting > 1)
389 return false;
390
391 /*
392 * If we're not in an interrupt, we must be in the idle task!
393 */
394 WARN_ON_ONCE(!nesting && !is_idle_task(current));
395
396 /* Does CPU appear to be idle from an RCU standpoint? */
397 return ct_nesting() == 0;
398 }
399
400 #define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
401 // Maximum callbacks per rcu_do_batch ...
402 #define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
403 static long blimit = DEFAULT_RCU_BLIMIT;
404 #define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
405 static long qhimark = DEFAULT_RCU_QHIMARK;
406 #define DEFAULT_RCU_QLOMARK 100 // Once only this many pending, use blimit.
407 static long qlowmark = DEFAULT_RCU_QLOMARK;
408 #define DEFAULT_RCU_QOVLD_MULT 2
409 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
410 static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
411 static long qovld_calc = -1; // No pre-initialization lock acquisitions!
412
413 module_param(blimit, long, 0444);
414 module_param(qhimark, long, 0444);
415 module_param(qlowmark, long, 0444);
416 module_param(qovld, long, 0444);
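
/*
 * How these limits interact, using the default values above as a worked
 * example (summary only; the authoritative logic lives later in this file):
 * callback invocation normally stops after blimit (10) callbacks per batch;
 * once a CPU has more than qhimark (10000) callbacks pending, blimit is
 * ignored until the backlog drops back below qlowmark (100); and once qovld
 * (2 * 10000 = 20000) callbacks are pending, RCU additionally starts
 * hammering that CPU for quiescent states so the grace period ends sooner.
 */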
417
418 static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
419 static ulong jiffies_till_next_fqs = ULONG_MAX;
420 static bool rcu_kick_kthreads;
421 static int rcu_divisor = 7;
422 module_param(rcu_divisor, int, 0644);
423
424 /* Force an exit from rcu_do_batch() after 3 milliseconds. */
425 static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
426 module_param(rcu_resched_ns, long, 0644);
427
428 /*
429 * How long the grace period must be before we start recruiting
430 * quiescent-state help from rcu_note_context_switch().
431 */
432 static ulong jiffies_till_sched_qs = ULONG_MAX;
433 module_param(jiffies_till_sched_qs, ulong, 0444);
434 static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
435 module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
436
437 /*
438 * Make sure that we give the grace-period kthread time to detect any
439 * idle CPUs before taking active measures to force quiescent states.
440 * However, don't go below 100 milliseconds, adjusted upwards for really
441 * large systems.
442 */
443 static void adjust_jiffies_till_sched_qs(void)
444 {
445 unsigned long j;
446
447 /* If jiffies_till_sched_qs was specified, respect the request. */
448 if (jiffies_till_sched_qs != ULONG_MAX) {
449 WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
450 return;
451 }
452 /* Otherwise, set to third fqs scan, but bound below on large systems. */
453 j = READ_ONCE(jiffies_till_first_fqs) +
454 2 * READ_ONCE(jiffies_till_next_fqs);
455 if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
456 j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
457 pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
458 WRITE_ONCE(jiffies_to_sched_qs, j);
459 }
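
/*
 * Worked example of the calculation above, assuming HZ == 1000,
 * nr_cpu_ids == 256, and RCU_JIFFIES_FQS_DIV == 256: if the fqs intervals
 * sum to j = jiffies_till_first_fqs + 2 * jiffies_till_next_fqs = 9 jiffies,
 * the lower bound HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV = 100 + 1 = 101
 * jiffies wins, so jiffies_to_sched_qs becomes 101.
 */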
460
461 static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
462 {
463 ulong j;
464 int ret = kstrtoul(val, 0, &j);
465
466 if (!ret) {
467 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
468 adjust_jiffies_till_sched_qs();
469 }
470 return ret;
471 }
472
473 static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
474 {
475 ulong j;
476 int ret = kstrtoul(val, 0, &j);
477
478 if (!ret) {
479 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
480 adjust_jiffies_till_sched_qs();
481 }
482 return ret;
483 }
484
485 static const struct kernel_param_ops first_fqs_jiffies_ops = {
486 .set = param_set_first_fqs_jiffies,
487 .get = param_get_ulong,
488 };
489
490 static const struct kernel_param_ops next_fqs_jiffies_ops = {
491 .set = param_set_next_fqs_jiffies,
492 .get = param_get_ulong,
493 };
494
495 module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
496 module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
497 module_param(rcu_kick_kthreads, bool, 0644);
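
/*
 * Example boot-time usage of the two callbacks above (MODULE_PARAM_PREFIX
 * makes these "rcutree." parameters):
 *
 *	rcutree.jiffies_till_first_fqs=3 rcutree.jiffies_till_next_fqs=10
 *
 * Values above HZ are silently clamped to HZ, and jiffies_till_next_fqs is
 * additionally forced to at least 1, as implemented in
 * param_set_first_fqs_jiffies() and param_set_next_fqs_jiffies() above.
 */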
498
499 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
500 static int rcu_pending(int user);
501
502 /*
503 * Return the number of RCU GPs completed thus far for debug & stats.
504 */
505 unsigned long rcu_get_gp_seq(void)
506 {
507 return READ_ONCE(rcu_state.gp_seq);
508 }
509 EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
510
511 /*
512 * Return the number of RCU expedited batches completed thus far for
513 * debug & stats. Odd numbers mean that a batch is in progress, even
514 * numbers mean idle. The value returned will thus be roughly double
515 * the cumulative batches since boot.
516 */
517 unsigned long rcu_exp_batches_completed(void)
518 {
519 return rcu_state.expedited_sequence;
520 }
521 EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
522
523 /*
524 * Return the root node of the rcu_state structure.
525 */
526 static struct rcu_node *rcu_get_root(void)
527 {
528 return &rcu_state.node[0];
529 }
530
531 /*
532 * Send along grace-period-related data for rcutorture diagnostics.
533 */
534 void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
535 {
536 *flags = READ_ONCE(rcu_state.gp_flags);
537 *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
538 }
539 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
540
541 #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
542 /*
543 * An empty function that will trigger a reschedule on
544 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
545 */
546 static void late_wakeup_func(struct irq_work *work)
547 {
548 }
549
550 static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
551 IRQ_WORK_INIT(late_wakeup_func);
552
553 /*
554 * If either:
555 *
556 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
557 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
558 *
559 * In these cases the late RCU wakeups aren't supported in the resched loops and our
560 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
561 * get re-enabled.
562 */
563 noinstr void rcu_irq_work_resched(void)
564 {
565 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
566
567 if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
568 return;
569
570 if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
571 return;
572
573 instrumentation_begin();
574 if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
575 irq_work_queue(this_cpu_ptr(&late_wakeup_work));
576 }
577 instrumentation_end();
578 }
579 #endif /* #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) */
580
581 #ifdef CONFIG_PROVE_RCU
582 /**
583 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
584 */
585 void rcu_irq_exit_check_preempt(void)
586 {
587 lockdep_assert_irqs_disabled();
588
589 RCU_LOCKDEP_WARN(ct_nesting() <= 0,
590 "RCU nesting counter underflow/zero!");
591 RCU_LOCKDEP_WARN(ct_nmi_nesting() !=
592 CT_NESTING_IRQ_NONIDLE,
593 "Bad RCU nmi_nesting counter\n");
594 RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
595 "RCU in extended quiescent state!");
596 }
597 #endif /* #ifdef CONFIG_PROVE_RCU */
598
599 #ifdef CONFIG_NO_HZ_FULL
600 /**
601 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
602 *
603 * The scheduler tick is not normally enabled when CPUs enter the kernel
604 * from nohz_full userspace execution. After all, nohz_full userspace
605 * execution is an RCU quiescent state and the time executing in the kernel
606 * is quite short. Except of course when it isn't. And it is not hard to
607 * cause a large system to spend tens of seconds or even minutes looping
608 * in the kernel, which can cause a number of problems, including RCU CPU
609 * stall warnings.
610 *
611 * Therefore, if a nohz_full CPU fails to report a quiescent state
612 * in a timely manner, the RCU grace-period kthread sets that CPU's
613 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
614 * exception will invoke this function, which will turn on the scheduler
615 * tick, which will enable RCU to detect that CPU's quiescent states,
616 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
617 * The tick will be disabled once a quiescent state is reported for
618 * this CPU.
619 *
620 * Of course, in carefully tuned systems, there might never be an
621 * interrupt or exception. In that case, the RCU grace-period kthread
622 * will eventually cause one to happen. However, in less carefully
623 * controlled environments, this function allows RCU to get what it
624 * needs without creating otherwise useless interruptions.
625 */
626 void __rcu_irq_enter_check_tick(void)
627 {
628 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
629
630 // If we're here from NMI there's nothing to do.
631 if (in_nmi())
632 return;
633
634 RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
635 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
636
637 if (!tick_nohz_full_cpu(rdp->cpu) ||
638 !READ_ONCE(rdp->rcu_urgent_qs) ||
639 READ_ONCE(rdp->rcu_forced_tick)) {
640 // RCU doesn't need nohz_full help from this CPU, or it is
641 // already getting that help.
642 return;
643 }
644
645 // We get here only when not in an extended quiescent state and
646 // from interrupts (as opposed to NMIs). Therefore, (1) RCU is
647 // already watching and (2) The fact that we are in an interrupt
648 // handler and that the rcu_node lock is an irq-disabled lock
649 // prevents self-deadlock. So we can safely recheck under the lock.
650 // Note that the nohz_full state currently cannot change.
651 raw_spin_lock_rcu_node(rdp->mynode);
652 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
653 // A nohz_full CPU is in the kernel and RCU needs a
654 // quiescent state. Turn on the tick!
655 WRITE_ONCE(rdp->rcu_forced_tick, true);
656 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
657 }
658 raw_spin_unlock_rcu_node(rdp->mynode);
659 }
660 NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
661 #endif /* CONFIG_NO_HZ_FULL */
662
663 /*
664 * Check to see if any future non-offloaded RCU-related work will need
665 * to be done by the current CPU, even if none need be done immediately,
666 * returning 1 if so. This function is part of the RCU implementation;
667 * it is -not- an exported member of the RCU API. This is used by
668 * the idle-entry code to figure out whether it is safe to disable the
669 * scheduler-clock interrupt.
670 *
671 * Just check whether or not this CPU has non-offloaded RCU callbacks
672 * queued.
673 */
674 int rcu_needs_cpu(void)
675 {
676 return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
677 !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
678 }
679
680 /*
681 * If any sort of urgency was applied to the current CPU (for example,
682 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
683 * to get to a quiescent state, disable it.
684 */
685 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
686 {
687 raw_lockdep_assert_held_rcu_node(rdp->mynode);
688 WRITE_ONCE(rdp->rcu_urgent_qs, false);
689 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
690 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
691 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
692 WRITE_ONCE(rdp->rcu_forced_tick, false);
693 }
694 }
695
696 /**
697 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
698 *
699 * Return @true if RCU is watching the running CPU and @false otherwise.
700 * A @true return means that this CPU can safely enter RCU read-side
701 * critical sections.
702 *
703 * Although calls to rcu_is_watching() from most parts of the kernel
704 * will return @true, there are important exceptions. For example, if the
705 * current CPU is deep within its idle loop, in kernel entry/exit code,
706 * or offline, rcu_is_watching() will return @false.
707 *
708 * Make notrace because it can be called by the internal functions of
709 * ftrace, and making this notrace removes unnecessary recursion calls.
710 */
711 notrace bool rcu_is_watching(void)
712 {
713 bool ret;
714
715 preempt_disable_notrace();
716 ret = rcu_is_watching_curr_cpu();
717 preempt_enable_notrace();
718 return ret;
719 }
720 EXPORT_SYMBOL_GPL(rcu_is_watching);
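
/*
 * Example use (illustrative): code that might run in unusual contexts
 * (idle loop, entry/exit paths, offline CPUs) can assert that RCU read-side
 * critical sections are currently legal before entering one:
 *
 *	RCU_LOCKDEP_WARN(!rcu_is_watching(),
 *			 "rcu_read_lock() from extended quiescent state");
 *	rcu_read_lock();
 *	...
 *	rcu_read_unlock();
 */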
721
722 /*
723 * If a holdout task is actually running, request an urgent quiescent
724 * state from its CPU. This is unsynchronized, so migrations can cause
725 * the request to go to the wrong CPU. Which is OK, all that will happen
726 * is that the CPU's next context switch will be a bit slower and next
727 * time around this task will generate another request.
728 */
729 void rcu_request_urgent_qs_task(struct task_struct *t)
730 {
731 int cpu;
732
733 barrier();
734 cpu = task_cpu(t);
735 if (!task_curr(t))
736 return; /* This task is not running on that CPU. */
737 smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
738 }
739
740 /*
741 * When trying to report a quiescent state on behalf of some other CPU,
742 * it is our responsibility to check for and handle potential overflow
743 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
744 * After all, the CPU might be in deep idle state, and thus executing no
745 * code whatsoever.
746 */
747 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
748 {
749 raw_lockdep_assert_held_rcu_node(rnp);
750 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
751 rnp->gp_seq))
752 WRITE_ONCE(rdp->gpwrap, true);
753 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
754 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
755 }
756
757 /*
758 * Snapshot the specified CPU's RCU_WATCHING counter so that we can later
759 * credit it with an implicit quiescent state. Return 1 if this CPU
760 * is in dynticks idle mode, which is an extended quiescent state.
761 */
762 static int rcu_watching_snap_save(struct rcu_data *rdp)
763 {
764 /*
765 * Full ordering between remote CPU's post idle accesses and updater's
766 * accesses prior to current GP (and also the started GP sequence number)
767 * is enforced by rcu_seq_start() implicit barrier and even further by
768 * smp_mb__after_unlock_lock() barriers chained all the way throughout the
769 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
770 * locking.
771 *
772 * Ordering between remote CPU's pre idle accesses and post grace period
773 * updater's accesses is enforced by the below acquire semantic.
774 */
775 rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
776 if (rcu_watching_snap_in_eqs(rdp->watching_snap)) {
777 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
778 rcu_gpnum_ovf(rdp->mynode, rdp);
779 return 1;
780 }
781 return 0;
782 }
783
784 /*
785 * Returns positive if the specified CPU has passed through a quiescent state
786 * by virtue of being in or having passed through a dynticks idle state since
787 * the last call to rcu_watching_snap_save() for this same CPU, or by
788 * virtue of having been offline.
789 *
790 * Returns negative if the specified CPU needs a force resched.
791 *
792 * Returns zero otherwise.
793 */
794 static int rcu_watching_snap_recheck(struct rcu_data *rdp)
795 {
796 unsigned long jtsq;
797 int ret = 0;
798 struct rcu_node *rnp = rdp->mynode;
799
800 /*
801 * If the CPU passed through or entered a dynticks idle phase with
802 * no active irq/NMI handlers, then we can safely pretend that the CPU
803 * already acknowledged the request to pass through a quiescent
804 * state. Either way, that CPU cannot possibly be in an RCU
805 * read-side critical section that started before the beginning
806 * of the current RCU grace period.
807 */
808 if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) {
809 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
810 rcu_gpnum_ovf(rnp, rdp);
811 return 1;
812 }
813
814 /*
815 * Complain if a CPU that is considered to be offline from RCU's
816 * perspective has not yet reported a quiescent state. After all,
817 * the offline CPU should have reported a quiescent state during
818 * the CPU-offline process, or, failing that, by rcu_gp_init()
819 * if it ran concurrently with either the CPU going offline or the
820 * last task on a leaf rcu_node structure exiting its RCU read-side
821 * critical section while all CPUs corresponding to that structure
822 * are offline. This added warning detects bugs in any of these
823 * code paths.
824 *
825 * The rcu_node structure's ->lock is held here, which excludes
826 * the relevant portions the CPU-hotplug code, the grace-period
827 * initialization code, and the rcu_read_unlock() code paths.
828 *
829 * For more detail, please refer to the "Hotplug CPU" section
830 * of RCU's Requirements documentation.
831 */
832 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
833 struct rcu_node *rnp1;
834
835 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
836 __func__, rnp->grplo, rnp->grphi, rnp->level,
837 (long)rnp->gp_seq, (long)rnp->completedqs);
838 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
839 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
840 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
841 pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
842 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
843 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
844 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
845 return 1; /* Break things loose after complaining. */
846 }
847
848 /*
849 * A CPU running for an extended time within the kernel can
850 * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
851 * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
852 * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the
853 * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
854 * variable are safe because the assignments are repeated if this
855 * CPU failed to pass through a quiescent state. This code
856 * also checks .jiffies_resched in case jiffies_to_sched_qs
857 * is set way high.
858 */
859 jtsq = READ_ONCE(jiffies_to_sched_qs);
860 if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
861 (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
862 time_after(jiffies, rcu_state.jiffies_resched) ||
863 rcu_state.cbovld)) {
864 WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
865 /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
866 smp_store_release(&rdp->rcu_urgent_qs, true);
867 } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
868 WRITE_ONCE(rdp->rcu_urgent_qs, true);
869 }
870
871 /*
872 * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
873 * The above code handles this, but only for straight cond_resched().
874 * And some in-kernel loops check need_resched() before calling
875 * cond_resched(), which defeats the above code for CPUs that are
876 * running in-kernel with scheduling-clock interrupts disabled.
877 * So hit them over the head with the resched_cpu() hammer!
878 */
879 if (tick_nohz_full_cpu(rdp->cpu) &&
880 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
881 rcu_state.cbovld)) {
882 WRITE_ONCE(rdp->rcu_urgent_qs, true);
883 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
884 ret = -1;
885 }
886
887 /*
888 * If more than halfway to RCU CPU stall-warning time, invoke
889 * resched_cpu() more frequently to try to loosen things up a bit.
890 * Also check to see if the CPU is getting hammered with interrupts,
891 * but only once per grace period, just to keep the IPIs down to
892 * a dull roar.
893 */
894 if (time_after(jiffies, rcu_state.jiffies_resched)) {
895 if (time_after(jiffies,
896 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
897 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
898 ret = -1;
899 }
900 if (IS_ENABLED(CONFIG_IRQ_WORK) &&
901 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
902 (rnp->ffmask & rdp->grpmask)) {
903 rdp->rcu_iw_pending = true;
904 rdp->rcu_iw_gp_seq = rnp->gp_seq;
905 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
906 }
907
908 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
909 int cpu = rdp->cpu;
910 struct rcu_snap_record *rsrp;
911 struct kernel_cpustat *kcsp;
912
913 kcsp = &kcpustat_cpu(cpu);
914
915 rsrp = &rdp->snap_record;
916 rsrp->cputime_irq = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
917 rsrp->cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
918 rsrp->cputime_system = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);
919 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
920 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
921 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
922 rsrp->jiffies = jiffies;
923 rsrp->gp_seq = rdp->gp_seq;
924 }
925 }
926
927 return ret;
928 }
929
930 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
931 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
932 unsigned long gp_seq_req, const char *s)
933 {
934 trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
935 gp_seq_req, rnp->level,
936 rnp->grplo, rnp->grphi, s);
937 }
938
939 /*
940 * rcu_start_this_gp - Request the start of a particular grace period
941 * @rnp_start: The leaf node of the CPU from which to start.
942 * @rdp: The rcu_data corresponding to the CPU from which to start.
943 * @gp_seq_req: The gp_seq of the grace period to start.
944 *
945 * Start the specified grace period, as needed to handle newly arrived
946 * callbacks. The required future grace periods are recorded in each
947 * rcu_node structure's ->gp_seq_needed field. Returns true if there
948 * is reason to awaken the grace-period kthread.
949 *
950 * The caller must hold the specified rcu_node structure's ->lock, which
951 * is why the caller is responsible for waking the grace-period kthread.
952 *
953 * Returns true if the GP thread needs to be awakened else false.
954 */
955 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
956 unsigned long gp_seq_req)
957 {
958 bool ret = false;
959 struct rcu_node *rnp;
960
961 /*
962 * Use funnel locking to either acquire the root rcu_node
963 * structure's lock or bail out if the need for this grace period
964 * has already been recorded -- or if that grace period has in
965 * fact already started. If there is already a grace period in
966 * progress in a non-leaf node, no recording is needed because the
967 * end of the grace period will scan the leaf rcu_node structures.
968 * Note that rnp_start->lock must not be released.
969 */
970 raw_lockdep_assert_held_rcu_node(rnp_start);
971 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
972 for (rnp = rnp_start; 1; rnp = rnp->parent) {
973 if (rnp != rnp_start)
974 raw_spin_lock_rcu_node(rnp);
975 if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
976 rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
977 (rnp != rnp_start &&
978 rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
979 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
980 TPS("Prestarted"));
981 goto unlock_out;
982 }
983 WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
984 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
985 /*
986 * We just marked the leaf or internal node, and a
987 * grace period is in progress, which means that
988 * rcu_gp_cleanup() will see the marking. Bail to
989 * reduce contention.
990 */
991 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
992 TPS("Startedleaf"));
993 goto unlock_out;
994 }
995 if (rnp != rnp_start && rnp->parent != NULL)
996 raw_spin_unlock_rcu_node(rnp);
997 if (!rnp->parent)
998 break; /* At root, and perhaps also leaf. */
999 }
1000
1001 /* If GP already in progress, just leave, otherwise start one. */
1002 if (rcu_gp_in_progress()) {
1003 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1004 goto unlock_out;
1005 }
1006 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1007 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1008 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1009 if (!READ_ONCE(rcu_state.gp_kthread)) {
1010 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1011 goto unlock_out;
1012 }
1013 trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1014 ret = true; /* Caller must wake GP kthread. */
1015 unlock_out:
1016 /* Push furthest requested GP to leaf node and rcu_data structure. */
1017 if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1018 WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1019 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1020 }
1021 if (rnp != rnp_start)
1022 raw_spin_unlock_rcu_node(rnp);
1023 return ret;
1024 }
1025
1026 /*
1027 * Clean up any old requests for the just-ended grace period. Also return
1028 * whether any additional grace periods have been requested.
1029 */
1030 static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1031 {
1032 bool needmore;
1033 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1034
1035 needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1036 if (!needmore)
1037 rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1038 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1039 needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1040 return needmore;
1041 }
1042
1043 static void swake_up_one_online_ipi(void *arg)
1044 {
1045 struct swait_queue_head *wqh = arg;
1046
1047 swake_up_one(wqh);
1048 }
1049
1050 static void swake_up_one_online(struct swait_queue_head *wqh)
1051 {
1052 int cpu = get_cpu();
1053
1054 /*
1055 * If called from rcutree_report_cpu_starting(), wake up
1056 * is dangerous that late in the CPU-down hotplug process. The
1057 * scheduler might queue an ignored hrtimer. Defer the wake up
1058 * to an online CPU instead.
1059 */
1060 if (unlikely(cpu_is_offline(cpu))) {
1061 int target;
1062
1063 target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
1064 cpu_online_mask);
1065
1066 smp_call_function_single(target, swake_up_one_online_ipi,
1067 wqh, 0);
1068 put_cpu();
1069 } else {
1070 put_cpu();
1071 swake_up_one(wqh);
1072 }
1073 }
1074
1075 /*
1076 * Awaken the grace-period kthread. Don't do a self-awaken (unless in an
1077 * interrupt or softirq handler, in which case we just might immediately
1078 * sleep upon return, resulting in a grace-period hang), and don't bother
1079 * awakening when there is nothing for the grace-period kthread to do
1080 * (as in several CPUs raced to awaken, we lost), and finally don't try
1081 * to awaken a kthread that has not yet been created. If all those checks
1082 * are passed, track some debug information and awaken.
1083 *
1084 * So why do the self-wakeup when in an interrupt or softirq handler
1085 * in the grace-period kthread's context? Because the kthread might have
1086 * been interrupted just as it was going to sleep, and just after the final
1087 * pre-sleep check of the awaken condition. In this case, a wakeup really
1088 * is required, and is therefore supplied.
1089 */
1090 static void rcu_gp_kthread_wake(void)
1091 {
1092 struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1093
1094 if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
1095 !READ_ONCE(rcu_state.gp_flags) || !t)
1096 return;
1097 WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1098 WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1099 swake_up_one_online(&rcu_state.gp_wq);
1100 }
1101
1102 /*
1103 * If there is room, assign a ->gp_seq number to any callbacks on this
1104 * CPU that have not already been assigned. Also accelerate any callbacks
1105 * that were previously assigned a ->gp_seq number that has since proven
1106 * to be too conservative, which can happen if callbacks get assigned a
1107 * ->gp_seq number while RCU is idle, but with reference to a non-root
1108 * rcu_node structure. This function is idempotent, so it does not hurt
1109 * to call it repeatedly. Returns a flag saying that we should awaken
1110 * the RCU grace-period kthread.
1111 *
1112 * The caller must hold rnp->lock with interrupts disabled.
1113 */
1114 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1115 {
1116 unsigned long gp_seq_req;
1117 bool ret = false;
1118
1119 rcu_lockdep_assert_cblist_protected(rdp);
1120 raw_lockdep_assert_held_rcu_node(rnp);
1121
1122 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1123 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1124 return false;
1125
1126 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1127
1128 /*
1129 * Callbacks are often registered with incomplete grace-period
1130 * information. Something about the fact that getting exact
1131 * information requires acquiring a global lock... RCU therefore
1132 * makes a conservative estimate of the grace period number at which
1133 * a given callback will become ready to invoke. The following
1134 * code checks this estimate and improves it when possible, thus
1135 * accelerating callback invocation to an earlier grace-period
1136 * number.
1137 */
1138 gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1139 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1140 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1141
1142 /* Trace depending on how much we were able to accelerate. */
1143 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1144 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1145 else
1146 trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1147
1148 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1149
1150 return ret;
1151 }
1152
1153 /*
1154 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1155 * rcu_node structure's ->lock be held. It consults the cached value
1156 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1157 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1158 * while holding the leaf rcu_node structure's ->lock.
1159 */
1160 static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1161 struct rcu_data *rdp)
1162 {
1163 unsigned long c;
1164 bool needwake;
1165
1166 rcu_lockdep_assert_cblist_protected(rdp);
1167 c = rcu_seq_snap(&rcu_state.gp_seq);
1168 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1169 /* Old request still live, so mark recent callbacks. */
1170 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1171 return;
1172 }
1173 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1174 needwake = rcu_accelerate_cbs(rnp, rdp);
1175 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1176 if (needwake)
1177 rcu_gp_kthread_wake();
1178 }
1179
1180 /*
1181 * Move any callbacks whose grace period has completed to the
1182 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1183 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1184 * sublist. This function is idempotent, so it does not hurt to
1185 * invoke it repeatedly. As long as it is not invoked -too- often...
1186 * Returns true if the RCU grace-period kthread needs to be awakened.
1187 *
1188 * The caller must hold rnp->lock with interrupts disabled.
1189 */
1190 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1191 {
1192 rcu_lockdep_assert_cblist_protected(rdp);
1193 raw_lockdep_assert_held_rcu_node(rnp);
1194
1195 /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1196 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1197 return false;
1198
1199 /*
1200 * Find all callbacks whose ->gp_seq numbers indicate that they
1201 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1202 */
1203 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1204
1205 /* Classify any remaining callbacks. */
1206 return rcu_accelerate_cbs(rnp, rdp);
1207 }
1208
1209 /*
1210 * Move and classify callbacks, but only if doing so won't require
1211 * that the RCU grace-period kthread be awakened.
1212 */
1213 static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1214 struct rcu_data *rdp)
1215 {
1216 rcu_lockdep_assert_cblist_protected(rdp);
1217 if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
1218 return;
1219 // The grace period cannot end while we hold the rcu_node lock.
1220 if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
1221 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1222 raw_spin_unlock_rcu_node(rnp);
1223 }
1224
1225 /*
1226 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1227 * quiescent state. This is intended to be invoked when the CPU notices
1228 * a new grace period.
1229 */
1230 static void rcu_strict_gp_check_qs(void)
1231 {
1232 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1233 rcu_read_lock();
1234 rcu_read_unlock();
1235 }
1236 }
1237
1238 /*
1239 * Update CPU-local rcu_data state to record the beginnings and ends of
1240 * grace periods. The caller must hold the ->lock of the leaf rcu_node
1241 * structure corresponding to the current CPU, and must have irqs disabled.
1242 * Returns true if the grace-period kthread needs to be awakened.
1243 */
1244 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1245 {
1246 bool ret = false;
1247 bool need_qs;
1248 const bool offloaded = rcu_rdp_is_offloaded(rdp);
1249
1250 raw_lockdep_assert_held_rcu_node(rnp);
1251
1252 if (rdp->gp_seq == rnp->gp_seq)
1253 return false; /* Nothing to do. */
1254
1255 /* Handle the ends of any preceding grace periods first. */
1256 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1257 unlikely(READ_ONCE(rdp->gpwrap))) {
1258 if (!offloaded)
1259 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1260 rdp->core_needs_qs = false;
1261 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1262 } else {
1263 if (!offloaded)
1264 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1265 if (rdp->core_needs_qs)
1266 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1267 }
1268
1269 /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1270 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1271 unlikely(READ_ONCE(rdp->gpwrap))) {
1272 /*
1273 * If the current grace period is waiting for this CPU,
1274 * set up to detect a quiescent state, otherwise don't
1275 * go looking for one.
1276 */
1277 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1278 need_qs = !!(rnp->qsmask & rdp->grpmask);
1279 rdp->cpu_no_qs.b.norm = need_qs;
1280 rdp->core_needs_qs = need_qs;
1281 zero_cpu_stall_ticks(rdp);
1282 }
1283 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1284 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1285 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1286 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1287 WRITE_ONCE(rdp->last_sched_clock, jiffies);
1288 WRITE_ONCE(rdp->gpwrap, false);
1289 rcu_gpnum_ovf(rnp, rdp);
1290 return ret;
1291 }
1292
1293 static void note_gp_changes(struct rcu_data *rdp)
1294 {
1295 unsigned long flags;
1296 bool needwake;
1297 struct rcu_node *rnp;
1298
1299 local_irq_save(flags);
1300 rnp = rdp->mynode;
1301 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1302 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1303 !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1304 local_irq_restore(flags);
1305 return;
1306 }
1307 needwake = __note_gp_changes(rnp, rdp);
1308 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1309 rcu_strict_gp_check_qs();
1310 if (needwake)
1311 rcu_gp_kthread_wake();
1312 }
1313
1314 static atomic_t *rcu_gp_slow_suppress;
1315
1316 /* Register a counter to suppress debugging grace-period delays. */
1317 void rcu_gp_slow_register(atomic_t *rgssp)
1318 {
1319 WARN_ON_ONCE(rcu_gp_slow_suppress);
1320
1321 WRITE_ONCE(rcu_gp_slow_suppress, rgssp);
1322 }
1323 EXPORT_SYMBOL_GPL(rcu_gp_slow_register);
1324
1325 /* Unregister a counter, with NULL for not caring which. */
1326 void rcu_gp_slow_unregister(atomic_t *rgssp)
1327 {
1328 WARN_ON_ONCE(rgssp && rgssp != rcu_gp_slow_suppress && rcu_gp_slow_suppress != NULL);
1329
1330 WRITE_ONCE(rcu_gp_slow_suppress, NULL);
1331 }
1332 EXPORT_SYMBOL_GPL(rcu_gp_slow_unregister);
1333
1334 static bool rcu_gp_slow_is_suppressed(void)
1335 {
1336 atomic_t *rgssp = READ_ONCE(rcu_gp_slow_suppress);
1337
1338 return rgssp && atomic_read(rgssp);
1339 }
1340
1341 static void rcu_gp_slow(int delay)
1342 {
1343 if (!rcu_gp_slow_is_suppressed() && delay > 0 &&
1344 !(rcu_seq_ctr(rcu_state.gp_seq) % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1345 schedule_timeout_idle(delay);
1346 }
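
/*
 * Example (illustrative): the debug-only delays declared near the top of
 * this file are fed into rcu_gp_slow() by the grace-period machinery, so
 * booting with, say, rcutree.gp_init_delay=3 requests an occasional 3-jiffy
 * schedule_timeout_idle() sleep during grace-period initialization,
 * rate-limited as described at the PER_RCU_NODE_PERIOD definition above.
 * rcu_gp_slow_register() lets rcutorture and similar tests suppress these
 * delays entirely.
 */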
1347
1348 static unsigned long sleep_duration;
1349
1350 /* Allow rcutorture to stall the grace-period kthread. */
1351 void rcu_gp_set_torture_wait(int duration)
1352 {
1353 if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1354 WRITE_ONCE(sleep_duration, duration);
1355 }
1356 EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1357
1358 /* Actually implement the aforementioned wait. */
1359 static void rcu_gp_torture_wait(void)
1360 {
1361 unsigned long duration;
1362
1363 if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1364 return;
1365 duration = xchg(&sleep_duration, 0UL);
1366 if (duration > 0) {
1367 pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1368 schedule_timeout_idle(duration);
1369 pr_alert("%s: Wait complete\n", __func__);
1370 }
1371 }
1372
1373 /*
1374 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1375 * processing.
1376 */
1377 static void rcu_strict_gp_boundary(void *unused)
1378 {
1379 invoke_rcu_core();
1380 }
1381
1382 // Make the polled API aware of the beginning of a grace period.
1383 static void rcu_poll_gp_seq_start(unsigned long *snap)
1384 {
1385 struct rcu_node *rnp = rcu_get_root();
1386
1387 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1388 raw_lockdep_assert_held_rcu_node(rnp);
1389
1390 // If RCU was idle, note beginning of GP.
1391 if (!rcu_seq_state(rcu_state.gp_seq_polled))
1392 rcu_seq_start(&rcu_state.gp_seq_polled);
1393
1394 // Either way, record current state.
1395 *snap = rcu_state.gp_seq_polled;
1396 }
1397
1398 // Make the polled API aware of the end of a grace period.
1399 static void rcu_poll_gp_seq_end(unsigned long *snap)
1400 {
1401 struct rcu_node *rnp = rcu_get_root();
1402
1403 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1404 raw_lockdep_assert_held_rcu_node(rnp);
1405
1406 // If the previously noted GP is still in effect, record the
1407 // end of that GP. Either way, zero counter to avoid counter-wrap
1408 // problems.
1409 if (*snap && *snap == rcu_state.gp_seq_polled) {
1410 rcu_seq_end(&rcu_state.gp_seq_polled);
1411 rcu_state.gp_seq_polled_snap = 0;
1412 rcu_state.gp_seq_polled_exp_snap = 0;
1413 } else {
1414 *snap = 0;
1415 }
1416 }
1417
1418 // Make the polled API aware of the beginning of a grace period, but
1419 // where caller does not hold the root rcu_node structure's lock.
1420 static void rcu_poll_gp_seq_start_unlocked(unsigned long *snap)
1421 {
1422 unsigned long flags;
1423 struct rcu_node *rnp = rcu_get_root();
1424
1425 if (rcu_init_invoked()) {
1426 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1427 lockdep_assert_irqs_enabled();
1428 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1429 }
1430 rcu_poll_gp_seq_start(snap);
1431 if (rcu_init_invoked())
1432 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1433 }
1434
1435 // Make the polled API aware of the end of a grace period, but where
1436 // caller does not hold the root rcu_node structure's lock.
1437 static void rcu_poll_gp_seq_end_unlocked(unsigned long *snap)
1438 {
1439 unsigned long flags;
1440 struct rcu_node *rnp = rcu_get_root();
1441
1442 if (rcu_init_invoked()) {
1443 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
1444 lockdep_assert_irqs_enabled();
1445 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1446 }
1447 rcu_poll_gp_seq_end(snap);
1448 if (rcu_init_invoked())
1449 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1450 }
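
/*
 * Editorial sketch (not part of the original file): the gp_seq_polled
 * counter maintained by the helpers above backs the user-facing polled
 * grace-period API. Assuming the usual get_state_synchronize_rcu() and
 * poll_state_synchronize_rcu() interfaces, a caller typically does
 * something like the following, where old_ptr is a placeholder for a
 * previously unpublished pointer:
 *
 *	unsigned long cookie;
 *
 *	cookie = get_state_synchronize_rcu();	// Snapshot current GP state.
 *	...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(old_ptr);			// A full grace period has elapsed.
 *	else
 *		synchronize_rcu();		// Otherwise wait for one.
 */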
1451
1452 /*
1453 * There is a single llist, which is used for handling
1454 * synchronize_rcu() users' enqueued rcu_synchronize nodes.
1455 * Within this llist, there are two tail pointers:
1456 *
1457 * wait tail: Tracks the set of nodes, which need to
1458 * wait for the current GP to complete.
1459 * done tail: Tracks the set of nodes, for which grace
1460 * period has elapsed. These nodes processing
1461 * will be done as part of the cleanup work
1462 * execution by a kworker.
1463 *
1464 * At every grace period init, a new wait node is added
1465 * to the llist. This wait node is used as wait tail
1466 * for this new grace period. Given that there is a fixed
1467 * number of wait nodes, if all wait nodes are in use
1468 * (which can happen when kworker callback processing
1469 * is delayed) and an additional grace period is requested,
1470 * then the system is slow in processing callbacks.
1471 *
1472 * TODO: If slow processing is detected, the first node
1473 * in the llist should be used as the wait tail for this
1474 * grace period, so that users which must wait due
1475 * to slow processing are handled by _this_ grace period
1476 * and not the next one.
1477 *
1478 * Below is an illustration of how the done and wait
1479 * tail pointers move from one set of rcu_synchronize nodes
1480 * to the other, as grace periods start and finish and
1481 * nodes are processed by kworker.
1482 *
1483 *
1484 * a. Initial llist callbacks list:
1485 *
1486 * +----------+ +--------+ +-------+
1487 * | | | | | |
1488 * | head |---------> | cb2 |--------->| cb1 |
1489 * | | | | | |
1490 * +----------+ +--------+ +-------+
1491 *
1492 *
1493 *
1494 * b. New GP1 Start:
1495 *
1496 * WAIT TAIL
1497 * |
1498 * |
1499 * v
1500 * +----------+ +--------+ +--------+ +-------+
1501 * | | | | | | | |
1502 * | head ------> wait |------> cb2 |------> | cb1 |
1503 * | | | head1 | | | | |
1504 * +----------+ +--------+ +--------+ +-------+
1505 *
1506 *
1507 *
1508 * c. GP completion:
1509 *
1510 * WAIT_TAIL == DONE_TAIL
1511 *
1512 * DONE TAIL
1513 * |
1514 * |
1515 * v
1516 * +----------+ +--------+ +--------+ +-------+
1517 * | | | | | | | |
1518 * | head ------> wait |------> cb2 |------> | cb1 |
1519 * | | | head1 | | | | |
1520 * +----------+ +--------+ +--------+ +-------+
1521 *
1522 *
1523 *
1524 * d. New callbacks and GP2 start:
1525 *
1526 * WAIT TAIL DONE TAIL
1527 * | |
1528 * | |
1529 * v v
1530 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1531 * | | | | | | | | | | | | | |
1532 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1533 * | | | head2| | | | | |head1| | | | |
1534 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1535 *
1536 *
1537 *
1538 * e. GP2 completion:
1539 *
1540 * WAIT_TAIL == DONE_TAIL
1541 * DONE TAIL
1542 * |
1543 * |
1544 * v
1545 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1546 * | | | | | | | | | | | | | |
1547 * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 |
1548 * | | | head2| | | | | |head1| | | | |
1549 * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+
1550 *
1551 *
1552 * While the llist state transitions from d to e, a kworker
1553 * can start executing rcu_sr_normal_gp_cleanup_work() and
1554 * can observe either the old done tail (@c) or the new
1555 * done tail (@e). So, done tail updates and reads need
1556 * to use the rel-acq semantics. If the concurrent kworker
1557 * observes the old done tail, the newly queued work
1558 * execution will process the updated done tail. If the
1559 * concurrent kworker observes the new done tail, then
1560 * the newly queued work will skip processing the done
1561 * tail, as workqueue semantics guarantees that the new
1562 * work is executed only after the previous one completes.
1563 *
1564 * f. kworker callbacks processing complete:
1565 *
1566 *
1567 * DONE TAIL
1568 * |
1569 * |
1570 * v
1571 * +----------+ +--------+
1572 * | | | |
1573 * | head ------> wait |
1574 * | | | head2 |
1575 * +----------+ +--------+
1576 *
1577 */
1578 static bool rcu_sr_is_wait_head(struct llist_node *node)
1579 {
1580 return &(rcu_state.srs_wait_nodes)[0].node <= node &&
1581 node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
1582 }
1583
1584 static struct llist_node *rcu_sr_get_wait_head(void)
1585 {
1586 struct sr_wait_node *sr_wn;
1587 int i;
1588
1589 for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) {
1590 sr_wn = &(rcu_state.srs_wait_nodes)[i];
1591
1592 if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1))
1593 return &sr_wn->node;
1594 }
1595
1596 return NULL;
1597 }
1598
1599 static void rcu_sr_put_wait_head(struct llist_node *node)
1600 {
1601 struct sr_wait_node *sr_wn = container_of(node, struct sr_wait_node, node);
1602
1603 atomic_set_release(&sr_wn->inuse, 0);
1604 }
1605
1606 /* Disabled by default. */
1607 static int rcu_normal_wake_from_gp;
1608 module_param(rcu_normal_wake_from_gp, int, 0644);
1609 static struct workqueue_struct *sync_wq;
1610
1611 static void rcu_sr_normal_complete(struct llist_node *node)
1612 {
1613 struct rcu_synchronize *rs = container_of(
1614 (struct rcu_head *) node, struct rcu_synchronize, head);
1615 unsigned long oldstate = (unsigned long) rs->head.func;
1616
1617 WARN_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
1618 !poll_state_synchronize_rcu(oldstate),
1619 "A full grace period has not elapsed yet: %lu",
1620 rcu_seq_diff(get_state_synchronize_rcu(), oldstate));
1621
1622 /* Finally. */
1623 complete(&rs->completion);
1624 }
1625
1626 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
1627 {
1628 struct llist_node *done, *rcu, *next, *head;
1629
1630 /*
1631 * This work item can execute while a new done tail
1632 * is being updated by the grace-period kthread in
1633 * rcu_sr_normal_gp_cleanup(). So, reads and updates
1634 * of the done tail need to follow acq-rel semantics.
1635 *
1636 * Given that workqueue semantics guarantee that a single
1637 * work item cannot be executed concurrently by multiple
1638 * kworkers, the done-tail list manipulations are protected
1639 * here.
1640 */
1641 done = smp_load_acquire(&rcu_state.srs_done_tail);
1642 if (WARN_ON_ONCE(!done))
1643 return;
1644
1645 WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
1646 head = done->next;
1647 done->next = NULL;
1648
1649 /*
1650 * The dummy node, which is pointed to by the
1651 * done tail acquire-read above, is not removed
1652 * here. This allows lockless additions of new
1653 * rcu_synchronize nodes in rcu_sr_normal_add_req(),
1654 * while the cleanup work executes. The dummy
1655 * node is removed in the next round of cleanup
1656 * work execution.
1657 */
1658 llist_for_each_safe(rcu, next, head) {
1659 if (!rcu_sr_is_wait_head(rcu)) {
1660 rcu_sr_normal_complete(rcu);
1661 continue;
1662 }
1663
1664 rcu_sr_put_wait_head(rcu);
1665 }
1666
1667 /* Order list manipulations with atomic access. */
1668 atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
1669 }
1670
1671 /*
1672 * Helper function for rcu_gp_cleanup().
1673 */
1674 static void rcu_sr_normal_gp_cleanup(void)
1675 {
1676 struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
1677 int done = 0;
1678
1679 wait_tail = rcu_state.srs_wait_tail;
1680 if (wait_tail == NULL)
1681 return;
1682
1683 rcu_state.srs_wait_tail = NULL;
1684 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1685 WARN_ON_ONCE(!rcu_sr_is_wait_head(wait_tail));
1686
1687 /*
1688 * Process (a) and (d) cases. See an illustration.
1689 */
1690 llist_for_each_safe(rcu, next, wait_tail->next) {
1691 if (rcu_sr_is_wait_head(rcu))
1692 break;
1693
1694 rcu_sr_normal_complete(rcu);
1695 // This node can be the last one, so keep wait_tail->next updated at each step.
1696 wait_tail->next = next;
1697
1698 if (++done == SR_MAX_USERS_WAKE_FROM_GP)
1699 break;
1700 }
1701
1702 /*
1703 * Fast path: no more users to process except putting the second-to-last
1704 * wait head, and only if there are no in-flight workers. If there are
1705 * in-flight workers, they will remove the last wait head.
1706 *
1707 * Note that the ACQUIRE orders atomic access with list manipulation.
1708 */
1709 if (wait_tail->next && wait_tail->next->next == NULL &&
1710 rcu_sr_is_wait_head(wait_tail->next) &&
1711 !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1712 rcu_sr_put_wait_head(wait_tail->next);
1713 wait_tail->next = NULL;
1714 }
1715
1716 /* Concurrent sr_normal_gp_cleanup work might observe this update. */
1717 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1718 smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1719
1720 /*
1721 * We schedule a work item in order to perform final processing
1722 * of any outstanding users (if some are still left) and to release
1723 * the wait heads added by the rcu_sr_normal_gp_init() call.
1724 */
1725 if (wait_tail->next) {
1726 atomic_inc(&rcu_state.srs_cleanups_pending);
1727 if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1728 atomic_dec(&rcu_state.srs_cleanups_pending);
1729 }
1730 }
1731
1732 /*
1733 * Helper function for rcu_gp_init().
1734 */
1735 static bool rcu_sr_normal_gp_init(void)
1736 {
1737 struct llist_node *first;
1738 struct llist_node *wait_head;
1739 bool start_new_poll = false;
1740
1741 first = READ_ONCE(rcu_state.srs_next.first);
1742 if (!first || rcu_sr_is_wait_head(first))
1743 return start_new_poll;
1744
1745 wait_head = rcu_sr_get_wait_head();
1746 if (!wait_head) {
1747 // Kick another GP to retry.
1748 start_new_poll = true;
1749 return start_new_poll;
1750 }
1751
1752 /* Inject a wait-dummy-node. */
1753 llist_add(wait_head, &rcu_state.srs_next);
1754
1755 /*
1756 * The wait list of rcu_synchronize nodes should be empty at this
1757 * point, since the GP kthread's rcu_gp_init() -> rcu_gp_cleanup()
1758 * sequence rolls it over. If not, it is a bug, so warn the user.
1759 */
1760 WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
1761 rcu_state.srs_wait_tail = wait_head;
1762 ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);
1763
1764 return start_new_poll;
1765 }
1766
1767 static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
1768 {
1769 llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
1770 }
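
/*
 * Editorial sketch (an assumption, not code from this file): a
 * synchronize_rcu() caller that takes the wake-from-GP path queues an
 * on-stack rcu_synchronize node via rcu_sr_normal_add_req() and then
 * sleeps on its completion, which rcu_sr_normal_complete() signals once
 * the grace period has elapsed. Roughly:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	rcu_sr_normal_add_req(&rs);
 *	// ...start a grace period if one is not already in progress...
 *	wait_for_completion(&rs.completion);	// Woken by rcu_sr_normal_complete().
 *	destroy_rcu_head_on_stack(&rs.head);
 */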
1771
1772 /*
1773 * Initialize a new grace period. Return false if no grace period required.
1774 */
1775 static noinline_for_stack bool rcu_gp_init(void)
1776 {
1777 unsigned long flags;
1778 unsigned long oldmask;
1779 unsigned long mask;
1780 struct rcu_data *rdp;
1781 struct rcu_node *rnp = rcu_get_root();
1782 bool start_new_poll;
1783
1784 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1785 raw_spin_lock_irq_rcu_node(rnp);
1786 if (!rcu_state.gp_flags) {
1787 /* Spurious wakeup, tell caller to go back to sleep. */
1788 raw_spin_unlock_irq_rcu_node(rnp);
1789 return false;
1790 }
1791 WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1792
1793 if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1794 /*
1795 * Grace period already in progress, don't start another.
1796 * Not supposed to be able to happen.
1797 */
1798 raw_spin_unlock_irq_rcu_node(rnp);
1799 return false;
1800 }
1801
1802 /* Advance to a new grace period and initialize state. */
1803 record_gp_stall_check_time();
1804 /* Record GP times before starting GP, hence rcu_seq_start(). */
1805 rcu_seq_start(&rcu_state.gp_seq);
1806 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1807 start_new_poll = rcu_sr_normal_gp_init();
1808 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1809 rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
1810 raw_spin_unlock_irq_rcu_node(rnp);
1811
1812 /*
1813 * The "start_new_poll" is set to true, only when this GP is not able
1814 * to handle anything and there are outstanding users. It happens when
1815 * the rcu_sr_normal_gp_init() function was not able to insert a dummy
1816 * separator to the llist, because there were no left any dummy-nodes.
1817 *
1818 * Number of dummy-nodes is fixed, it could be that we are run out of
1819 * them, if so we start a new pool request to repeat a try. It is rare
1820 * and it means that a system is doing a slow processing of callbacks.
1821 */
1822 if (start_new_poll)
1823 (void) start_poll_synchronize_rcu();
1824
1825 /*
1826 * Apply per-leaf buffered online and offline operations to
1827 * the rcu_node tree. Note that this new grace period need not
1828 * wait for subsequent online CPUs, and that RCU hooks in the CPU
1829 * offlining path, when combined with checks in this function,
1830 * will handle CPUs that are currently going offline or that will
1831 * go offline later. Please also refer to "Hotplug CPU" section
1832 * of RCU's Requirements documentation.
1833 */
1834 WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1835 /* Exclude CPU hotplug operations. */
1836 rcu_for_each_leaf_node(rnp) {
1837 local_irq_disable();
1838 arch_spin_lock(&rcu_state.ofl_lock);
1839 raw_spin_lock_rcu_node(rnp);
1840 if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1841 !rnp->wait_blkd_tasks) {
1842 /* Nothing to do on this leaf rcu_node structure. */
1843 raw_spin_unlock_rcu_node(rnp);
1844 arch_spin_unlock(&rcu_state.ofl_lock);
1845 local_irq_enable();
1846 continue;
1847 }
1848
1849 /* Record old state, apply changes to ->qsmaskinit field. */
1850 oldmask = rnp->qsmaskinit;
1851 rnp->qsmaskinit = rnp->qsmaskinitnext;
1852
1853 /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1854 if (!oldmask != !rnp->qsmaskinit) {
1855 if (!oldmask) { /* First online CPU for rcu_node. */
1856 if (!rnp->wait_blkd_tasks) /* Ever offline? */
1857 rcu_init_new_rnp(rnp);
1858 } else if (rcu_preempt_has_tasks(rnp)) {
1859 rnp->wait_blkd_tasks = true; /* blocked tasks */
1860 } else { /* Last offline CPU and can propagate. */
1861 rcu_cleanup_dead_rnp(rnp);
1862 }
1863 }
1864
1865 /*
1866 * If all waited-on tasks from prior grace period are
1867 * done, and if all this rcu_node structure's CPUs are
1868 * still offline, propagate up the rcu_node tree and
1869 * clear ->wait_blkd_tasks. Otherwise, if one of this
1870 * rcu_node structure's CPUs has since come back online,
1871 * simply clear ->wait_blkd_tasks.
1872 */
1873 if (rnp->wait_blkd_tasks &&
1874 (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1875 rnp->wait_blkd_tasks = false;
1876 if (!rnp->qsmaskinit)
1877 rcu_cleanup_dead_rnp(rnp);
1878 }
1879
1880 raw_spin_unlock_rcu_node(rnp);
1881 arch_spin_unlock(&rcu_state.ofl_lock);
1882 local_irq_enable();
1883 }
1884 rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1885
1886 /*
1887 * Set the quiescent-state-needed bits in all the rcu_node
1888 * structures for all currently online CPUs in breadth-first
1889 * order, starting from the root rcu_node structure, relying on the
1890 * layout of the tree within the rcu_state.node[] array. Note that
1891 * other CPUs will access only the leaves of the hierarchy, thus
1892 * seeing that no grace period is in progress, at least until the
1893 * corresponding leaf node has been initialized.
1894 *
1895 * The grace period cannot complete until the initialization
1896 * process finishes, because this kthread handles both.
1897 */
1898 WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1899 rcu_for_each_node_breadth_first(rnp) {
1900 rcu_gp_slow(gp_init_delay);
1901 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1902 rdp = this_cpu_ptr(&rcu_data);
1903 rcu_preempt_check_blocked_tasks(rnp);
1904 rnp->qsmask = rnp->qsmaskinit;
1905 WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1906 if (rnp == rdp->mynode)
1907 (void)__note_gp_changes(rnp, rdp);
1908 rcu_preempt_boost_start_gp(rnp);
1909 trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1910 rnp->level, rnp->grplo,
1911 rnp->grphi, rnp->qsmask);
1912 /* Quiescent states for tasks on any now-offline CPUs. */
1913 mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1914 rnp->rcu_gp_init_mask = mask;
1915 if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1916 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1917 else
1918 raw_spin_unlock_irq_rcu_node(rnp);
1919 cond_resched_tasks_rcu_qs();
1920 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1921 }
1922
1923 // If strict, make all CPUs aware of new grace period.
1924 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1925 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1926
1927 return true;
1928 }
1929
1930 /*
1931 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1932 * time.
1933 */
1934 static bool rcu_gp_fqs_check_wake(int *gfp)
1935 {
1936 struct rcu_node *rnp = rcu_get_root();
1937
1938 // If under overload conditions, force an immediate FQS scan.
1939 if (*gfp & RCU_GP_FLAG_OVLD)
1940 return true;
1941
1942 // Someone like call_rcu() requested a force-quiescent-state scan.
1943 *gfp = READ_ONCE(rcu_state.gp_flags);
1944 if (*gfp & RCU_GP_FLAG_FQS)
1945 return true;
1946
1947 // The current grace period has completed.
1948 if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1949 return true;
1950
1951 return false;
1952 }
1953
1954 /*
1955 * Do one round of quiescent-state forcing.
1956 */
1957 static void rcu_gp_fqs(bool first_time)
1958 {
1959 int nr_fqs = READ_ONCE(rcu_state.nr_fqs_jiffies_stall);
1960 struct rcu_node *rnp = rcu_get_root();
1961
1962 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1963 WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1964
1965 WARN_ON_ONCE(nr_fqs > 3);
1966 /* Only countdown nr_fqs for stall purposes if jiffies moves. */
1967 if (nr_fqs) {
1968 if (nr_fqs == 1) {
1969 WRITE_ONCE(rcu_state.jiffies_stall,
1970 jiffies + rcu_jiffies_till_stall_check());
1971 }
1972 WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, --nr_fqs);
1973 }
1974
1975 if (first_time) {
1976 /* Collect dyntick-idle snapshots. */
1977 force_qs_rnp(rcu_watching_snap_save);
1978 } else {
1979 /* Handle dyntick-idle and offline CPUs. */
1980 force_qs_rnp(rcu_watching_snap_recheck);
1981 }
1982 /* Clear flag to prevent immediate re-entry. */
1983 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1984 raw_spin_lock_irq_rcu_node(rnp);
1985 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & ~RCU_GP_FLAG_FQS);
1986 raw_spin_unlock_irq_rcu_node(rnp);
1987 }
1988 }
1989
1990 /*
1991 * Loop doing repeated quiescent-state forcing until the grace period ends.
1992 */
1993 static noinline_for_stack void rcu_gp_fqs_loop(void)
1994 {
1995 bool first_gp_fqs = true;
1996 int gf = 0;
1997 unsigned long j;
1998 int ret;
1999 struct rcu_node *rnp = rcu_get_root();
2000
2001 j = READ_ONCE(jiffies_till_first_fqs);
2002 if (rcu_state.cbovld)
2003 gf = RCU_GP_FLAG_OVLD;
2004 ret = 0;
2005 for (;;) {
2006 if (rcu_state.cbovld) {
2007 j = (j + 2) / 3;
2008 if (j <= 0)
2009 j = 1;
2010 }
2011 if (!ret || time_before(jiffies + j, rcu_state.jiffies_force_qs)) {
2012 WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
2013 /*
2014 * jiffies_force_qs before RCU_GP_WAIT_FQS state
2015 * update; required for stall checks.
2016 */
2017 smp_wmb();
2018 WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
2019 jiffies + (j ? 3 * j : 2));
2020 }
2021 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2022 TPS("fqswait"));
2023 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
2024 (void)swait_event_idle_timeout_exclusive(rcu_state.gp_wq,
2025 rcu_gp_fqs_check_wake(&gf), j);
2026 rcu_gp_torture_wait();
2027 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2028 /* Locking provides needed memory barriers. */
2029 /*
2030 * Exit the loop if the root rcu_node structure indicates that the grace period
2031 * has ended. The rcu_preempt_blocked_readers_cgp(rnp) check
2032 * is required only for single-node rcu_node trees because readers blocking
2033 * the current grace period are queued only on leaf rcu_node structures.
2034 * For multi-node trees, checking the root node's ->qsmask suffices, because a
2035 * given root node's ->qsmask bit is cleared only when all CPUs and tasks from
2036 * the corresponding leaf nodes have passed through their quiescent state.
2037 */
2038 if (!READ_ONCE(rnp->qsmask) &&
2039 !rcu_preempt_blocked_readers_cgp(rnp))
2040 break;
2041 /* If time for quiescent-state forcing, do it. */
2042 if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2043 (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
2044 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2045 TPS("fqsstart"));
2046 rcu_gp_fqs(first_gp_fqs);
2047 gf = 0;
2048 if (first_gp_fqs) {
2049 first_gp_fqs = false;
2050 gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2051 }
2052 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2053 TPS("fqsend"));
2054 cond_resched_tasks_rcu_qs();
2055 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2056 ret = 0; /* Force full wait till next FQS. */
2057 j = READ_ONCE(jiffies_till_next_fqs);
2058 } else {
2059 /* Deal with stray signal. */
2060 cond_resched_tasks_rcu_qs();
2061 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2062 WARN_ON(signal_pending(current));
2063 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2064 TPS("fqswaitsig"));
2065 ret = 1; /* Keep old FQS timing. */
2066 j = jiffies;
2067 if (time_after(jiffies, rcu_state.jiffies_force_qs))
2068 j = 1;
2069 else
2070 j = rcu_state.jiffies_force_qs - j;
2071 gf = 0;
2072 }
2073 }
2074 }
2075
2076 /*
2077 * Clean up after the old grace period.
2078 */
2079 static noinline void rcu_gp_cleanup(void)
2080 {
2081 int cpu;
2082 bool needgp = false;
2083 unsigned long gp_duration;
2084 unsigned long new_gp_seq;
2085 bool offloaded;
2086 struct rcu_data *rdp;
2087 struct rcu_node *rnp = rcu_get_root();
2088 struct swait_queue_head *sq;
2089
2090 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2091 raw_spin_lock_irq_rcu_node(rnp);
2092 rcu_state.gp_end = jiffies;
2093 gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2094 if (gp_duration > rcu_state.gp_max)
2095 rcu_state.gp_max = gp_duration;
2096
2097 /*
2098 * We know the grace period is complete, but to everyone else
2099 * it appears to still be ongoing. But it is also the case
2100 * that to everyone else it looks like there is nothing that
2101 * they can do to advance the grace period. It is therefore
2102 * safe for us to drop the lock in order to mark the grace
2103 * period as completed in all of the rcu_node structures.
2104 */
2105 rcu_poll_gp_seq_end(&rcu_state.gp_seq_polled_snap);
2106 raw_spin_unlock_irq_rcu_node(rnp);
2107
2108 /*
2109 * Propagate new ->gp_seq value to rcu_node structures so that
2110 * other CPUs don't have to wait until the start of the next grace
2111 * period to process their callbacks. This also avoids some nasty
2112 * RCU grace-period initialization races by forcing the end of
2113 * the current grace period to be completely recorded in all of
2114 * the rcu_node structures before the beginning of the next grace
2115 * period is recorded in any of the rcu_node structures.
2116 */
2117 new_gp_seq = rcu_state.gp_seq;
2118 rcu_seq_end(&new_gp_seq);
2119 rcu_for_each_node_breadth_first(rnp) {
2120 raw_spin_lock_irq_rcu_node(rnp);
2121 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2122 dump_blkd_tasks(rnp, 10);
2123 WARN_ON_ONCE(rnp->qsmask);
2124 WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2125 if (!rnp->parent)
2126 smp_mb(); // Order against failing poll_state_synchronize_rcu_full().
2127 rdp = this_cpu_ptr(&rcu_data);
2128 if (rnp == rdp->mynode)
2129 needgp = __note_gp_changes(rnp, rdp) || needgp;
2130 /* smp_mb() provided by prior unlock-lock pair. */
2131 needgp = rcu_future_gp_cleanup(rnp) || needgp;
2132 // Reset overload indication for CPUs no longer overloaded
2133 if (rcu_is_leaf_node(rnp))
2134 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2135 rdp = per_cpu_ptr(&rcu_data, cpu);
2136 check_cb_ovld_locked(rdp, rnp);
2137 }
2138 sq = rcu_nocb_gp_get(rnp);
2139 raw_spin_unlock_irq_rcu_node(rnp);
2140 rcu_nocb_gp_cleanup(sq);
2141 cond_resched_tasks_rcu_qs();
2142 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2143 rcu_gp_slow(gp_cleanup_delay);
2144 }
2145 rnp = rcu_get_root();
2146 raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2147
2148 /* Declare grace period done, trace first to use old GP number. */
2149 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2150 rcu_seq_end(&rcu_state.gp_seq);
2151 ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2152 WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2153 /* Check for GP requests since above loop. */
2154 rdp = this_cpu_ptr(&rcu_data);
2155 if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2156 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2157 TPS("CleanupMore"));
2158 needgp = true;
2159 }
2160 /* Advance CBs to reduce false positives below. */
2161 offloaded = rcu_rdp_is_offloaded(rdp);
2162 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2163
2164 // We get here if a grace period was needed ("needgp")
2165 // and the above call to rcu_accelerate_cbs() did not set
2166 // the RCU_GP_FLAG_INIT bit in ->gp_flags (which records
2167 // the need for another grace period). The purpose
2168 // of the "offloaded" check is to avoid invoking
2169 // rcu_accelerate_cbs() on an offloaded CPU because we do not
2170 // hold the ->nocb_lock needed to safely access an offloaded
2171 // ->cblist. We do not want to acquire that lock because
2172 // it can be heavily contended during callback floods.
2173
2174 WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2175 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2176 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
2177 } else {
2178
2179 // We get here either if there is no need for an
2180 // additional grace period or if rcu_accelerate_cbs() has
2181 // already set the RCU_GP_FLAG_INIT bit in ->gp_flags.
2182 // So all we need to do is to clear all of the other
2183 // ->gp_flags bits.
2184
2185 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2186 }
2187 raw_spin_unlock_irq_rcu_node(rnp);
2188
2189 // Make synchronize_rcu() users aware of the end of old grace period.
2190 rcu_sr_normal_gp_cleanup();
2191
2192 // If strict, make all CPUs aware of the end of the old grace period.
2193 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2194 on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2195 }
2196
2197 /*
2198 * Body of kthread that handles grace periods.
2199 */
2200 static int __noreturn rcu_gp_kthread(void *unused)
2201 {
2202 rcu_bind_gp_kthread();
2203 for (;;) {
2204
2205 /* Handle grace-period start. */
2206 for (;;) {
2207 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2208 TPS("reqwait"));
2209 WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2210 swait_event_idle_exclusive(rcu_state.gp_wq,
2211 READ_ONCE(rcu_state.gp_flags) &
2212 RCU_GP_FLAG_INIT);
2213 rcu_gp_torture_wait();
2214 WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2215 /* Locking provides needed memory barrier. */
2216 if (rcu_gp_init())
2217 break;
2218 cond_resched_tasks_rcu_qs();
2219 WRITE_ONCE(rcu_state.gp_activity, jiffies);
2220 WARN_ON(signal_pending(current));
2221 trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2222 TPS("reqwaitsig"));
2223 }
2224
2225 /* Handle quiescent-state forcing. */
2226 rcu_gp_fqs_loop();
2227
2228 /* Handle grace-period end. */
2229 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2230 rcu_gp_cleanup();
2231 WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2232 }
2233 }
2234
2235 /*
2236 * Report a full set of quiescent states to the rcu_state data structure.
2237 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2238 * another grace period is required. Whether we wake the grace-period
2239 * kthread or it awakens itself for the next round of quiescent-state
2240 * forcing, that kthread will clean up after the just-completed grace
2241 * period. Note that the caller must hold rnp->lock, which is released
2242 * before return.
2243 */
2244 static void rcu_report_qs_rsp(unsigned long flags)
2245 __releases(rcu_get_root()->lock)
2246 {
2247 raw_lockdep_assert_held_rcu_node(rcu_get_root());
2248 WARN_ON_ONCE(!rcu_gp_in_progress());
2249 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2250 raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2251 rcu_gp_kthread_wake();
2252 }
2253
2254 /*
2255 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2256 * Allows quiescent states for a group of CPUs to be reported at one go
2257 * to the specified rcu_node structure, though all the CPUs in the group
2258 * must be represented by the same rcu_node structure (which need not be a
2259 * leaf rcu_node structure, though it often will be). The gps parameter
2260 * is the grace-period snapshot, which means that the quiescent states
2261 * are valid only if rnp->gp_seq is equal to gps. That structure's lock
2262 * must be held upon entry, and it is released before return.
2263 *
2264 * As a special case, if mask is zero, the bit-already-cleared check is
2265 * disabled. This allows propagating quiescent state due to resumed tasks
2266 * during grace-period initialization.
2267 */
2268 static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2269 unsigned long gps, unsigned long flags)
2270 __releases(rnp->lock)
2271 {
2272 unsigned long oldmask = 0;
2273 struct rcu_node *rnp_c;
2274
2275 raw_lockdep_assert_held_rcu_node(rnp);
2276
2277 /* Walk up the rcu_node hierarchy. */
2278 for (;;) {
2279 if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2280
2281 /*
2282 * Our bit has already been cleared, or the
2283 * relevant grace period is already over, so done.
2284 */
2285 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2286 return;
2287 }
2288 WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2289 WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2290 rcu_preempt_blocked_readers_cgp(rnp));
2291 WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2292 trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2293 mask, rnp->qsmask, rnp->level,
2294 rnp->grplo, rnp->grphi,
2295 !!rnp->gp_tasks);
2296 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2297
2298 /* Other bits still set at this level, so done. */
2299 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2300 return;
2301 }
2302 rnp->completedqs = rnp->gp_seq;
2303 mask = rnp->grpmask;
2304 if (rnp->parent == NULL) {
2305
2306 /* No more levels. Exit loop holding root lock. */
2307
2308 break;
2309 }
2310 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2311 rnp_c = rnp;
2312 rnp = rnp->parent;
2313 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2314 oldmask = READ_ONCE(rnp_c->qsmask);
2315 }
2316
2317 /*
2318 * Get here if we are the last CPU to pass through a quiescent
2319 * state for this grace period. Invoke rcu_report_qs_rsp()
2320 * to clean up and start the next grace period if one is needed.
2321 */
2322 rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2323 }
2324
2325 /*
2326 * Record a quiescent state for all tasks that were previously queued
2327 * on the specified rcu_node structure and that were blocking the current
2328 * RCU grace period. The caller must hold the corresponding rnp->lock with
2329 * irqs disabled, and this lock is released upon return, but irqs remain
2330 * disabled.
2331 */
2332 static void __maybe_unused
2333 rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2334 __releases(rnp->lock)
2335 {
2336 unsigned long gps;
2337 unsigned long mask;
2338 struct rcu_node *rnp_p;
2339
2340 raw_lockdep_assert_held_rcu_node(rnp);
2341 if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2342 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2343 rnp->qsmask != 0) {
2344 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2345 return; /* Still need more quiescent states! */
2346 }
2347
2348 rnp->completedqs = rnp->gp_seq;
2349 rnp_p = rnp->parent;
2350 if (rnp_p == NULL) {
2351 /*
2352 * Only one rcu_node structure in the tree, so don't
2353 * try to report up to its nonexistent parent!
2354 */
2355 rcu_report_qs_rsp(flags);
2356 return;
2357 }
2358
2359 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2360 gps = rnp->gp_seq;
2361 mask = rnp->grpmask;
2362 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2363 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
2364 rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2365 }
2366
2367 /*
2368 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2369 * structure. This must be called from the specified CPU.
2370 */
2371 static void
2372 rcu_report_qs_rdp(struct rcu_data *rdp)
2373 {
2374 unsigned long flags;
2375 unsigned long mask;
2376 struct rcu_node *rnp;
2377
2378 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2379 rnp = rdp->mynode;
2380 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2381 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2382 rdp->gpwrap) {
2383
2384 /*
2385 * The grace period in which this quiescent state was
2386 * recorded has ended, so don't report it upwards.
2387 * We will instead need a new quiescent state that lies
2388 * within the current grace period.
2389 */
2390 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2391 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2392 return;
2393 }
2394 mask = rdp->grpmask;
2395 rdp->core_needs_qs = false;
2396 if ((rnp->qsmask & mask) == 0) {
2397 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2398 } else {
2399 /*
2400 * This GP can't end until cpu checks in, so all of our
2401 * callbacks can be processed during the next GP.
2402 *
2403 * NOCB kthreads have their own way to deal with that...
2404 */
2405 if (!rcu_rdp_is_offloaded(rdp)) {
2406 /*
2407 * The current GP has not yet ended, so it
2408 * should not be possible for rcu_accelerate_cbs()
2409 * to return true. So complain, but don't awaken.
2410 */
2411 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2412 }
2413
2414 rcu_disable_urgency_upon_qs(rdp);
2415 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2416 /* ^^^ Released rnp->lock */
2417 }
2418 }
2419
2420 /*
2421 * Check to see if there is a new grace period of which this CPU
2422 * is not yet aware, and if so, set up local rcu_data state for it.
2423 * Otherwise, see if this CPU has just passed through its first
2424 * quiescent state for this grace period, and record that fact if so.
2425 */
2426 static void
2427 rcu_check_quiescent_state(struct rcu_data *rdp)
2428 {
2429 /* Check for grace-period ends and beginnings. */
2430 note_gp_changes(rdp);
2431
2432 /*
2433 * Does this CPU still need to do its part for current grace period?
2434 * If no, return and let the other CPUs do their part as well.
2435 */
2436 if (!rdp->core_needs_qs)
2437 return;
2438
2439 /*
2440 * Was there a quiescent state since the beginning of the grace
2441 * period? If no, then exit and wait for the next call.
2442 */
2443 if (rdp->cpu_no_qs.b.norm)
2444 return;
2445
2446 /*
2447 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2448 * judge of that).
2449 */
2450 rcu_report_qs_rdp(rdp);
2451 }
2452
2453 /* Return true if callback-invocation time limit exceeded. */
2454 static bool rcu_do_batch_check_time(long count, long tlimit,
2455 bool jlimit_check, unsigned long jlimit)
2456 {
2457 // Invoke local_clock() only once per 32 consecutive callbacks.
2458 return unlikely(tlimit) &&
2459 (!likely(count & 31) ||
2460 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) &&
2461 jlimit_check && time_after(jiffies, jlimit))) &&
2462 local_clock() >= tlimit;
2463 }
2464
2465 /*
2466 * Invoke any RCU callbacks that have made it to the end of their grace
2467 * period. Throttle as specified by rdp->blimit.
2468 */
2469 static void rcu_do_batch(struct rcu_data *rdp)
2470 {
2471 long bl;
2472 long count = 0;
2473 int div;
2474 bool __maybe_unused empty;
2475 unsigned long flags;
2476 unsigned long jlimit;
2477 bool jlimit_check = false;
2478 long pending;
2479 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2480 struct rcu_head *rhp;
2481 long tlimit = 0;
2482
2483 /* If no callbacks are ready, just return. */
2484 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2485 trace_rcu_batch_start(rcu_state.name,
2486 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2487 trace_rcu_batch_end(rcu_state.name, 0,
2488 !rcu_segcblist_empty(&rdp->cblist),
2489 need_resched(), is_idle_task(current),
2490 rcu_is_callbacks_kthread(rdp));
2491 return;
2492 }
2493
2494 /*
2495 * Extract the list of ready callbacks, disabling IRQs to prevent
2496 * races with call_rcu() from interrupt handlers. Leave the
2497 * callback counts, as rcu_barrier() needs to be conservative.
2498 *
2499 * Callbacks execution is fully ordered against preceding grace period
2500 * completion (materialized by rnp->gp_seq update) thanks to the
2501 * smp_mb__after_unlock_lock() upon node locking required for callbacks
2502 * advancing. In NOCB mode this ordering is then further relayed through
2503 * the nocb locking that protects both callbacks advancing and extraction.
2504 */
2505 rcu_nocb_lock_irqsave(rdp, flags);
2506 WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2507 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2508 div = READ_ONCE(rcu_divisor);
2509 div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2510 bl = max(rdp->blimit, pending >> div);
2511 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2512 (IS_ENABLED(CONFIG_RCU_DOUBLE_CHECK_CB_TIME) || unlikely(bl > 100))) {
2513 const long npj = NSEC_PER_SEC / HZ;
2514 long rrn = READ_ONCE(rcu_resched_ns);
2515
2516 rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2517 tlimit = local_clock() + rrn;
2518 jlimit = jiffies + (rrn + npj + 1) / npj;
2519 jlimit_check = true;
2520 }
2521 trace_rcu_batch_start(rcu_state.name,
2522 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2523 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2524 if (rcu_rdp_is_offloaded(rdp))
2525 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2526
2527 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2528 rcu_nocb_unlock_irqrestore(rdp, flags);
2529
2530 /* Invoke callbacks. */
2531 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2532 rhp = rcu_cblist_dequeue(&rcl);
2533
2534 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2535 rcu_callback_t f;
2536
2537 count++;
2538 debug_rcu_head_unqueue(rhp);
2539
2540 rcu_lock_acquire(&rcu_callback_map);
2541 trace_rcu_invoke_callback(rcu_state.name, rhp);
2542
2543 f = rhp->func;
2544 debug_rcu_head_callback(rhp);
2545 WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2546 f(rhp);
2547
2548 rcu_lock_release(&rcu_callback_map);
2549
2550 /*
2551 * Stop only if limit reached and CPU has something to do.
2552 */
2553 if (in_serving_softirq()) {
2554 if (count >= bl && (need_resched() || !is_idle_task(current)))
2555 break;
2556 /*
2557 * Make sure we don't spend too much time here and deprive other
2558 * softirq vectors of CPU cycles.
2559 */
2560 if (rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit))
2561 break;
2562 } else {
2563 // In rcuc/rcuoc context, so no worries about
2564 // depriving other softirq vectors of CPU cycles.
2565 local_bh_enable();
2566 lockdep_assert_irqs_enabled();
2567 cond_resched_tasks_rcu_qs();
2568 lockdep_assert_irqs_enabled();
2569 local_bh_disable();
2570 // But rcuc kthreads can delay quiescent-state
2571 // reporting, so check time limits for them.
2572 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2573 rcu_do_batch_check_time(count, tlimit, jlimit_check, jlimit)) {
2574 rdp->rcu_cpu_has_work = 1;
2575 break;
2576 }
2577 }
2578 }
2579
2580 rcu_nocb_lock_irqsave(rdp, flags);
2581 rdp->n_cbs_invoked += count;
2582 trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2583 is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2584
2585 /* Update counts and requeue any remaining callbacks. */
2586 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2587 rcu_segcblist_add_len(&rdp->cblist, -count);
2588
2589 /* Reinstate batch limit if we have worked down the excess. */
2590 count = rcu_segcblist_n_cbs(&rdp->cblist);
2591 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2592 rdp->blimit = blimit;
2593
2594 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2595 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2596 rdp->qlen_last_fqs_check = 0;
2597 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2598 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2599 rdp->qlen_last_fqs_check = count;
2600
2601 /*
2602 * The following usually indicates a double call_rcu(). To track
2603 * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2604 */
2605 empty = rcu_segcblist_empty(&rdp->cblist);
2606 WARN_ON_ONCE(count == 0 && !empty);
2607 WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2608 count != 0 && empty);
2609 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2610 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2611
2612 rcu_nocb_unlock_irqrestore(rdp, flags);
2613
2614 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2615 }
2616
2617 /*
2618 * This function is invoked from each scheduling-clock interrupt,
2619 * and checks to see if this CPU is in a non-context-switch quiescent
2620 * state, for example, user mode or idle loop. It also schedules RCU
2621 * core processing. If the current grace period has gone on too long,
2622 * it will ask the scheduler to manufacture a context switch for the sole
2623 * purpose of providing the needed quiescent state.
2624 */
2625 void rcu_sched_clock_irq(int user)
2626 {
2627 unsigned long j;
2628
2629 if (IS_ENABLED(CONFIG_PROVE_RCU)) {
2630 j = jiffies;
2631 WARN_ON_ONCE(time_before(j, __this_cpu_read(rcu_data.last_sched_clock)));
2632 __this_cpu_write(rcu_data.last_sched_clock, j);
2633 }
2634 trace_rcu_utilization(TPS("Start scheduler-tick"));
2635 lockdep_assert_irqs_disabled();
2636 raw_cpu_inc(rcu_data.ticks_this_gp);
2637 /* The load-acquire pairs with the store-release setting to true. */
2638 if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2639 /* Idle and userspace execution already are quiescent states. */
2640 if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2641 set_tsk_need_resched(current);
2642 set_preempt_need_resched();
2643 }
2644 __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2645 }
2646 rcu_flavor_sched_clock_irq(user);
2647 if (rcu_pending(user))
2648 invoke_rcu_core();
2649 if (user || rcu_is_cpu_rrupt_from_idle())
2650 rcu_note_voluntary_context_switch(current);
2651 lockdep_assert_irqs_disabled();
2652
2653 trace_rcu_utilization(TPS("End scheduler-tick"));
2654 }
2655
2656 /*
2657 * Scan the leaf rcu_node structures. For each structure on which all
2658 * CPUs have reported a quiescent state and on which there are tasks
2659 * blocking the current grace period, initiate RCU priority boosting.
2660 * Otherwise, invoke the specified function to check dyntick state for
2661 * each CPU that has not yet reported a quiescent state.
2662 */
2663 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2664 {
2665 int cpu;
2666 unsigned long flags;
2667 struct rcu_node *rnp;
2668
2669 rcu_state.cbovld = rcu_state.cbovldnext;
2670 rcu_state.cbovldnext = false;
2671 rcu_for_each_leaf_node(rnp) {
2672 unsigned long mask = 0;
2673 unsigned long rsmask = 0;
2674
2675 cond_resched_tasks_rcu_qs();
2676 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2677 rcu_state.cbovldnext |= !!rnp->cbovldmask;
2678 if (rnp->qsmask == 0) {
2679 if (rcu_preempt_blocked_readers_cgp(rnp)) {
2680 /*
2681 * No point in scanning bits because they
2682 * are all zero. But we might need to
2683 * priority-boost blocked readers.
2684 */
2685 rcu_initiate_boost(rnp, flags);
2686 /* rcu_initiate_boost() releases rnp->lock */
2687 continue;
2688 }
2689 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2690 continue;
2691 }
2692 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2693 struct rcu_data *rdp;
2694 int ret;
2695
2696 rdp = per_cpu_ptr(&rcu_data, cpu);
2697 ret = f(rdp);
2698 if (ret > 0) {
2699 mask |= rdp->grpmask;
2700 rcu_disable_urgency_upon_qs(rdp);
2701 }
2702 if (ret < 0)
2703 rsmask |= rdp->grpmask;
2704 }
2705 if (mask != 0) {
2706 /* Idle/offline CPUs, report (releases rnp->lock). */
2707 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2708 } else {
2709 /* Nothing to do here, so just drop the lock. */
2710 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2711 }
2712
2713 for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
2714 resched_cpu(cpu);
2715 }
2716 }
2717
2718 /*
2719 * Force quiescent states on reluctant CPUs, and also detect which
2720 * CPUs are in dyntick-idle mode.
2721 */
2722 void rcu_force_quiescent_state(void)
2723 {
2724 unsigned long flags;
2725 bool ret;
2726 struct rcu_node *rnp;
2727 struct rcu_node *rnp_old = NULL;
2728
2729 if (!rcu_gp_in_progress())
2730 return;
2731 /* Funnel through hierarchy to reduce memory contention. */
2732 rnp = raw_cpu_read(rcu_data.mynode);
2733 for (; rnp != NULL; rnp = rnp->parent) {
2734 ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2735 !raw_spin_trylock(&rnp->fqslock);
2736 if (rnp_old != NULL)
2737 raw_spin_unlock(&rnp_old->fqslock);
2738 if (ret)
2739 return;
2740 rnp_old = rnp;
2741 }
2742 /* rnp_old == rcu_get_root(), rnp == NULL. */
2743
2744 /* Reached the root of the rcu_node tree, acquire lock. */
2745 raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2746 raw_spin_unlock(&rnp_old->fqslock);
2747 if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2748 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2749 return; /* Someone beat us to it. */
2750 }
2751 WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_FQS);
2752 raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2753 rcu_gp_kthread_wake();
2754 }
2755 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2756
2757 // Workqueue handler for an RCU reader for kernels enforcing strict RCU
2758 // grace periods.
2759 static void strict_work_handler(struct work_struct *work)
2760 {
2761 rcu_read_lock();
2762 rcu_read_unlock();
2763 }
2764
2765 /* Perform RCU core processing work for the current CPU. */
2766 static __latent_entropy void rcu_core(void)
2767 {
2768 unsigned long flags;
2769 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2770 struct rcu_node *rnp = rdp->mynode;
2771
2772 if (cpu_is_offline(smp_processor_id()))
2773 return;
2774 trace_rcu_utilization(TPS("Start RCU core"));
2775 WARN_ON_ONCE(!rdp->beenonline);
2776
2777 /* Report any deferred quiescent states if preemption enabled. */
2778 if (IS_ENABLED(CONFIG_PREEMPT_COUNT) && (!(preempt_count() & PREEMPT_MASK))) {
2779 rcu_preempt_deferred_qs(current);
2780 } else if (rcu_preempt_need_deferred_qs(current)) {
2781 set_tsk_need_resched(current);
2782 set_preempt_need_resched();
2783 }
2784
2785 /* Update RCU state based on any recent quiescent states. */
2786 rcu_check_quiescent_state(rdp);
2787
2788 /* No grace period and unregistered callbacks? */
2789 if (!rcu_gp_in_progress() &&
2790 rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
2791 local_irq_save(flags);
2792 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2793 rcu_accelerate_cbs_unlocked(rnp, rdp);
2794 local_irq_restore(flags);
2795 }
2796
2797 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2798
2799 /* If there are callbacks ready, invoke them. */
2800 if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2801 likely(READ_ONCE(rcu_scheduler_fully_active))) {
2802 rcu_do_batch(rdp);
2803 /* Re-invoke RCU core processing if there are callbacks remaining. */
2804 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2805 invoke_rcu_core();
2806 }
2807
2808 /* Do any needed deferred wakeups of rcuo kthreads. */
2809 do_nocb_deferred_wakeup(rdp);
2810 trace_rcu_utilization(TPS("End RCU core"));
2811
2812 // If strict GPs, schedule an RCU reader in a clean environment.
2813 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2814 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2815 }
2816
2817 static void rcu_core_si(void)
2818 {
2819 rcu_core();
2820 }
2821
2822 static void rcu_wake_cond(struct task_struct *t, int status)
2823 {
2824 /*
2825 * If the thread is yielding, only wake it when this
2826 * is invoked from the idle task.
2827 */
2828 if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2829 wake_up_process(t);
2830 }
2831
2832 static void invoke_rcu_core_kthread(void)
2833 {
2834 struct task_struct *t;
2835 unsigned long flags;
2836
2837 local_irq_save(flags);
2838 __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2839 t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2840 if (t != NULL && t != current)
2841 rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2842 local_irq_restore(flags);
2843 }
2844
2845 /*
2846 * Wake up this CPU's rcuc kthread to do RCU core processing.
2847 */
2848 static void invoke_rcu_core(void)
2849 {
2850 if (!cpu_online(smp_processor_id()))
2851 return;
2852 if (use_softirq)
2853 raise_softirq(RCU_SOFTIRQ);
2854 else
2855 invoke_rcu_core_kthread();
2856 }
2857
2858 static void rcu_cpu_kthread_park(unsigned int cpu)
2859 {
2860 per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2861 }
2862
2863 static int rcu_cpu_kthread_should_run(unsigned int cpu)
2864 {
2865 return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2866 }
2867
2868 /*
2869 * Per-CPU kernel thread that invokes RCU callbacks. This replaces
2870 * the RCU softirq used in configurations of RCU that do not support RCU
2871 * priority boosting.
2872 */
2873 static void rcu_cpu_kthread(unsigned int cpu)
2874 {
2875 unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2876 char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2877 unsigned long *j = this_cpu_ptr(&rcu_data.rcuc_activity);
2878 int spincnt;
2879
2880 trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2881 for (spincnt = 0; spincnt < 10; spincnt++) {
2882 WRITE_ONCE(*j, jiffies);
2883 local_bh_disable();
2884 *statusp = RCU_KTHREAD_RUNNING;
2885 local_irq_disable();
2886 work = *workp;
2887 WRITE_ONCE(*workp, 0);
2888 local_irq_enable();
2889 if (work)
2890 rcu_core();
2891 local_bh_enable();
2892 if (!READ_ONCE(*workp)) {
2893 trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2894 *statusp = RCU_KTHREAD_WAITING;
2895 return;
2896 }
2897 }
2898 *statusp = RCU_KTHREAD_YIELDING;
2899 trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2900 schedule_timeout_idle(2);
2901 trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2902 *statusp = RCU_KTHREAD_WAITING;
2903 WRITE_ONCE(*j, jiffies);
2904 }
2905
2906 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2907 .store = &rcu_data.rcu_cpu_kthread_task,
2908 .thread_should_run = rcu_cpu_kthread_should_run,
2909 .thread_fn = rcu_cpu_kthread,
2910 .thread_comm = "rcuc/%u",
2911 .setup = rcu_cpu_kthread_setup,
2912 .park = rcu_cpu_kthread_park,
2913 };
2914
2915 /*
2916 * Spawn per-CPU RCU core processing kthreads.
2917 */
2918 static int __init rcu_spawn_core_kthreads(void)
2919 {
2920 int cpu;
2921
2922 for_each_possible_cpu(cpu)
2923 per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2924 if (use_softirq)
2925 return 0;
2926 WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2927 "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2928 return 0;
2929 }
2930
2931 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
2932 {
2933 rcu_segcblist_enqueue(&rdp->cblist, head);
2934 if (__is_kvfree_rcu_offset((unsigned long)func))
2935 trace_rcu_kvfree_callback(rcu_state.name, head,
2936 (unsigned long)func,
2937 rcu_segcblist_n_cbs(&rdp->cblist));
2938 else
2939 trace_rcu_callback(rcu_state.name, head,
2940 rcu_segcblist_n_cbs(&rdp->cblist));
2941 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2942 }
2943
2944 /*
2945 * Handle any core-RCU processing required by a call_rcu() invocation.
2946 */
2947 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2948 rcu_callback_t func, unsigned long flags)
2949 {
2950 rcutree_enqueue(rdp, head, func);
2951 /*
2952 * If called from an extended quiescent state, invoke the RCU
2953 * core in order to force a re-evaluation of RCU's idleness.
2954 */
2955 if (!rcu_is_watching())
2956 invoke_rcu_core();
2957
2958 /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2959 if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2960 return;
2961
2962 /*
2963 * Force the grace period if too many callbacks or too long waiting.
2964 * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2965 * if some other CPU has recently done so. Also, don't bother
2966 * invoking rcu_force_quiescent_state() if the newly enqueued callback
2967 * is the only one waiting for a grace period to complete.
2968 */
2969 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2970 rdp->qlen_last_fqs_check + qhimark)) {
2971
2972 /* Are we ignoring a completed grace period? */
2973 note_gp_changes(rdp);
2974
2975 /* Start a new grace period if one not already started. */
2976 if (!rcu_gp_in_progress()) {
2977 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2978 } else {
2979 /* Give the grace period a kick. */
2980 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2981 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2982 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2983 rcu_force_quiescent_state();
2984 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2985 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2986 }
2987 }
2988 }
2989
2990 /*
2991 * RCU callback function to leak a callback.
2992 */
2993 static void rcu_leak_callback(struct rcu_head *rhp)
2994 {
2995 }
2996
2997 /*
2998 * Check and if necessary update the leaf rcu_node structure's
2999 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3000 * number of queued RCU callbacks. The caller must hold the leaf rcu_node
3001 * structure's ->lock.
3002 */
3003 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
3004 {
3005 raw_lockdep_assert_held_rcu_node(rnp);
3006 if (qovld_calc <= 0)
3007 return; // Early boot and wildcard value set.
3008 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
3009 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
3010 else
3011 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
3012 }
3013
3014 /*
3015 * Check and if necessary update the leaf rcu_node structure's
3016 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
3017 * number of queued RCU callbacks. No locks need be held, but the
3018 * caller must have disabled interrupts.
3019 *
3020 * Note that this function ignores the possibility that there are a lot
3021 * of callbacks all of which have already seen the end of their respective
3022 * grace periods. This omission is due to the need for no-CBs CPUs to
3023 * be holding ->nocb_lock to do this check, which is too heavy for a
3024 * common-case operation.
3025 */
3026 static void check_cb_ovld(struct rcu_data *rdp)
3027 {
3028 struct rcu_node *const rnp = rdp->mynode;
3029
3030 if (qovld_calc <= 0 ||
3031 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3032 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3033 return; // Early boot wildcard value or already set correctly.
3034 raw_spin_lock_rcu_node(rnp);
3035 check_cb_ovld_locked(rdp, rnp);
3036 raw_spin_unlock_rcu_node(rnp);
3037 }
3038
3039 static void
3040 __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
3041 {
3042 static atomic_t doublefrees;
3043 unsigned long flags;
3044 bool lazy;
3045 struct rcu_data *rdp;
3046
3047 /* Misaligned rcu_head! */
3048 WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3049
3050 if (debug_rcu_head_queue(head)) {
3051 /*
3052 * Probable double call_rcu(), so leak the callback.
3053 * Use rcu:rcu_callback trace event to find the previous
3054 * time callback was passed to call_rcu().
3055 */
3056 if (atomic_inc_return(&doublefrees) < 4) {
3057 pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
3058 mem_dump_obj(head);
3059 }
3060 WRITE_ONCE(head->func, rcu_leak_callback);
3061 return;
3062 }
3063 head->func = func;
3064 head->next = NULL;
3065 kasan_record_aux_stack(head);
3066
3067 local_irq_save(flags);
3068 rdp = this_cpu_ptr(&rcu_data);
3069 RCU_LOCKDEP_WARN(!rcu_rdp_cpu_online(rdp), "Callback enqueued on offline CPU!");
3070
3071 lazy = lazy_in && !rcu_async_should_hurry();
3072
3073 /* Add the callback to our list. */
3074 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3075 // This can trigger due to call_rcu() from offline CPU:
3076 WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3077 WARN_ON_ONCE(!rcu_is_watching());
3078 // Very early boot, before rcu_init(). Initialize if needed
3079 // and then drop through to queue the callback.
3080 if (rcu_segcblist_empty(&rdp->cblist))
3081 rcu_segcblist_init(&rdp->cblist);
3082 }
3083
3084 check_cb_ovld(rdp);
3085
3086 if (unlikely(rcu_rdp_is_offloaded(rdp)))
3087 call_rcu_nocb(rdp, head, func, flags, lazy);
3088 else
3089 call_rcu_core(rdp, head, func, flags);
3090 local_irq_restore(flags);
3091 }
3092
3093 #ifdef CONFIG_RCU_LAZY
3094 static bool enable_rcu_lazy __read_mostly = !IS_ENABLED(CONFIG_RCU_LAZY_DEFAULT_OFF);
3095 module_param(enable_rcu_lazy, bool, 0444);
3096
3097 /**
3098 * call_rcu_hurry() - Queue RCU callback for invocation after grace period, and
3099 * flush all lazy callbacks (including the new one) to the main ->cblist while
3100 * doing so.
3101 *
3102 * @head: structure to be used for queueing the RCU updates.
3103 * @func: actual callback function to be invoked after the grace period
3104 *
3105 * The callback function will be invoked some time after a full grace
3106 * period elapses, in other words after all pre-existing RCU read-side
3107 * critical sections have completed.
3108 *
3109 * Use this API instead of call_rcu() if you don't want the callback to be
3110 * invoked after very long periods of time, which can happen on systems without
3111 * memory pressure and on systems which are lightly loaded or mostly idle.
3112 * This function will cause callbacks to be invoked sooner than later at the
3113 * expense of extra power. Other than that, this function is identical to, and
3114 * reuses call_rcu()'s logic. Refer to call_rcu() for more details about memory
3115 * ordering and other functionality.
3116 */
3117 void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
3118 {
3119 __call_rcu_common(head, func, false);
3120 }
3121 EXPORT_SYMBOL_GPL(call_rcu_hurry);
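/*
 * Editorial usage note (sketch, not part of the original file): a caller
 * that cannot tolerate lazy batching uses the same calling convention as
 * call_rcu(), for example call_rcu_hurry(&obj->rcu, obj_release), where
 * obj and obj_release() are hypothetical names.
 */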
3122 #else
3123 #define enable_rcu_lazy false
3124 #endif
3125
3126 /**
3127 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3128 * By default the callbacks are 'lazy' and are kept hidden from the main
3129 * ->cblist to prevent starting of grace periods too soon.
3130 * If you desire grace periods to start very soon, use call_rcu_hurry().
3131 *
3132 * @head: structure to be used for queueing the RCU updates.
3133 * @func: actual callback function to be invoked after the grace period
3134 *
3135 * The callback function will be invoked some time after a full grace
3136 * period elapses, in other words after all pre-existing RCU read-side
3137 * critical sections have completed. However, the callback function
3138 * might well execute concurrently with RCU read-side critical sections
3139 * that started after call_rcu() was invoked.
3140 *
3141 * RCU read-side critical sections are delimited by rcu_read_lock()
3142 * and rcu_read_unlock(), and may be nested. In addition, but only in
3143 * v5.0 and later, regions of code across which interrupts, preemption,
3144 * or softirqs have been disabled also serve as RCU read-side critical
3145 * sections. This includes hardware interrupt handlers, softirq handlers,
3146 * and NMI handlers.
3147 *
3148 * Note that all CPUs must agree that the grace period extended beyond
3149 * all pre-existing RCU read-side critical sections. On systems with more
3150 * than one CPU, this means that when "func()" is invoked, each CPU is
3151 * guaranteed to have executed a full memory barrier since the end of its
3152 * last RCU read-side critical section whose beginning preceded the call
3153 * to call_rcu(). It also means that each CPU executing an RCU read-side
3154 * critical section that continues beyond the start of "func()" must have
3155 * executed a memory barrier after the call_rcu() but before the beginning
3156 * of that RCU read-side critical section. Note that these guarantees
3157 * include CPUs that are offline, idle, or executing in user mode, as
3158 * well as CPUs that are executing in the kernel.
3159 *
3160 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3161 * resulting RCU callback function "func()", then both CPU A and CPU B are
3162 * guaranteed to execute a full memory barrier during the time interval
3163 * between the call to call_rcu() and the invocation of "func()" -- even
3164 * if CPU A and CPU B are the same CPU (but again only if the system has
3165 * more than one CPU).
3166 *
3167 * Implementation of these memory-ordering guarantees is described here:
3168 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3169 */
3170 void call_rcu(struct rcu_head *head, rcu_callback_t func)
3171 {
3172 __call_rcu_common(head, func, enable_rcu_lazy);
3173 }
3174 EXPORT_SYMBOL_GPL(call_rcu);
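/*
 * Editorial usage sketch (not part of the original file): the classic
 * call_rcu() pattern unlinks an element and defers its freeing until a
 * grace period has elapsed.  The struct foo, foo_lock, fp, and
 * foo_release() names below are hypothetical.
 *
 *     struct foo {
 *         struct list_head list;
 *         struct rcu_head rcu;
 *     };
 *
 *     static void foo_release(struct rcu_head *rhp)
 *     {
 *         kfree(container_of(rhp, struct foo, rcu));
 *     }
 *
 *     // Updater: unlink under the lock, then defer the free.
 *     spin_lock(&foo_lock);
 *     list_del_rcu(&fp->list);
 *     spin_unlock(&foo_lock);
 *     call_rcu(&fp->rcu, foo_release);
 *
 * Readers traversing the list under rcu_read_lock() are guaranteed that
 * fp is not freed out from under them.
 */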
3175
3176 /*
3177 * During early boot, any blocking grace-period wait automatically
3178 * implies a grace period.
3179 *
3180 * Later on, this could in theory be the case for kernels built with
3181 * CONFIG_SMP=y && CONFIG_PREEMPTION=y running on a single CPU, but this
3182 * is not a common case. Furthermore, this optimization would cause
3183 * the rcu_gp_oldstate structure to expand by 50%, so this potential
3184 * grace-period optimization is ignored once the scheduler is running.
3185 */
3186 static int rcu_blocking_is_gp(void)
3187 {
3188 if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE) {
3189 might_sleep();
3190 return false;
3191 }
3192 return true;
3193 }
3194
3195 /*
3196 * Helper function for the synchronize_rcu() API.
3197 */
3198 static void synchronize_rcu_normal(void)
3199 {
3200 struct rcu_synchronize rs;
3201
3202 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request"));
3203
3204 if (!READ_ONCE(rcu_normal_wake_from_gp)) {
3205 wait_rcu_gp(call_rcu_hurry);
3206 goto trace_complete_out;
3207 }
3208
3209 init_rcu_head_on_stack(&rs.head);
3210 init_completion(&rs.completion);
3211
3212 /*
3213 * This code might be preempted, therefore take a GP
3214 * snapshot before adding a request.
3215 */
3216 if (IS_ENABLED(CONFIG_PROVE_RCU))
3217 rs.head.func = (void *) get_state_synchronize_rcu();
3218
3219 rcu_sr_normal_add_req(&rs);
3220
3221 /* Kick a GP and start waiting. */
3222 (void) start_poll_synchronize_rcu();
3223
3224 /* Now we can wait. */
3225 wait_for_completion(&rs.completion);
3226 destroy_rcu_head_on_stack(&rs.head);
3227
3228 trace_complete_out:
3229 trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("complete"));
3230 }
3231
3232 /**
3233 * synchronize_rcu - wait until a grace period has elapsed.
3234 *
3235 * Control will return to the caller some time after a full grace
3236 * period has elapsed, in other words after all currently executing RCU
3237 * read-side critical sections have completed. Note, however, that
3238 * upon return from synchronize_rcu(), the caller might well be executing
3239 * concurrently with new RCU read-side critical sections that began while
3240 * synchronize_rcu() was waiting.
3241 *
3242 * RCU read-side critical sections are delimited by rcu_read_lock()
3243 * and rcu_read_unlock(), and may be nested. In addition, but only in
3244 * v5.0 and later, regions of code across which interrupts, preemption,
3245 * or softirqs have been disabled also serve as RCU read-side critical
3246 * sections. This includes hardware interrupt handlers, softirq handlers,
3247 * and NMI handlers.
3248 *
3249 * Note that this guarantee implies further memory-ordering guarantees.
3250 * On systems with more than one CPU, when synchronize_rcu() returns,
3251 * each CPU is guaranteed to have executed a full memory barrier since
3252 * the end of its last RCU read-side critical section whose beginning
3253 * preceded the call to synchronize_rcu(). In addition, each CPU having
3254 * an RCU read-side critical section that extends beyond the return from
3255 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3256 * after the beginning of synchronize_rcu() and before the beginning of
3257 * that RCU read-side critical section. Note that these guarantees include
3258 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3259 * that are executing in the kernel.
3260 *
3261 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3262 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3263 * to have executed a full memory barrier during the execution of
3264 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3265 * again only if the system has more than one CPU).
3266 *
3267 * Implementation of these memory-ordering guarantees is described here:
3268 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3269 */
3270 void synchronize_rcu(void)
3271 {
3272 unsigned long flags;
3273 struct rcu_node *rnp;
3274
3275 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3276 lock_is_held(&rcu_lock_map) ||
3277 lock_is_held(&rcu_sched_lock_map),
3278 "Illegal synchronize_rcu() in RCU read-side critical section");
3279 if (!rcu_blocking_is_gp()) {
3280 if (rcu_gp_is_expedited())
3281 synchronize_rcu_expedited();
3282 else
3283 synchronize_rcu_normal();
3284 return;
3285 }
3286
3287 // Context allows vacuous grace periods.
3288 // Note well that this code runs with !PREEMPT && !SMP.
3289 // In addition, all code that advances grace periods runs at
3290 // process level. Therefore, this normal GP overlaps with other
3291 // normal GPs only by being fully nested within them, which allows
3292 // reuse of ->gp_seq_polled_snap.
3293 rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
3294 rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
3295
3296 // Update the normal grace-period counters to record
3297 // this grace period, but only those used by the boot CPU.
3298 // The rcu_scheduler_starting() will take care of the rest of
3299 // these counters.
3300 local_irq_save(flags);
3301 WARN_ON_ONCE(num_online_cpus() > 1);
3302 rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
3303 for (rnp = this_cpu_ptr(&rcu_data)->mynode; rnp; rnp = rnp->parent)
3304 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
3305 local_irq_restore(flags);
3306 }
3307 EXPORT_SYMBOL_GPL(synchronize_rcu);
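/*
 * Editorial usage sketch (not part of the original file): a typical
 * updater publishes a new version of an RCU-protected structure and then
 * blocks in synchronize_rcu() before freeing the old version.  The
 * gbl_foo pointer, foo_lock, and struct foo below are hypothetical.
 *
 *     struct foo *new_fp, *old_fp;
 *
 *     new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *     spin_lock(&foo_lock);
 *     old_fp = rcu_dereference_protected(gbl_foo,
 *                                        lockdep_is_held(&foo_lock));
 *     *new_fp = *old_fp;
 *     new_fp->a = 42;
 *     rcu_assign_pointer(gbl_foo, new_fp);
 *     spin_unlock(&foo_lock);
 *
 *     synchronize_rcu();   // Wait for pre-existing readers to finish.
 *     kfree(old_fp);
 */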
3308
3309 /**
3310 * get_completed_synchronize_rcu_full - Return a full pre-completed polled state cookie
3311 * @rgosp: Place to put state cookie
3312 *
3313 * Stores into @rgosp a value that will always be treated by functions
3314 * like poll_state_synchronize_rcu_full() as a cookie whose grace period
3315 * has already completed.
3316 */
3317 void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3318 {
3319 rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
3320 rgosp->rgos_exp = RCU_GET_STATE_COMPLETED;
3321 }
3322 EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
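/*
 * Editorial usage note (sketch, not part of the original file): this is
 * handy for initializing a cached cookie so that the very first
 * poll_state_synchronize_rcu_full() against it reports the grace period
 * as already complete, for example (the obj->gp_cookie field is
 * hypothetical):
 *
 *     get_completed_synchronize_rcu_full(&obj->gp_cookie);
 */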
3323
3324 /**
3325 * get_state_synchronize_rcu - Snapshot current RCU state
3326 *
3327 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3328 * or poll_state_synchronize_rcu() to determine whether or not a full
3329 * grace period has elapsed in the meantime.
3330 */
3331 unsigned long get_state_synchronize_rcu(void)
3332 {
3333 /*
3334 * Any prior manipulation of RCU-protected data must happen
3335 * before the load from ->gp_seq.
3336 */
3337 smp_mb(); /* ^^^ */
3338 return rcu_seq_snap(&rcu_state.gp_seq_polled);
3339 }
3340 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3341
3342 /**
3343 * get_state_synchronize_rcu_full - Snapshot RCU state, both normal and expedited
3344 * @rgosp: location to place combined normal/expedited grace-period state
3345 *
3346 * Places the normal and expedited grace-period states in @rgosp. This
3347 * state value can be passed to a later call to cond_synchronize_rcu_full()
3348 * or poll_state_synchronize_rcu_full() to determine whether or not a
3349 * grace period (whether normal or expedited) has elapsed in the meantime.
3350 * The rcu_gp_oldstate structure takes up twice the memory of an unsigned
3351 * long, but is guaranteed to see all grace periods. In contrast, the
3352 * combined state occupies less memory, but can sometimes fail to take
3353 * grace periods into account.
3354 *
3355 * This does not guarantee that the needed grace period will actually
3356 * start.
3357 */
3358 void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3359 {
3360 struct rcu_node *rnp = rcu_get_root();
3361
3362 /*
3363 * Any prior manipulation of RCU-protected data must happen
3364 * before the loads from ->gp_seq and ->expedited_sequence.
3365 */
3366 smp_mb(); /* ^^^ */
3367 rgosp->rgos_norm = rcu_seq_snap(&rnp->gp_seq);
3368 rgosp->rgos_exp = rcu_seq_snap(&rcu_state.expedited_sequence);
3369 }
3370 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu_full);
3371
3372 /*
3373 * Helper function for start_poll_synchronize_rcu() and
3374 * start_poll_synchronize_rcu_full().
3375 */
3376 static void start_poll_synchronize_rcu_common(void)
3377 {
3378 unsigned long flags;
3379 bool needwake;
3380 struct rcu_data *rdp;
3381 struct rcu_node *rnp;
3382
3383 local_irq_save(flags);
3384 rdp = this_cpu_ptr(&rcu_data);
3385 rnp = rdp->mynode;
3386 raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3387 // Note it is possible for a grace period to have elapsed between
3388 // the above call to get_state_synchronize_rcu() and the below call
3389 // to rcu_seq_snap. This is OK, the worst that happens is that we
3390 // get a grace period that no one needed. These accesses are ordered
3391 // by smp_mb(), and we are accessing them in the opposite order
3392 // from which they are updated at grace-period start, as required.
3393 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
3394 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3395 if (needwake)
3396 rcu_gp_kthread_wake();
3397 }
3398
3399 /**
3400 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3401 *
3402 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3403 * or poll_state_synchronize_rcu() to determine whether or not a full
3404 * grace period has elapsed in the meantime. If the needed grace period
3405 * is not already slated to start, notifies RCU core of the need for that
3406 * grace period.
3407 */
3408 unsigned long start_poll_synchronize_rcu(void)
3409 {
3410 unsigned long gp_seq = get_state_synchronize_rcu();
3411
3412 start_poll_synchronize_rcu_common();
3413 return gp_seq;
3414 }
3415 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3416
3417 /**
3418 * start_poll_synchronize_rcu_full - Take a full snapshot and start RCU grace period
3419 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3420 *
3421 * Places the normal and expedited grace-period states in @rgosp. This
3422 * state value can be passed to a later call to cond_synchronize_rcu_full()
3423 * or poll_state_synchronize_rcu_full() to determine whether or not a
3424 * grace period (whether normal or expedited) has elapsed in the meantime.
3425 * If the needed grace period is not already slated to start, notifies
3426 * RCU core of the need for that grace period.
3427 */
3428 void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3429 {
3430 get_state_synchronize_rcu_full(rgosp);
3431
3432 start_poll_synchronize_rcu_common();
3433 }
3434 EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_full);
3435
3436 /**
3437 * poll_state_synchronize_rcu - Has the specified RCU grace period completed?
3438 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3439 *
3440 * If a full RCU grace period has elapsed since the earlier call from
3441 * which @oldstate was obtained, return @true, otherwise return @false.
3442 * If @false is returned, it is the caller's responsibility to invoke this
3443 * function later on until it does return @true. Alternatively, the caller
3444 * can explicitly wait for a grace period, for example, by passing @oldstate
3445 * to either cond_synchronize_rcu() or cond_synchronize_rcu_expedited()
3446 * on the one hand or by directly invoking either synchronize_rcu() or
3447 * synchronize_rcu_expedited() on the other.
3448 *
3449 * Yes, this function does not take counter wrap into account.
3450 * But counter wrap is harmless. If the counter wraps, we have waited for
3451 * more than a billion grace periods (and way more on a 64-bit system!).
3452 * Those needing to keep old state values for very long time periods
3453 * (many hours even on 32-bit systems) should check them occasionally and
3454 * either refresh them or set a flag indicating that the grace period has
3455 * completed. Alternatively, they can use get_completed_synchronize_rcu()
3456 * to get a guaranteed-completed grace-period state.
3457 *
3458 * In addition, because oldstate compresses the grace-period state for
3459 * both normal and expedited grace periods into a single unsigned long,
3460 * it can miss a grace period when synchronize_rcu() runs concurrently
3461 * with synchronize_rcu_expedited(). If this is unacceptable, please
3462 * instead use the _full() variant of these polling APIs.
3463 *
3464 * This function provides the same memory-ordering guarantees that
3465 * would be provided by a synchronize_rcu() that was invoked at the call
3466 * to the function that provided @oldstate, and that returned at the end
3467 * of this function.
3468 */
3469 bool poll_state_synchronize_rcu(unsigned long oldstate)
3470 {
3471 if (oldstate == RCU_GET_STATE_COMPLETED ||
3472 rcu_seq_done_exact(&rcu_state.gp_seq_polled, oldstate)) {
3473 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3474 return true;
3475 }
3476 return false;
3477 }
3478 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
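/*
 * Editorial usage sketch (not part of the original file): the polled API
 * lets an updater avoid blocking.  A cookie is taken when the old data is
 * retired and checked later, typically from a periodic worker.  The
 * gbl_foo, new_fp, old_fp, and requeue_the_work() names are hypothetical.
 *
 *     unsigned long cookie;
 *
 *     rcu_assign_pointer(gbl_foo, new_fp);
 *     cookie = start_poll_synchronize_rcu();   // Snapshot and kick off a GP.
 *
 *     // Later, for example from a workqueue handler:
 *     if (poll_state_synchronize_rcu(cookie))
 *         kfree(old_fp);                       // Grace period has elapsed.
 *     else
 *         requeue_the_work();                  // Try again later.
 */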
3479
3480 /**
3481 * poll_state_synchronize_rcu_full - Has the specified RCU grace period completed?
3482 * @rgosp: value from get_state_synchronize_rcu_full() or start_poll_synchronize_rcu_full()
3483 *
3484 * If a full RCU grace period has elapsed since the earlier call from
3485 * which *rgosp was obtained, return @true, otherwise return @false.
3486 * If @false is returned, it is the caller's responsibility to invoke this
3487 * function later on until it does return @true. Alternatively, the caller
3488 * can explicitly wait for a grace period, for example, by passing @rgosp
3489 * to cond_synchronize_rcu_full() or by directly invoking synchronize_rcu().
3490 *
3491 * Yes, this function does not take counter wrap into account.
3492 * But counter wrap is harmless. If the counter wraps, we have waited
3493 * for more than a billion grace periods (and way more on a 64-bit
3494 * system!). Those needing to keep rcu_gp_oldstate values for very
3495 * long time periods (many hours even on 32-bit systems) should check
3496 * them occasionally and either refresh them or set a flag indicating
3497 * that the grace period has completed. Alternatively, they can use
3498 * get_completed_synchronize_rcu_full() to get a guaranteed-completed
3499 * grace-period state.
3500 *
3501 * This function provides the same memory-ordering guarantees that would
3502 * be provided by a synchronize_rcu() that was invoked at the call to
3503 * the function that provided @rgosp, and that returned at the end of this
3504 * function. And this guarantee requires that the root rcu_node structure's
3505 * ->gp_seq field be checked instead of that of the rcu_state structure.
3506 * The problem is that the just-ending grace-period's callbacks can be
3507 * invoked between the time that the root rcu_node structure's ->gp_seq
3508 * field is updated and the time that the rcu_state structure's ->gp_seq
3509 * field is updated. Therefore, if a single synchronize_rcu() is to
3510 * cause a subsequent poll_state_synchronize_rcu_full() to return @true,
3511 * then the root rcu_node structure is the one that needs to be polled.
3512 */
3513 bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3514 {
3515 struct rcu_node *rnp = rcu_get_root();
3516
3517 smp_mb(); // Order against root rcu_node structure grace-period cleanup.
3518 if (rgosp->rgos_norm == RCU_GET_STATE_COMPLETED ||
3519 rcu_seq_done_exact(&rnp->gp_seq, rgosp->rgos_norm) ||
3520 rgosp->rgos_exp == RCU_GET_STATE_COMPLETED ||
3521 rcu_seq_done_exact(&rcu_state.expedited_sequence, rgosp->rgos_exp)) {
3522 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3523 return true;
3524 }
3525 return false;
3526 }
3527 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu_full);
3528
3529 /**
3530 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3531 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
3532 *
3533 * If a full RCU grace period has elapsed since the earlier call to
3534 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3535 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3536 *
3537 * Yes, this function does not take counter wrap into account.
3538 * But counter wrap is harmless. If the counter wraps, we have waited for
3539 * more than 2 billion grace periods (and way more on a 64-bit system!),
3540 * so waiting for a couple of additional grace periods should be just fine.
3541 *
3542 * This function provides the same memory-ordering guarantees that
3543 * would be provided by a synchronize_rcu() that was invoked at the call
3544 * to the function that provided @oldstate and that returned at the end
3545 * of this function.
3546 */
3547 void cond_synchronize_rcu(unsigned long oldstate)
3548 {
3549 if (!poll_state_synchronize_rcu(oldstate))
3550 synchronize_rcu();
3551 }
3552 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3553
3554 /**
3555 * cond_synchronize_rcu_full - Conditionally wait for an RCU grace period
3556 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
3557 *
3558 * If a full RCU grace period has elapsed since the call to
3559 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
3560 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
3561 * obtained, just return. Otherwise, invoke synchronize_rcu() to wait
3562 * for a full grace period.
3563 *
3564 * Yes, this function does not take counter wrap into account.
3565 * But counter wrap is harmless. If the counter wraps, we have waited for
3566 * more than 2 billion grace periods (and way more on a 64-bit system!),
3567 * so waiting for a couple of additional grace periods should be just fine.
3568 *
3569 * This function provides the same memory-ordering guarantees that
3570 * would be provided by a synchronize_rcu() that was invoked at the call
3571 * to the function that provided @rgosp and that returned at the end of
3572 * this function.
3573 */
3574 void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
3575 {
3576 if (!poll_state_synchronize_rcu_full(rgosp))
3577 synchronize_rcu();
3578 }
3579 EXPORT_SYMBOL_GPL(cond_synchronize_rcu_full);
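/*
 * Editorial usage sketch (not part of the original file): the _full()
 * variants follow the same pattern but track normal and expedited grace
 * periods separately, so they never miss one.  old_fp is hypothetical.
 *
 *     struct rcu_gp_oldstate rgos;
 *
 *     get_state_synchronize_rcu_full(&rgos);
 *     // ... retire the old data ...
 *     cond_synchronize_rcu_full(&rgos);   // Blocks only if still needed.
 *     kfree(old_fp);
 */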
3580
3581 /*
3582 * Check to see if there is any immediate RCU-related work to be done by
3583 * the current CPU, returning 1 if so and zero otherwise. The checks are
3584 * in order of increasing expense: checks that can be carried out against
3585 * CPU-local state are performed first. However, we must check for CPU
3586 * stalls first, else we might not get a chance.
3587 */
3588 static int rcu_pending(int user)
3589 {
3590 bool gp_in_progress;
3591 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3592 struct rcu_node *rnp = rdp->mynode;
3593
3594 lockdep_assert_irqs_disabled();
3595
3596 /* Check for CPU stalls, if enabled. */
3597 check_cpu_stall(rdp);
3598
3599 /* Does this CPU need a deferred NOCB wakeup? */
3600 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3601 return 1;
3602
3603 /* Is this a nohz_full CPU in userspace or idle? (Ignore RCU if so.) */
3604 gp_in_progress = rcu_gp_in_progress();
3605 if ((user || rcu_is_cpu_rrupt_from_idle() ||
3606 (gp_in_progress &&
3607 time_before(jiffies, READ_ONCE(rcu_state.gp_start) +
3608 nohz_full_patience_delay_jiffies))) &&
3609 rcu_nohz_full_cpu())
3610 return 0;
3611
3612 /* Is the RCU core waiting for a quiescent state from this CPU? */
3613 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3614 return 1;
3615
3616 /* Does this CPU have callbacks ready to invoke? */
3617 if (!rcu_rdp_is_offloaded(rdp) &&
3618 rcu_segcblist_ready_cbs(&rdp->cblist))
3619 return 1;
3620
3621 /* Has RCU gone idle with this CPU needing another grace period? */
3622 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3623 !rcu_rdp_is_offloaded(rdp) &&
3624 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3625 return 1;
3626
3627 /* Have RCU grace period completed or started? */
3628 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3629 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3630 return 1;
3631
3632 /* nothing to do */
3633 return 0;
3634 }
3635
3636 /*
3637 * Helper function for rcu_barrier() tracing. If tracing is disabled,
3638 * the compiler is expected to optimize this away.
3639 */
3640 static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3641 {
3642 trace_rcu_barrier(rcu_state.name, s, cpu,
3643 atomic_read(&rcu_state.barrier_cpu_count), done);
3644 }
3645
3646 /*
3647 * RCU callback function for rcu_barrier(). If we are last, wake
3648 * up the task executing rcu_barrier().
3649 *
3650 * Note that the value of rcu_state.barrier_sequence must be captured
3651 * before the atomic_dec_and_test(). Otherwise, if this CPU is not last,
3652 * other CPUs might count the value down to zero before this CPU gets
3653 * around to invoking rcu_barrier_trace(), which might result in bogus
3654 * data from the next instance of rcu_barrier().
3655 */
3656 static void rcu_barrier_callback(struct rcu_head *rhp)
3657 {
3658 unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3659
3660 rhp->next = rhp; // Mark the callback as having been invoked.
3661 if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3662 rcu_barrier_trace(TPS("LastCB"), -1, s);
3663 complete(&rcu_state.barrier_completion);
3664 } else {
3665 rcu_barrier_trace(TPS("CB"), -1, s);
3666 }
3667 }
3668
3669 /*
3670 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
3671 */
3672 static void rcu_barrier_entrain(struct rcu_data *rdp)
3673 {
3674 unsigned long gseq = READ_ONCE(rcu_state.barrier_sequence);
3675 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
3676 bool wake_nocb = false;
3677 bool was_alldone = false;
3678
3679 lockdep_assert_held(&rcu_state.barrier_lock);
3680 if (rcu_seq_state(lseq) || !rcu_seq_state(gseq) || rcu_seq_ctr(lseq) != rcu_seq_ctr(gseq))
3681 return;
3682 rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3683 rdp->barrier_head.func = rcu_barrier_callback;
3684 debug_rcu_head_queue(&rdp->barrier_head);
3685 rcu_nocb_lock(rdp);
3686 /*
3687 * Flush bypass and wakeup rcuog if we add callbacks to an empty regular
3688 * queue. This way we don't wait for bypass timer that can reach seconds
3689 * if it's fully lazy.
3690 */
3691 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
3692 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
3693 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
3694 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
3695 atomic_inc(&rcu_state.barrier_cpu_count);
3696 } else {
3697 debug_rcu_head_unqueue(&rdp->barrier_head);
3698 rcu_barrier_trace(TPS("IRQNQ"), -1, rcu_state.barrier_sequence);
3699 }
3700 rcu_nocb_unlock(rdp);
3701 if (wake_nocb)
3702 wake_nocb_gp(rdp, false);
3703 smp_store_release(&rdp->barrier_seq_snap, gseq);
3704 }
3705
3706 /*
3707 * Called with preemption disabled, and from cross-cpu IRQ context.
3708 */
3709 static void rcu_barrier_handler(void *cpu_in)
3710 {
3711 uintptr_t cpu = (uintptr_t)cpu_in;
3712 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3713
3714 lockdep_assert_irqs_disabled();
3715 WARN_ON_ONCE(cpu != rdp->cpu);
3716 WARN_ON_ONCE(cpu != smp_processor_id());
3717 raw_spin_lock(&rcu_state.barrier_lock);
3718 rcu_barrier_entrain(rdp);
3719 raw_spin_unlock(&rcu_state.barrier_lock);
3720 }
3721
3722 /**
3723 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
3724 *
3725 * Note that this primitive does not necessarily wait for an RCU grace period
3726 * to complete. For example, if there are no RCU callbacks queued anywhere
3727 * in the system, then rcu_barrier() is within its rights to return
3728 * immediately, without waiting for anything, much less an RCU grace period.
3729 */
3730 void rcu_barrier(void)
3731 {
3732 uintptr_t cpu;
3733 unsigned long flags;
3734 unsigned long gseq;
3735 struct rcu_data *rdp;
3736 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3737
3738 rcu_barrier_trace(TPS("Begin"), -1, s);
3739
3740 /* Take mutex to serialize concurrent rcu_barrier() requests. */
3741 mutex_lock(&rcu_state.barrier_mutex);
3742
3743 /* Did someone else do our work for us? */
3744 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3745 rcu_barrier_trace(TPS("EarlyExit"), -1, rcu_state.barrier_sequence);
3746 smp_mb(); /* caller's subsequent code after above check. */
3747 mutex_unlock(&rcu_state.barrier_mutex);
3748 return;
3749 }
3750
3751 /* Mark the start of the barrier operation. */
3752 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3753 rcu_seq_start(&rcu_state.barrier_sequence);
3754 gseq = rcu_state.barrier_sequence;
3755 rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
3756
3757 /*
3758 * Initialize the count to two rather than to zero in order
3759 * to avoid a too-soon return to zero in case of an immediate
3760 * invocation of the just-enqueued callback (or preemption of
3761 * this task). Exclude CPU-hotplug operations to ensure that no
3762 * offline non-offloaded CPU has callbacks queued.
3763 */
3764 init_completion(&rcu_state.barrier_completion);
3765 atomic_set(&rcu_state.barrier_cpu_count, 2);
3766 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3767
3768 /*
3769 * Force each CPU with callbacks to register a new callback.
3770 * When that callback is invoked, we will know that all of the
3771 * corresponding CPU's preceding callbacks have been invoked.
3772 */
3773 for_each_possible_cpu(cpu) {
3774 rdp = per_cpu_ptr(&rcu_data, cpu);
3775 retry:
3776 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
3777 continue;
3778 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
3779 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
3780 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3781 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3782 rcu_barrier_trace(TPS("NQ"), cpu, rcu_state.barrier_sequence);
3783 continue;
3784 }
3785 if (!rcu_rdp_cpu_online(rdp)) {
3786 rcu_barrier_entrain(rdp);
3787 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3788 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3789 rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu, rcu_state.barrier_sequence);
3790 continue;
3791 }
3792 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
3793 if (smp_call_function_single(cpu, rcu_barrier_handler, (void *)cpu, 1)) {
3794 schedule_timeout_uninterruptible(1);
3795 goto retry;
3796 }
3797 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
3798 rcu_barrier_trace(TPS("OnlineQ"), cpu, rcu_state.barrier_sequence);
3799 }
3800
3801 /*
3802 * Now that we have an rcu_barrier_callback() callback on each
3803 * CPU, and thus each counted, remove the initial count.
3804 */
3805 if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
3806 complete(&rcu_state.barrier_completion);
3807
3808 /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3809 wait_for_completion(&rcu_state.barrier_completion);
3810
3811 /* Mark the end of the barrier operation. */
3812 rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
3813 rcu_seq_end(&rcu_state.barrier_sequence);
3814 gseq = rcu_state.barrier_sequence;
3815 for_each_possible_cpu(cpu) {
3816 rdp = per_cpu_ptr(&rcu_data, cpu);
3817
3818 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
3819 }
3820
3821 /* Other rcu_barrier() invocations can now safely proceed. */
3822 mutex_unlock(&rcu_state.barrier_mutex);
3823 }
3824 EXPORT_SYMBOL_GPL(rcu_barrier);
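/*
 * Editorial usage sketch (not part of the original file): rcu_barrier()
 * is most often used on a module-unload path to ensure that callbacks
 * queued by the module have finished running before its code and data are
 * discarded.  The foo_exit() and foo_stop_queueing() names are hypothetical.
 *
 *     static void __exit foo_exit(void)
 *     {
 *         foo_stop_queueing();   // No new call_rcu() invocations after this.
 *         rcu_barrier();         // Wait for already-queued callbacks to run.
 *         // Now safe to unload the code containing the callback functions.
 *     }
 */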
3825
3826 static unsigned long rcu_barrier_last_throttle;
3827
3828 /**
3829 * rcu_barrier_throttled - Do rcu_barrier(), but limit to one per second
3830 *
3831 * This can be thought of as guard rails around rcu_barrier() that
3832 * permit unrestricted userspace use, at least assuming the hardware's
3833 * try_cmpxchg() is robust. There will be at most one call per second to
3834 * rcu_barrier() system-wide from use of this function, which means that
3835 * callers might needlessly wait a second or three.
3836 *
3837 * This is intended for use by test suites to avoid OOM by flushing RCU
3838 * callbacks from the previous test before starting the next. See the
3839 * rcutree.do_rcu_barrier module parameter for more information.
3840 *
3841 * Why not simply make rcu_barrier() more scalable? That might be
3842 * the eventual endpoint, but let's keep it simple for the time being.
3843 * Note that the module parameter infrastructure serializes calls to a
3844 * given .set() function, but should concurrent .set() invocation ever be
3845 * possible, we are ready!
3846 */
3847 static void rcu_barrier_throttled(void)
3848 {
3849 unsigned long j = jiffies;
3850 unsigned long old = READ_ONCE(rcu_barrier_last_throttle);
3851 unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
3852
3853 while (time_in_range(j, old, old + HZ / 16) ||
3854 !try_cmpxchg(&rcu_barrier_last_throttle, &old, j)) {
3855 schedule_timeout_idle(HZ / 16);
3856 if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
3857 smp_mb(); /* caller's subsequent code after above check. */
3858 return;
3859 }
3860 j = jiffies;
3861 old = READ_ONCE(rcu_barrier_last_throttle);
3862 }
3863 rcu_barrier();
3864 }
3865
3866 /*
3867 * Invoke rcu_barrier_throttled() when a rcutree.do_rcu_barrier
3868 * request arrives. We insist on a true value to allow for possible
3869 * future expansion.
3870 */
3871 static int param_set_do_rcu_barrier(const char *val, const struct kernel_param *kp)
3872 {
3873 bool b;
3874 int ret;
3875
3876 if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING)
3877 return -EAGAIN;
3878 ret = kstrtobool(val, &b);
3879 if (!ret && b) {
3880 atomic_inc((atomic_t *)kp->arg);
3881 rcu_barrier_throttled();
3882 atomic_dec((atomic_t *)kp->arg);
3883 }
3884 return ret;
3885 }
3886
3887 /*
3888 * Output the number of outstanding rcutree.do_rcu_barrier requests.
3889 */
3890 static int param_get_do_rcu_barrier(char *buffer, const struct kernel_param *kp)
3891 {
3892 return sprintf(buffer, "%d\n", atomic_read((atomic_t *)kp->arg));
3893 }
3894
3895 static const struct kernel_param_ops do_rcu_barrier_ops = {
3896 .set = param_set_do_rcu_barrier,
3897 .get = param_get_do_rcu_barrier,
3898 };
3899 static atomic_t do_rcu_barrier;
3900 module_param_cb(do_rcu_barrier, &do_rcu_barrier_ops, &do_rcu_barrier, 0644);
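/*
 * Editorial usage note (a sketch; the sysfs path below is an assumption
 * rather than something stated here): with these ops registered, a test
 * harness can trigger a throttled rcu_barrier() from userspace, for
 * example:
 *
 *     echo 1 > /sys/module/rcutree/parameters/do_rcu_barrier
 *
 * and read the same file back to see how many such requests are currently
 * outstanding.
 */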
3901
3902 /*
3903 * Compute the mask of online CPUs for the specified rcu_node structure.
3904 * This will not be stable unless the rcu_node structure's ->lock is
3905 * held, but the bit corresponding to the current CPU will be stable
3906 * in most contexts.
3907 */
3908 static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
3909 {
3910 return READ_ONCE(rnp->qsmaskinitnext);
3911 }
3912
3913 /*
3914 * Is the CPU corresponding to the specified rcu_data structure online
3915 * from RCU's perspective? This perspective is given by that structure's
3916 * ->qsmaskinitnext field rather than by the global cpu_online_mask.
3917 */
3918 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
3919 {
3920 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
3921 }
3922
3923 bool rcu_cpu_online(int cpu)
3924 {
3925 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3926
3927 return rcu_rdp_cpu_online(rdp);
3928 }
3929
3930 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
3931
3932 /*
3933 * Is the current CPU online as far as RCU is concerned?
3934 *
3935 * Disable preemption to avoid false positives that could otherwise
3936 * happen due to the current CPU number being sampled, this task being
3937 * preempted, its old CPU being taken offline, resuming on some other CPU,
3938 * then determining that its old CPU is now offline.
3939 *
3940 * Disable checking if in an NMI handler because we cannot safely
3941 * report errors from NMI handlers anyway. In addition, it is OK to use
3942 * RCU on an offline processor during initial boot, hence the check for
3943 * rcu_scheduler_fully_active.
3944 */
3945 bool rcu_lockdep_current_cpu_online(void)
3946 {
3947 struct rcu_data *rdp;
3948 bool ret = false;
3949
3950 if (in_nmi() || !rcu_scheduler_fully_active)
3951 return true;
3952 preempt_disable_notrace();
3953 rdp = this_cpu_ptr(&rcu_data);
3954 /*
3955 * Strictly, we care here about the case where the current CPU is
3956 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
3957 * not being up to date. So arch_spin_is_locked() might have a
3958 * false positive if it's held by some *other* CPU, but that's
3959 * OK because that just means a false *negative* on the warning.
3960 */
3961 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
3962 ret = true;
3963 preempt_enable_notrace();
3964 return ret;
3965 }
3966 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
3967
3968 #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
3969
3970 // Has rcu_init() been invoked? This is used (for example) to determine
3971 // whether spinlocks may be acquired safely.
3972 static bool rcu_init_invoked(void)
3973 {
3974 return !!READ_ONCE(rcu_state.n_online_cpus);
3975 }
3976
3977 /*
3978 * All CPUs for the specified rcu_node structure have gone offline,
3979 * and all tasks that were preempted within an RCU read-side critical
3980 * section while running on one of those CPUs have since exited their RCU
3981 * read-side critical section. Some other CPU is reporting this fact with
3982 * the specified rcu_node structure's ->lock held and interrupts disabled.
3983 * This function therefore goes up the tree of rcu_node structures,
3984 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
3985 * the leaf rcu_node structure's ->qsmaskinit field has already been
3986 * updated.
3987 *
3988 * This function does check that the specified rcu_node structure has
3989 * all CPUs offline and no blocked tasks, so it is OK to invoke it
3990 * prematurely. That said, invoking it after the fact will cost you
3991 * a needless lock acquisition. So once it has done its work, don't
3992 * invoke it again.
3993 */
3994 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
3995 {
3996 long mask;
3997 struct rcu_node *rnp = rnp_leaf;
3998
3999 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4000 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
4001 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
4002 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
4003 return;
4004 for (;;) {
4005 mask = rnp->grpmask;
4006 rnp = rnp->parent;
4007 if (!rnp)
4008 break;
4009 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4010 rnp->qsmaskinit &= ~mask;
4011 /* Between grace periods, so better already be zero! */
4012 WARN_ON_ONCE(rnp->qsmask);
4013 if (rnp->qsmaskinit) {
4014 raw_spin_unlock_rcu_node(rnp);
4015 /* irqs remain disabled. */
4016 return;
4017 }
4018 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4019 }
4020 }
4021
4022 /*
4023 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4024 * first CPU in a given leaf rcu_node structure coming online. The caller
4025 * must hold the corresponding leaf rcu_node ->lock with interrupts
4026 * disabled.
4027 */
4028 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4029 {
4030 long mask;
4031 long oldmask;
4032 struct rcu_node *rnp = rnp_leaf;
4033
4034 raw_lockdep_assert_held_rcu_node(rnp_leaf);
4035 WARN_ON_ONCE(rnp->wait_blkd_tasks);
4036 for (;;) {
4037 mask = rnp->grpmask;
4038 rnp = rnp->parent;
4039 if (rnp == NULL)
4040 return;
4041 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4042 oldmask = rnp->qsmaskinit;
4043 rnp->qsmaskinit |= mask;
4044 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4045 if (oldmask)
4046 return;
4047 }
4048 }
4049
4050 /*
4051 * Do boot-time initialization of a CPU's per-CPU RCU data.
4052 */
4053 static void __init
4054 rcu_boot_init_percpu_data(int cpu)
4055 {
4056 struct context_tracking *ct = this_cpu_ptr(&context_tracking);
4057 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4058
4059 /* Set up local state, ensuring consistent view of global state. */
4060 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4061 INIT_WORK(&rdp->strict_work, strict_work_handler);
4062 WARN_ON_ONCE(ct->nesting != 1);
4063 WARN_ON_ONCE(rcu_watching_snap_in_eqs(ct_rcu_watching_cpu(cpu)));
4064 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4065 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4066 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
4067 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4068 rdp->rcu_onl_gp_state = RCU_GP_CLEANED;
4069 rdp->last_sched_clock = jiffies;
4070 rdp->cpu = cpu;
4071 rcu_boot_init_nocb_percpu_data(rdp);
4072 }
4073
4074 static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
4075 {
4076 cpumask_var_t affinity;
4077 int cpu;
4078
4079 if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
4080 return;
4081
4082 for_each_leaf_node_possible_cpu(rnp, cpu)
4083 cpumask_set_cpu(cpu, affinity);
4084
4085 kthread_affine_preferred(t, affinity);
4086
4087 free_cpumask_var(affinity);
4088 }
4089
4090 struct kthread_worker *rcu_exp_gp_kworker;
4091
4092 static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
4093 {
4094 struct kthread_worker *kworker;
4095 const char *name = "rcu_exp_par_gp_kthread_worker/%d";
4096 struct sched_param param = { .sched_priority = kthread_prio };
4097 int rnp_index = rnp - rcu_get_root();
4098
4099 if (rnp->exp_kworker)
4100 return;
4101
4102 kworker = kthread_create_worker(0, name, rnp_index);
4103 if (IS_ERR_OR_NULL(kworker)) {
4104 pr_err("Failed to create par gp kworker on %d/%d\n",
4105 rnp->grplo, rnp->grphi);
4106 return;
4107 }
4108 WRITE_ONCE(rnp->exp_kworker, kworker);
4109
4110 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4111 sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
4112
4113 rcu_thread_affine_rnp(kworker->task, rnp);
4114 wake_up_process(kworker->task);
4115 }
4116
4117 static void __init rcu_start_exp_gp_kworker(void)
4118 {
4119 const char *name = "rcu_exp_gp_kthread_worker";
4120 struct sched_param param = { .sched_priority = kthread_prio };
4121
4122 rcu_exp_gp_kworker = kthread_run_worker(0, name);
4123 if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
4124 pr_err("Failed to create %s!\n", name);
4125 rcu_exp_gp_kworker = NULL;
4126 return;
4127 }
4128
4129 if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
4130 sched_setscheduler_nocheck(rcu_exp_gp_kworker->task, SCHED_FIFO, &param);
4131 }
4132
4133 static void rcu_spawn_rnp_kthreads(struct rcu_node *rnp)
4134 {
4135 if (rcu_scheduler_fully_active) {
4136 mutex_lock(&rnp->kthread_mutex);
4137 rcu_spawn_one_boost_kthread(rnp);
4138 rcu_spawn_exp_par_gp_kworker(rnp);
4139 mutex_unlock(&rnp->kthread_mutex);
4140 }
4141 }
4142
4143 /*
4144 * Invoked early in the CPU-online process, when pretty much all services
4145 * are available. The incoming CPU is not present.
4146 *
4147 * Initializes a CPU's per-CPU RCU data. Note that only one online or
4148 * offline event can be happening at a given time. Note also that we can
4149 * accept some slop in the rsp->gp_seq access due to the fact that this
4150 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4151 * And any offloaded callbacks are being numbered elsewhere.
4152 */
4153 int rcutree_prepare_cpu(unsigned int cpu)
4154 {
4155 unsigned long flags;
4156 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
4157 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4158 struct rcu_node *rnp = rcu_get_root();
4159
4160 /* Set up local state, ensuring consistent view of global state. */
4161 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4162 rdp->qlen_last_fqs_check = 0;
4163 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4164 rdp->blimit = blimit;
4165 ct->nesting = 1; /* CPU not up, no tearing. */
4166 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
4167
4168 /*
4169 * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4170 * (re-)initialized.
4171 */
4172 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4173 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4174
4175 /*
4176 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
4177 * propagation up the rcu_node tree will happen at the beginning
4178 * of the next grace period.
4179 */
4180 rnp = rdp->mynode;
4181 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
4182 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4183 rdp->gp_seq_needed = rdp->gp_seq;
4184 rdp->cpu_no_qs.b.norm = true;
4185 rdp->core_needs_qs = false;
4186 rdp->rcu_iw_pending = false;
4187 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4188 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4189 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4190 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4191 rcu_spawn_rnp_kthreads(rnp);
4192 rcu_spawn_cpu_nocb_kthread(cpu);
4193 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4194 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4195
4196 return 0;
4197 }
4198
4199 /*
4200 * Has the specified (known valid) CPU ever been fully online?
4201 */
4202 bool rcu_cpu_beenfullyonline(int cpu)
4203 {
4204 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4205
4206 return smp_load_acquire(&rdp->beenonline);
4207 }
4208
4209 /*
4210 * Near the end of the CPU-online process. Pretty much all services
4211 * enabled, and the CPU is now very much alive.
4212 */
4213 int rcutree_online_cpu(unsigned int cpu)
4214 {
4215 unsigned long flags;
4216 struct rcu_data *rdp;
4217 struct rcu_node *rnp;
4218
4219 rdp = per_cpu_ptr(&rcu_data, cpu);
4220 rnp = rdp->mynode;
4221 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4222 rnp->ffmask |= rdp->grpmask;
4223 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4224 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4225 return 0; /* Too early in boot for scheduler work. */
4226 sync_sched_exp_online_cleanup(cpu);
4227
4228 // Stop-machine done, so allow nohz_full to disable tick.
4229 tick_dep_clear(TICK_DEP_BIT_RCU);
4230 return 0;
4231 }
4232
4233 /*
4234 * Mark the specified CPU as being online so that subsequent grace periods
4235 * (both expedited and normal) will wait on it. Note that this means that
4236 * incoming CPUs are not allowed to use RCU read-side critical sections
4237 * until this function is called. Failing to observe this restriction
4238 * will result in lockdep splats.
4239 *
4240 * Note that this function is special in that it is invoked directly
4241 * from the incoming CPU rather than from the cpuhp_step mechanism.
4242 * This is because this function must be invoked at a precise location.
4243 * This incoming CPU must not have enabled interrupts yet.
4244 *
4245 * This mirrors the effects of rcutree_report_cpu_dead().
4246 */
4247 void rcutree_report_cpu_starting(unsigned int cpu)
4248 {
4249 unsigned long mask;
4250 struct rcu_data *rdp;
4251 struct rcu_node *rnp;
4252 bool newcpu;
4253
4254 lockdep_assert_irqs_disabled();
4255 rdp = per_cpu_ptr(&rcu_data, cpu);
4256 if (rdp->cpu_started)
4257 return;
4258 rdp->cpu_started = true;
4259
4260 rnp = rdp->mynode;
4261 mask = rdp->grpmask;
4262 arch_spin_lock(&rcu_state.ofl_lock);
4263 rcu_watching_online();
4264 raw_spin_lock(&rcu_state.barrier_lock);
4265 raw_spin_lock_rcu_node(rnp);
4266 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4267 raw_spin_unlock(&rcu_state.barrier_lock);
4268 newcpu = !(rnp->expmaskinitnext & mask);
4269 rnp->expmaskinitnext |= mask;
4270 /* Allow lockless access for expedited grace periods. */
4271 smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4272 ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4273 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4274 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4275 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state);
4276
4277 /* An incoming CPU should never be blocking a grace period. */
4278 if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4279 /* rcu_report_qs_rnp() *really* wants some flags to restore */
4280 unsigned long flags;
4281
4282 local_irq_save(flags);
4283 rcu_disable_urgency_upon_qs(rdp);
4284 /* Report QS -after- changing ->qsmaskinitnext! */
4285 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4286 } else {
4287 raw_spin_unlock_rcu_node(rnp);
4288 }
4289 arch_spin_unlock(&rcu_state.ofl_lock);
4290 smp_store_release(&rdp->beenonline, true);
4291 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4292 }
4293
4294 /*
4295 * The outgoing CPU has no further need of RCU, so remove it from
4296 * the rcu_node tree's ->qsmaskinitnext bit masks.
4297 *
4298 * Note that this function is special in that it is invoked directly
4299 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4300 * This is because this function must be invoked at a precise location.
4301 *
4302 * This mirrors the effect of rcutree_report_cpu_starting().
4303 */
4304 void rcutree_report_cpu_dead(void)
4305 {
4306 unsigned long flags;
4307 unsigned long mask;
4308 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4309 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
4310
4311 /*
4312 * IRQs must be disabled from now on and until the CPU dies, or an interrupt
4313 * may introduce a new read-side critical section while this CPU is already off the QS masks.
4314 */
4315 lockdep_assert_irqs_disabled();
4316 // Do any dangling deferred wakeups.
4317 do_nocb_deferred_wakeup(rdp);
4318
4319 rcu_preempt_deferred_qs(current);
4320
4321 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4322 mask = rdp->grpmask;
4323 arch_spin_lock(&rcu_state.ofl_lock);
4324 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4325 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4326 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state);
4327 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4328 /* Report quiescent state -before- changing ->qsmaskinitnext! */
4329 rcu_disable_urgency_upon_qs(rdp);
4330 rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4331 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4332 }
4333 WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4334 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4335 arch_spin_unlock(&rcu_state.ofl_lock);
4336 rdp->cpu_started = false;
4337 }
4338
4339 #ifdef CONFIG_HOTPLUG_CPU
4340 /*
4341 * The outgoing CPU has just passed through the dying-idle state, and we
4342 * are being invoked from the CPU that was IPIed to continue the offline
4343 * operation. Migrate the outgoing CPU's callbacks to the current CPU.
4344 */
4345 void rcutree_migrate_callbacks(int cpu)
4346 {
4347 unsigned long flags;
4348 struct rcu_data *my_rdp;
4349 struct rcu_node *my_rnp;
4350 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4351 bool needwake;
4352
4353 if (rcu_rdp_is_offloaded(rdp))
4354 return;
4355
4356 raw_spin_lock_irqsave(&rcu_state.barrier_lock, flags);
4357 if (rcu_segcblist_empty(&rdp->cblist)) {
4358 raw_spin_unlock_irqrestore(&rcu_state.barrier_lock, flags);
4359 return; /* No callbacks to migrate. */
4360 }
4361
4362 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
4363 rcu_barrier_entrain(rdp);
4364 my_rdp = this_cpu_ptr(&rcu_data);
4365 my_rnp = my_rdp->mynode;
4366 rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4367 WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies, false));
4368 raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4369 /* Leverage recent GPs and set GP for new callbacks. */
4370 needwake = rcu_advance_cbs(my_rnp, rdp) ||
4371 rcu_advance_cbs(my_rnp, my_rdp);
4372 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4373 raw_spin_unlock(&rcu_state.barrier_lock); /* irqs remain disabled. */
4374 needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4375 rcu_segcblist_disable(&rdp->cblist);
4376 WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) != !rcu_segcblist_n_cbs(&my_rdp->cblist));
4377 check_cb_ovld_locked(my_rdp, my_rnp);
4378 if (rcu_rdp_is_offloaded(my_rdp)) {
4379 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4380 __call_rcu_nocb_wake(my_rdp, true, flags);
4381 } else {
4382 rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4383 raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4384 }
4385 local_irq_restore(flags);
4386 if (needwake)
4387 rcu_gp_kthread_wake();
4388 lockdep_assert_irqs_enabled();
4389 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4390 !rcu_segcblist_empty(&rdp->cblist),
4391 "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4392 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4393 rcu_segcblist_first_cb(&rdp->cblist));
4394 }
4395
4396 /*
4397 * The CPU has been completely removed, and some other CPU is reporting
4398 * this fact from process context. Do the remainder of the cleanup.
4399 * There can only be one CPU hotplug operation at a time, so no need for
4400 * explicit locking.
4401 */
4402 int rcutree_dead_cpu(unsigned int cpu)
4403 {
4404 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
4405 WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
4406 // Stop-machine done, so allow nohz_full to disable tick.
4407 tick_dep_clear(TICK_DEP_BIT_RCU);
4408 return 0;
4409 }
4410
4411 /*
4412 * Near the end of the offline process. Trace the fact that this CPU
4413 * is going offline.
4414 */
4415 int rcutree_dying_cpu(unsigned int cpu)
4416 {
4417 bool blkd;
4418 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4419 struct rcu_node *rnp = rdp->mynode;
4420
4421 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
4422 trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
4423 blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
4424 return 0;
4425 }
4426
4427 /*
4428 * Near the beginning of the process. The CPU is still very much alive
4429 * with pretty much all services enabled.
4430 */
4431 int rcutree_offline_cpu(unsigned int cpu)
4432 {
4433 unsigned long flags;
4434 struct rcu_data *rdp;
4435 struct rcu_node *rnp;
4436
4437 rdp = per_cpu_ptr(&rcu_data, cpu);
4438 rnp = rdp->mynode;
4439 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4440 rnp->ffmask &= ~rdp->grpmask;
4441 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4442
4443 // nohz_full CPUs need the tick for stop-machine to work quickly
4444 tick_dep_set(TICK_DEP_BIT_RCU);
4445 return 0;
4446 }
4447 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
4448
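/*
 * Illustrative sketch, not part of RCU: RCU's hotplug callbacks above are
 * wired into the CPU-hotplug state machine by the core kernel, but an
 * ordinary subsystem would register comparable online/offline hooks with
 * cpuhp_setup_state().  The callback and state names below are invented
 * for the example; cpuhp_setup_state() and CPUHP_AP_ONLINE_DYN are the
 * real interfaces:
 *
 *	static int my_cpu_online(unsigned int cpu)
 *	{
 *		// Set up per-CPU resources; the CPU is coming up.
 *		return 0;
 *	}
 *
 *	static int my_cpu_offline(unsigned int cpu)
 *	{
 *		// Tear down per-CPU resources; the CPU is going down.
 *		return 0;
 *	}
 *
 *	static int __init my_hotplug_init(void)
 *	{
 *		int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "my:online",
 *					    my_cpu_online, my_cpu_offline);
 *		return ret < 0 ? ret : 0;
 *	}
 */
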
4449 /*
4450 * On non-huge systems, use expedited RCU grace periods to make suspend
4451 * and hibernation run faster.
4452 */
4453 static int rcu_pm_notify(struct notifier_block *self,
4454 unsigned long action, void *hcpu)
4455 {
4456 switch (action) {
4457 case PM_HIBERNATION_PREPARE:
4458 case PM_SUSPEND_PREPARE:
4459 rcu_async_hurry();
4460 rcu_expedite_gp();
4461 break;
4462 case PM_POST_HIBERNATION:
4463 case PM_POST_SUSPEND:
4464 rcu_unexpedite_gp();
4465 rcu_async_relax();
4466 break;
4467 default:
4468 break;
4469 }
4470 return NOTIFY_OK;
4471 }
4472
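/*
 * Illustrative sketch, not part of RCU: rcu_pm_notify() is registered via
 * pm_notifier() from rcu_init() below.  A driver reacting to the same
 * suspend/hibernation transitions would typically declare its own
 * notifier_block and use register_pm_notifier().  The callback name is
 * invented; notifier_block, register_pm_notifier(), and the PM_* events
 * are the real interfaces:
 *
 *	static int my_pm_notify(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case PM_SUSPEND_PREPARE:
 *			// Quiesce the device before the system sleeps.
 *			break;
 *		case PM_POST_SUSPEND:
 *			// Resume normal operation.
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_pm_nb = {
 *		.notifier_call = my_pm_notify,
 *	};
 *
 *	// During init:  register_pm_notifier(&my_pm_nb);
 */
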
4473 /*
4474 * Spawn the kthreads that handle RCU's grace periods.
4475 */
4476 static int __init rcu_spawn_gp_kthread(void)
4477 {
4478 unsigned long flags;
4479 struct rcu_node *rnp;
4480 struct sched_param sp;
4481 struct task_struct *t;
4482 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4483
4484 rcu_scheduler_fully_active = 1;
4485 t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4486 if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4487 return 0;
4488 if (kthread_prio) {
4489 sp.sched_priority = kthread_prio;
4490 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4491 }
4492 rnp = rcu_get_root();
4493 raw_spin_lock_irqsave_rcu_node(rnp, flags);
4494 WRITE_ONCE(rcu_state.gp_activity, jiffies);
4495 WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4496 // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4497 smp_store_release(&rcu_state.gp_kthread, t); /* ^^^ */
4498 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4499 wake_up_process(t);
4500 /* This is a pre-SMP initcall, so we expect a single CPU. */
4501 WARN_ON(num_online_cpus() > 1);
4502 /*
4503 * Those kthreads could not be created during rcu_init() -> rcutree_prepare_cpu()
4504 * because rcu_scheduler_fully_active was not yet set.
4505 */
4506 rcu_spawn_cpu_nocb_kthread(smp_processor_id());
4507 rcu_spawn_rnp_kthreads(rdp->mynode);
4508 rcu_spawn_core_kthreads();
4509 /* Create kthread worker for expedited GPs */
4510 rcu_start_exp_gp_kworker();
4511 return 0;
4512 }
4513 early_initcall(rcu_spawn_gp_kthread);
4514
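/*
 * Illustrative sketch, not part of RCU: the function above uses the common
 * pattern of creating a kthread in a stopped state, optionally raising it
 * to SCHED_FIFO, publishing it, and only then waking it.  A stripped-down
 * version of that pattern, with an invented thread function and priority,
 * might look like this (kthread_create(), sched_setscheduler_nocheck(),
 * and wake_up_process() are the real interfaces):
 *
 *	static int my_thread_fn(void *unused)
 *	{
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 *
 *	static int __init my_spawn_kthread(void)
 *	{
 *		struct sched_param sp = { .sched_priority = 1 };
 *		struct task_struct *t;
 *
 *		t = kthread_create(my_thread_fn, NULL, "my_kthread");
 *		if (IS_ERR(t))
 *			return PTR_ERR(t);
 *		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 *		// Publish "t" to any consumers before waking it.
 *		wake_up_process(t);
 *		return 0;
 *	}
 */
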
4515 /*
4516 * This function is invoked towards the end of the scheduler's
4517 * initialization process. Before this is called, the idle task might
4518 * contain synchronous grace-period primitives (during which time, this idle
4519 * task is booting the system, and such primitives are no-ops). After this
4520 * function is called, any synchronous grace-period primitives are run as
4521 * expedited, with the requesting task driving the grace period forward.
4522 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4523 * runtime RCU functionality.
4524 */
4525 void rcu_scheduler_starting(void)
4526 {
4527 unsigned long flags;
4528 struct rcu_node *rnp;
4529
4530 WARN_ON(num_online_cpus() != 1);
4531 WARN_ON(nr_context_switches() > 0);
4532 rcu_test_sync_prims();
4533
4534 // Fix up the ->gp_seq counters.
4535 local_irq_save(flags);
4536 rcu_for_each_node_breadth_first(rnp)
4537 rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
4538 local_irq_restore(flags);
4539
4540 // Switch out of early boot mode.
4541 rcu_scheduler_active = RCU_SCHEDULER_INIT;
4542 rcu_test_sync_prims();
4543 }
4544
4545 /*
4546 * Helper function for rcu_init() that initializes the rcu_state structure.
4547 */
4548 static void __init rcu_init_one(void)
4549 {
4550 static const char * const buf[] = RCU_NODE_NAME_INIT;
4551 static const char * const fqs[] = RCU_FQS_NAME_INIT;
4552 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4553 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4554
4555 int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
4556 int cpustride = 1;
4557 int i;
4558 int j;
4559 struct rcu_node *rnp;
4560
4561 BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
4562
4563 /* Silence gcc 4.8 false positive about array index out of range. */
4564 if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4565 panic("rcu_init_one: rcu_num_lvls out of range");
4566
4567 /* Initialize the level-tracking arrays. */
4568
4569 for (i = 1; i < rcu_num_lvls; i++)
4570 rcu_state.level[i] =
4571 rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4572 rcu_init_levelspread(levelspread, num_rcu_lvl);
4573
4574 /* Initialize the elements themselves, starting from the leaves. */
4575
4576 for (i = rcu_num_lvls - 1; i >= 0; i--) {
4577 cpustride *= levelspread[i];
4578 rnp = rcu_state.level[i];
4579 for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4580 raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4581 lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4582 &rcu_node_class[i], buf[i]);
4583 raw_spin_lock_init(&rnp->fqslock);
4584 lockdep_set_class_and_name(&rnp->fqslock,
4585 &rcu_fqs_class[i], fqs[i]);
4586 rnp->gp_seq = rcu_state.gp_seq;
4587 rnp->gp_seq_needed = rcu_state.gp_seq;
4588 rnp->completedqs = rcu_state.gp_seq;
4589 rnp->qsmask = 0;
4590 rnp->qsmaskinit = 0;
4591 rnp->grplo = j * cpustride;
4592 rnp->grphi = (j + 1) * cpustride - 1;
4593 if (rnp->grphi >= nr_cpu_ids)
4594 rnp->grphi = nr_cpu_ids - 1;
4595 if (i == 0) {
4596 rnp->grpnum = 0;
4597 rnp->grpmask = 0;
4598 rnp->parent = NULL;
4599 } else {
4600 rnp->grpnum = j % levelspread[i - 1];
4601 rnp->grpmask = BIT(rnp->grpnum);
4602 rnp->parent = rcu_state.level[i - 1] +
4603 j / levelspread[i - 1];
4604 }
4605 rnp->level = i;
4606 INIT_LIST_HEAD(&rnp->blkd_tasks);
4607 rcu_init_one_nocb(rnp);
4608 init_waitqueue_head(&rnp->exp_wq[0]);
4609 init_waitqueue_head(&rnp->exp_wq[1]);
4610 init_waitqueue_head(&rnp->exp_wq[2]);
4611 init_waitqueue_head(&rnp->exp_wq[3]);
4612 spin_lock_init(&rnp->exp_lock);
4613 mutex_init(&rnp->kthread_mutex);
4614 raw_spin_lock_init(&rnp->exp_poll_lock);
4615 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
4616 INIT_WORK(&rnp->exp_poll_wq, sync_rcu_do_polled_gp);
4617 }
4618 }
4619
4620 init_swait_queue_head(&rcu_state.gp_wq);
4621 init_swait_queue_head(&rcu_state.expedited_wq);
4622 rnp = rcu_first_leaf_node();
4623 for_each_possible_cpu(i) {
4624 while (i > rnp->grphi)
4625 rnp++;
4626 per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4627 per_cpu_ptr(&rcu_data, i)->barrier_head.next =
4628 &per_cpu_ptr(&rcu_data, i)->barrier_head;
4629 rcu_boot_init_percpu_data(i);
4630 }
4631 }
4632
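/*
 * Worked example (an assumed geometry for illustration only): with a
 * two-level tree of one root and seven leaf rcu_node structures, the
 * loops above lay things out roughly as follows:
 *
 *	rcu_state.node[]  = { root, leaf0, leaf1, ..., leaf6 }
 *	rcu_state.level[] = { &node[0],		// root level starts here
 *			      &node[1] };	// leaf level starts here
 *
 * and each CPU's rcu_data.mynode then points at the leaf whose
 * [grplo, grphi] range contains that CPU's number.
 */
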
4633 /*
4634 * Force priority from the kernel command-line into range.
4635 */
4636 static void __init sanitize_kthread_prio(void)
4637 {
4638 int kthread_prio_in = kthread_prio;
4639
4640 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4641 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4642 kthread_prio = 2;
4643 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4644 kthread_prio = 1;
4645 else if (kthread_prio < 0)
4646 kthread_prio = 0;
4647 else if (kthread_prio > 99)
4648 kthread_prio = 99;
4649
4650 if (kthread_prio != kthread_prio_in)
4651 pr_alert("%s: Limited prio to %d from %d\n",
4652 __func__, kthread_prio, kthread_prio_in);
4653 }
4654
4655 /*
4656 * Compute the rcu_node tree geometry from kernel parameters. This cannot
4657 * replace the definitions in tree.h because those are needed to size
4658 * the ->node array in the rcu_state structure.
4659 */
4660 void rcu_init_geometry(void)
4661 {
4662 ulong d;
4663 int i;
4664 static unsigned long old_nr_cpu_ids;
4665 int rcu_capacity[RCU_NUM_LVLS];
4666 static bool initialized;
4667
4668 if (initialized) {
4669 /*
4670 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4671 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4672 */
4673 WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4674 return;
4675 }
4676
4677 old_nr_cpu_ids = nr_cpu_ids;
4678 initialized = true;
4679
4680 /*
4681 * Initialize any unspecified boot parameters.
4682 * The default values of jiffies_till_first_fqs and
4683 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4684 * value, which is a function of HZ, plus one for each
4685 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4686 */
4687 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4688 if (jiffies_till_first_fqs == ULONG_MAX)
4689 jiffies_till_first_fqs = d;
4690 if (jiffies_till_next_fqs == ULONG_MAX)
4691 jiffies_till_next_fqs = d;
4692 adjust_jiffies_till_sched_qs();
4693
4694 /* If the compile-time values are accurate, just leave. */
4695 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4696 nr_cpu_ids == NR_CPUS)
4697 return;
4698 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4699 rcu_fanout_leaf, nr_cpu_ids);
4700
4701 /*
4702 * The boot-time rcu_fanout_leaf parameter must be at least two
4703 * and cannot exceed the number of bits in the rcu_node masks.
4704 * Complain and fall back to the compile-time values if this
4705 * limit is exceeded.
4706 */
4707 if (rcu_fanout_leaf < 2 || rcu_fanout_leaf > BITS_PER_LONG) {
4708 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4709 WARN_ON(1);
4710 return;
4711 }
4712
4713 /*
4714 * Compute the number of CPUs that can be handled by an rcu_node tree
4715 * with the given number of levels.
4716 */
4717 rcu_capacity[0] = rcu_fanout_leaf;
4718 for (i = 1; i < RCU_NUM_LVLS; i++)
4719 rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4720
4721 /*
4722 * The tree must be able to accommodate the configured number of CPUs.
4723 * If this limit is exceeded, fall back to the compile-time values.
4724 */
4725 if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4726 rcu_fanout_leaf = RCU_FANOUT_LEAF;
4727 WARN_ON(1);
4728 return;
4729 }
4730
4731 /* Calculate the number of levels in the tree. */
4732 for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4733 }
4734 rcu_num_lvls = i + 1;
4735
4736 /* Calculate the number of rcu_nodes at each level of the tree. */
4737 for (i = 0; i < rcu_num_lvls; i++) {
4738 int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4739 num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4740 }
4741
4742 /* Calculate the total number of rcu_node structures. */
4743 rcu_num_nodes = 0;
4744 for (i = 0; i < rcu_num_lvls; i++)
4745 rcu_num_nodes += num_rcu_lvl[i];
4746 }
4747
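/*
 * Worked example (assuming the common 64-bit defaults RCU_FANOUT_LEAF=16
 * and RCU_FANOUT=64, with a hypothetical nr_cpu_ids of 100):
 *
 *	rcu_capacity[] = { 16, 1024, ... };
 *	100 > 16 but 100 <= 1024, so rcu_num_lvls = 2;
 *	num_rcu_lvl[0] = DIV_ROUND_UP(100, 1024) = 1;	// root
 *	num_rcu_lvl[1] = DIV_ROUND_UP(100, 16)   = 7;	// leaves
 *	rcu_num_nodes  = 8.
 *
 * This matches the two-level layout sketched after rcu_init_one() above.
 */
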
4748 /*
4749 * Dump out the structure of the rcu_node combining tree associated
4750 * with the rcu_state structure.
4751 */
4752 static void __init rcu_dump_rcu_node_tree(void)
4753 {
4754 int level = 0;
4755 struct rcu_node *rnp;
4756
4757 pr_info("rcu_node tree layout dump\n");
4758 pr_info(" ");
4759 rcu_for_each_node_breadth_first(rnp) {
4760 if (rnp->level != level) {
4761 pr_cont("\n");
4762 pr_info(" ");
4763 level = rnp->level;
4764 }
4765 pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
4766 }
4767 pr_cont("\n");
4768 }
4769
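/*
 * For the hypothetical 100-CPU, two-level geometry used in the worked
 * examples above, and going only by the pr_info()/pr_cont() format strings
 * in this function, the boot-time dump would look something like:
 *
 *	rcu_node tree layout dump
 *	 0:99 ^0
 *	 0:14 ^0 15:29 ^1 ... 90:99 ^6
 *
 * i.e. one line per level, each entry giving a node's CPU range and its
 * index within its parent (exact leaf ranges depend on the auto-balancing
 * done by rcu_init_levelspread(), so treat the numbers as illustrative).
 */
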
4770 struct workqueue_struct *rcu_gp_wq;
4771
4772 void __init rcu_init(void)
4773 {
4774 int cpu = smp_processor_id();
4775
4776 rcu_early_boot_tests();
4777
4778 rcu_bootup_announce();
4779 sanitize_kthread_prio();
4780 rcu_init_geometry();
4781 rcu_init_one();
4782 if (dump_tree)
4783 rcu_dump_rcu_node_tree();
4784 if (use_softirq)
4785 open_softirq(RCU_SOFTIRQ, rcu_core_si);
4786
4787 /*
4788 * We don't need protection against CPU-hotplug here because
4789 * this is called early in boot, before either interrupts
4790 * or the scheduler are operational.
4791 */
4792 pm_notifier(rcu_pm_notify, 0);
4793 WARN_ON(num_online_cpus() > 1); // Only one CPU this early in boot.
4794 rcutree_prepare_cpu(cpu);
4795 rcutree_report_cpu_starting(cpu);
4796 rcutree_online_cpu(cpu);
4797
4798 /* Create workqueue for Tree SRCU and for expedited GPs. */
4799 rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4800 WARN_ON(!rcu_gp_wq);
4801
4802 sync_wq = alloc_workqueue("sync_wq", WQ_MEM_RECLAIM, 0);
4803 WARN_ON(!sync_wq);
4804
4805 /* Fill in default value for rcutree.qovld boot parameter. */
4806 /* -After- the rcu_node ->lock fields are initialized! */
4807 if (qovld < 0)
4808 qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4809 else
4810 qovld_calc = qovld;
4811
4812 // Kick-start in case any polled grace periods started early.
4813 (void)start_poll_synchronize_rcu_expedited();
4814
4815 rcu_test_sync_prims();
4816
4817 tasks_cblist_init_generic();
4818 }
4819
4820 #include "tree_stall.h"
4821 #include "tree_exp.h"
4822 #include "tree_nocb.h"
4823 #include "tree_plugin.h"
4824