// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

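/* Minimum and initial iowait boost: one eighth of SCHED_CAPACITY_SCALE. */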
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

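/*
 * Governor tunables exposed through sysfs; rate_limit_us is the minimum time,
 * in microseconds, between two consecutive frequency updates.
 */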
struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct mutex		work_lock;
	struct kthread_worker	worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			limits_changed;
	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		util;
	unsigned long		bw_min;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

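/* Per-CPU governor state; (re)initialized in sugov_start(). */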
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(READ_ONCE(sg_policy->limits_changed))) {
		WRITE_ONCE(sg_policy->limits_changed, false);
		sg_policy->need_freq_update = true;

		/*
		 * The above limits_changed update must occur before the reads
		 * of policy limits in cpufreq_driver_resolve_freq() or a policy
		 * limits update might be missed, so use a memory barrier to
		 * ensure it.
		 *
		 * This pairs with the write memory barrier in sugov_limits().
		 */
		smp_mb();

		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->need_freq_update) {
		sg_policy->need_freq_update = false;
		/*
		 * The policy limits have changed, but if the return value of
		 * cpufreq_driver_resolve_freq() after applying the new limits
		 * is still equal to the previously selected frequency, the
		 * driver callback need not be invoked unless the driver
		 * specifically wants that to happen on every update of the
		 * policy limits.
		 */
		if (sg_policy->next_freq == next_freq &&
		    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
			return false;
	} else if (sg_policy->next_freq == next_freq) {
		return false;
	}

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

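/*
 * Queue a deferred frequency update for the slow (non-fast-switch) path: the
 * irq_work wakes the policy's kthread, and work_in_progress prevents another
 * queueing until sugov_work() has picked up this request.
 */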
static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_capacity_ref_freq - get the reference frequency that has been used to
 * correlate frequency and compute capacity for a given cpufreq policy. The
 * CPU managing the policy is used for the arch_scale_freq_ref() call.
 * @policy: the cpufreq policy of the CPU in question.
 *
 * Return: the reference CPU frequency used to compute the capacity.
 */
static __always_inline
unsigned long get_capacity_ref_freq(struct cpufreq_policy *policy)
{
	unsigned int freq = arch_scale_freq_ref(policy->cpu);

	if (freq)
		return freq;

	if (arch_scale_freq_invariant())
		return policy->cpuinfo.max_freq;

	/*
	 * Apply a 25% margin so that we select a higher frequency than
	 * the current one before the CPU is fully busy:
	 */
	return policy->cur + (policy->cur >> 2);
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
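 *
 * For example, assuming a 2 GHz reference frequency and util/max = 512/1024,
 * the raw target is 1.25 * 2 GHz * 0.5 = 1.25 GHz, which is then resolved to
 * the closest driver-supported frequency at or above it.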
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq;

	freq = get_capacity_ref_freq(policy);
	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

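/*
 * Apply the DVFS headroom to @actual, cap the result at @max and make sure it
 * is at least @min.
 */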
unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
				       unsigned long min,
				       unsigned long max)
{
	/* Add dvfs headroom to actual utilization */
	actual = map_util_perf(actual);
	/* Actually we don't need to target the max performance */
	if (actual < max)
		max = actual;

	/*
	 * Ensure at least minimum performance while providing more compute
	 * capacity when possible.
	 */
	return max(min, max);
}

static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
{
	unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu);

	if (!scx_switched_all())
		util += cpu_util_cfs_boost(sg_cpu->cpu);
	util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
	util = max(util, boost);
	sg_cpu->bw_min = min;
	sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
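 *
 * With the default SCHED_CAPACITY_SCALE of 1024, the boost thus ramps through
 * 128, 256, 512 and 1024 on successive, sufficiently frequent IO wakeups.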
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @max_cap: the max CPU capacity
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which also appears to have been idle for at least one tick has its
 * IO boost utilization reset as well.
 *
 * This mechanism is designed to boost tasks that wait on IO frequently, while
 * being more conservative about tasks that do only sporadic IO operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
					unsigned long max_cap)
{
	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return 0;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return 0;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return 0;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_hold_freq(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls;
	bool ret;

	/*
	 * The heuristic in this function is for the fair class. For SCX, the
	 * performance target comes directly from the BPF scheduler. Let's just
	 * follow it.
	 */
	if (scx_switched_all())
		return false;

	/* if capped by uclamp_max, always update to be in compliance */
	if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))
		return false;

	/*
	 * Maintain the frequency if the CPU has not been idle recently, as
	 * reduction is likely to be premature.
	 */
	idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
		WRITE_ONCE(sg_cpu->sg_policy->limits_changed, true);
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned long max_cap,
					      unsigned int flags)
{
	unsigned long boost;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
		return false;

	boost = sugov_iowait_apply(sg_cpu, time, max_cap);
	sugov_get_util(sg_cpu, boost);

	return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned long max_cap;
	unsigned int next_f;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);

	if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq &&
	    !sg_policy->need_freq_update) {
		next_f = sg_policy->next_freq;

		/* Restore cached_raw_freq, as the previous next_freq is kept */
		sg_policy->cached_raw_freq = cached_freq;
	}

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

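/*
 * Update path for drivers implementing the ->adjust_perf() callback: instead
 * of computing a frequency, pass the minimum, target and capacity values
 * directly to the driver.
 */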
static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;
	unsigned long max_cap;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
		return;

	if (sugov_hold_freq(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;

	cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min,
				   sg_cpu->util, max_cap);

	sg_cpu->sg_policy->last_freq_update_time = time;
}

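/*
 * Evaluate all CPUs sharing the policy and return the frequency corresponding
 * to the highest utilization among them. Called with sg_policy->update_lock
 * held.
 */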
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max_cap;
	unsigned int j;

	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long boost;

		boost = sugov_iowait_apply(j_sg_cpu, time, max_cap);
		sugov_get_util(j_sg_cpu, boost);

		util = max(j_sg_cpu->util, util);
	}

	return get_next_freq(sg_policy, util, max_cap);
}

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here; without the lock we could miss queueing the new update.
	 *
	 * Note: If a work item was queued after the update_lock was released,
	 * sugov_work() will just be called again by the kthread_work code; the
	 * request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

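/*
 * When the driver does not provide per-policy governor tunables, a single
 * sugov_tunables instance is shared by all policies and protected by
 * global_tunables_lock.
 */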
static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static void sugov_tunables_free(struct kobject *kobj)
{
	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);

	kfree(to_sugov_tunables(attr_set));
}

static const struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
	.release = &sugov_tunables_free,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= NSEC_PER_MSEC,
		.sched_deadline	= 10 * NSEC_PER_MSEC,
		.sched_period	= 10 * NSEC_PER_MSEC,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	if (policy->dvfs_possible_from_any_cpu)
		set_cpus_allowed_ptr(thread, policy->related_cpus);
	else
		kthread_bind_mask(thread, policy->related_cpus);

	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_clear_global_tunables(void)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	/*
	 * Schedutil is the preferred governor for EAS, so rebuild sched domains
	 * on governor changes to make sure the scheduler knows about them.
	 */
	em_rebuild_sched_domains();
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_clear_global_tunables();

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_clear_global_tunables();

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);

	em_rebuild_sched_domains();
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->limits_changed = false;
	sg_policy->cached_raw_freq = 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	/*
	 * The limits_changed update below must take place after the updates
	 * of policy limits in cpufreq_set_policy() or a policy limits update
	 * might be missed, so use a memory barrier to ensure it.
	 *
	 * This pairs with the memory barrier in sugov_should_update_freq().
	 */
	smp_wmb();

	WRITE_ONCE(sg_policy->limits_changed, true);
}

struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);