/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A scheduler with every callback defined.
 *
 * This scheduler provides a minimal implementation of every operation in
 * struct sched_ext_ops, exercising the full callback surface.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 David Vernet <[email protected]>
 */

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#define DSQ_ID 0

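/*
 * Core scheduling path: pick a CPU for a waking task. Returning prev_cpu
 * simply keeps the task on the CPU it last ran on.
 */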
s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
		   u64 wake_flags)
{
	return prev_cpu;
}

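/*
 * Queue a runnable task on the shared DSQ with the default slice. The
 * matching dequeue callback has nothing to undo, so it is a no-op.
 */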
void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
{
	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
}

void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
{}

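/*
 * A CPU needs tasks to run: move one from the shared DSQ onto this CPU's
 * local DSQ.
 */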
void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
{
	scx_bpf_dsq_move_to_local(DSQ_ID);
}

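/* Per-task state transition notifications; nothing to track here. */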
void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
{}

void BPF_STRUCT_OPS(maximal_running, struct task_struct *p)
{}

void BPF_STRUCT_OPS(maximal_stopping, struct task_struct *p, bool runnable)
{}

void BPF_STRUCT_OPS(maximal_quiescent, struct task_struct *p, u64 deq_flags)
{}

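/*
 * yield declines the request by returning false; core_sched_before returns
 * false to assert no core-scheduling priority of @a over @b.
 */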
bool BPF_STRUCT_OPS(maximal_yield, struct task_struct *from,
		    struct task_struct *to)
{
	return false;
}

bool BPF_STRUCT_OPS(maximal_core_sched_before, struct task_struct *a,
		    struct task_struct *b)
{
	return false;
}

void BPF_STRUCT_OPS(maximal_set_weight, struct task_struct *p, u32 weight)
{}

void BPF_STRUCT_OPS(maximal_set_cpumask, struct task_struct *p,
		    const struct cpumask *cpumask)
{}

void BPF_STRUCT_OPS(maximal_update_idle, s32 cpu, bool idle)
{}

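/* CPU ownership (acquire/release) and hotplug (online/offline) notifications. */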
void BPF_STRUCT_OPS(maximal_cpu_acquire, s32 cpu,
		    struct scx_cpu_acquire_args *args)
{}

void BPF_STRUCT_OPS(maximal_cpu_release, s32 cpu,
		    struct scx_cpu_release_args *args)
{}

void BPF_STRUCT_OPS(maximal_cpu_online, s32 cpu)
{}

void BPF_STRUCT_OPS(maximal_cpu_offline, s32 cpu)
{}

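/* Per-task lifecycle: init/exit and enable/disable of sched_ext control. */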
s32 BPF_STRUCT_OPS(maximal_init_task, struct task_struct *p,
		   struct scx_init_task_args *args)
{
	return 0;
}

void BPF_STRUCT_OPS(maximal_enable, struct task_struct *p)
{}

void BPF_STRUCT_OPS(maximal_exit_task, struct task_struct *p,
		    struct scx_exit_task_args *args)
{}

void BPF_STRUCT_OPS(maximal_disable, struct task_struct *p)
{}

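/* cgroup integration: init/exit, task moves between cgroups, weight updates. */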
s32 BPF_STRUCT_OPS(maximal_cgroup_init, struct cgroup *cgrp,
		   struct scx_cgroup_init_args *args)
{
	return 0;
}

void BPF_STRUCT_OPS(maximal_cgroup_exit, struct cgroup *cgrp)
{}

s32 BPF_STRUCT_OPS(maximal_cgroup_prep_move, struct task_struct *p,
		   struct cgroup *from, struct cgroup *to)
{
	return 0;
}

void BPF_STRUCT_OPS(maximal_cgroup_move, struct task_struct *p,
		    struct cgroup *from, struct cgroup *to)
{}

void BPF_STRUCT_OPS(maximal_cgroup_cancel_move, struct task_struct *p,
		    struct cgroup *from, struct cgroup *to)
{}

void BPF_STRUCT_OPS(maximal_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{}

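/*
 * Sleepable init: create the shared DSQ. The -1 node argument requests
 * allocation on any NUMA node.
 */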
s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init)
{
	return scx_bpf_create_dsq(DSQ_ID, -1);
}

void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
{}

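/* Register every callback defined above with the sched_ext core. */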
SEC(".struct_ops.link")
struct sched_ext_ops maximal_ops = {
	.select_cpu		= (void *) maximal_select_cpu,
	.enqueue		= (void *) maximal_enqueue,
	.dequeue		= (void *) maximal_dequeue,
	.dispatch		= (void *) maximal_dispatch,
	.runnable		= (void *) maximal_runnable,
	.running		= (void *) maximal_running,
	.stopping		= (void *) maximal_stopping,
	.quiescent		= (void *) maximal_quiescent,
	.yield			= (void *) maximal_yield,
	.core_sched_before	= (void *) maximal_core_sched_before,
	.set_weight		= (void *) maximal_set_weight,
	.set_cpumask		= (void *) maximal_set_cpumask,
	.update_idle		= (void *) maximal_update_idle,
	.cpu_acquire		= (void *) maximal_cpu_acquire,
	.cpu_release		= (void *) maximal_cpu_release,
	.cpu_online		= (void *) maximal_cpu_online,
	.cpu_offline		= (void *) maximal_cpu_offline,
	.init_task		= (void *) maximal_init_task,
	.enable			= (void *) maximal_enable,
	.exit_task		= (void *) maximal_exit_task,
	.disable		= (void *) maximal_disable,
	.cgroup_init		= (void *) maximal_cgroup_init,
	.cgroup_exit		= (void *) maximal_cgroup_exit,
	.cgroup_prep_move	= (void *) maximal_cgroup_prep_move,
	.cgroup_move		= (void *) maximal_cgroup_move,
	.cgroup_cancel_move	= (void *) maximal_cgroup_cancel_move,
	.cgroup_set_weight	= (void *) maximal_cgroup_set_weight,
	.init			= (void *) maximal_init,
	.exit			= (void *) maximal_exit,
	.name			= "maximal",
};