1 // SPDX-License-Identifier: GPL-2.0 or MIT
2 /* Copyright 2023 Collabora ltd. */
3
4 #include <drm/drm_drv.h>
5 #include <drm/drm_exec.h>
6 #include <drm/drm_gem_shmem_helper.h>
7 #include <drm/drm_managed.h>
8 #include <drm/gpu_scheduler.h>
9 #include <drm/panthor_drm.h>
10
11 #include <linux/build_bug.h>
12 #include <linux/cleanup.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dma-resv.h>
17 #include <linux/firmware.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/iopoll.h>
21 #include <linux/iosys-map.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25
26 #include "panthor_devfreq.h"
27 #include "panthor_device.h"
28 #include "panthor_fw.h"
29 #include "panthor_gem.h"
30 #include "panthor_gpu.h"
31 #include "panthor_heap.h"
32 #include "panthor_mmu.h"
33 #include "panthor_regs.h"
34 #include "panthor_sched.h"
35
36 /**
37 * DOC: Scheduler
38 *
39 * Mali CSF hardware adopts a firmware-assisted scheduling model, where
40 * the firmware takes care of scheduling aspects, to some extent.
41 *
42 * The scheduling happens at the scheduling group level; each group
43 * contains 1 to N queues (N is FW/hardware dependent, and exposed
44 * through the firmware interface). Each queue is assigned a command
45 * stream ring buffer, which serves as a way to get jobs submitted to
46 * the GPU, among other things.
47 *
48 * The firmware can schedule a maximum of M groups (M is FW/hardware
49 * dependent, and exposed through the firmware interface). Past
50 * this maximum number of groups, the kernel must take care of
51 * rotating the groups passed to the firmware so every group gets
52 * a chance to have its queues scheduled for execution.
53 *
54 * The current implementation only supports kernel-mode queues.
55 * In other words, userspace doesn't have access to the ring-buffer.
56 * Instead, userspace passes indirect command stream buffers that are
57 * called from the queue ring-buffer by the kernel using a pre-defined
58 * sequence of command stream instructions to ensure the userspace driver
59 * always gets consistent results (cache maintenance,
60 * synchronization, ...).
61 *
62 * We rely on the drm_gpu_scheduler framework to deal with job
63 * dependencies and submission. As any other driver dealing with a
64 * FW-scheduler, we use the 1:1 entity:scheduler mode, such that each
65 * entity has its own job scheduler. When a job is ready to be executed
66 * (all its dependencies are met), it is pushed to the appropriate
67 * queue ring-buffer, and the group is scheduled for execution if it
68 * wasn't already active.
69 *
70 * Kernel-side group scheduling is timeslice-based. When we have fewer
71 * groups than there are slots, the periodic tick is disabled and we
72 * just let the FW schedule the active groups. When there are more
73 * groups than slots, we give each group a chance to execute for
74 * a given amount of time, and then re-evaluate and pick new groups
75 * to schedule. The group selection algorithm is based on
76 * priority+round-robin.
77 *
78 * Even though user-mode queues are out of scope right now, the
79 * current design takes them into account by avoiding any guesses about
80 * the group/queue state that would be based on information we wouldn't have
81 * if userspace were in charge of the ring-buffer. That's also one of the
82 * reasons we don't do 'cooperative' scheduling (encoding FW group slot
83 * reservation as dma_fence that would be returned from the
84 * drm_gpu_scheduler::prepare_job() hook, and treating group rotation as
85 * a queue of waiters, ordered by job submission order). This approach
86 * would work for kernel-mode queues, but would make user-mode queues a
87 * lot more complicated to retrofit.
88 */
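/*
 * For illustration only, a minimal sketch (not part of the driver) of the
 * priority+round-robin selection described above. The helper name
 * pick_groups_for_tick() is hypothetical; the real selection is done by the
 * tick work further down in this file.
 *
 *	static void pick_groups_for_tick(struct panthor_scheduler *sched,
 *					 struct list_head *picked, u32 max_groups)
 *	{
 *		u32 count = 0;
 *
 *		for (int prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
 *			struct panthor_group *group, *tmp;
 *
 *			// Highest priority first; groups are rotated inside their
 *			// priority list so every runnable group eventually gets a
 *			// slot (round-robin).
 *			list_for_each_entry_safe(group, tmp,
 *						 &sched->groups.runnable[prio],
 *						 run_node) {
 *				if (count == max_groups)
 *					return;
 *				list_move_tail(&group->run_node, picked);
 *				count++;
 *			}
 *		}
 *	}
 */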
89
90 #define JOB_TIMEOUT_MS 5000
91
92 #define MIN_CS_PER_CSG 8
93
94 #define MIN_CSGS 3
95 #define MAX_CSG_PRIO 0xf
96
97 #define NUM_INSTRS_PER_CACHE_LINE (64 / sizeof(u64))
98 #define MAX_INSTRS_PER_JOB 24
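/* For reference: with 64-byte cache lines, NUM_INSTRS_PER_CACHE_LINE
 * evaluates to 64 / sizeof(u64) = 8 instructions, since each command
 * stream instruction is 64 bits wide.
 */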
99
100 struct panthor_group;
101
102 /**
103 * struct panthor_csg_slot - Command stream group slot
104 *
105 * This represents a FW slot for a scheduling group.
106 */
107 struct panthor_csg_slot {
108 /** @group: Scheduling group bound to this slot. */
109 struct panthor_group *group;
110
111 /** @priority: Group priority. */
112 u8 priority;
113
114 /**
115 * @idle: True if the group bound to this slot is idle.
116 *
117 * A group is idle when it has nothing waiting for execution on
118 * all its queues, or when queues are blocked waiting for something
119 * to happen (synchronization object).
120 */
121 bool idle;
122 };
123
124 /**
125 * enum panthor_csg_priority - Group priority
126 */
127 enum panthor_csg_priority {
128 /** @PANTHOR_CSG_PRIORITY_LOW: Low priority group. */
129 PANTHOR_CSG_PRIORITY_LOW = 0,
130
131 /** @PANTHOR_CSG_PRIORITY_MEDIUM: Medium priority group. */
132 PANTHOR_CSG_PRIORITY_MEDIUM,
133
134 /** @PANTHOR_CSG_PRIORITY_HIGH: High priority group. */
135 PANTHOR_CSG_PRIORITY_HIGH,
136
137 /**
138 * @PANTHOR_CSG_PRIORITY_RT: Real-time priority group.
139 *
140 * Real-time priority allows one to preempt scheduling of other
141 * non-real-time groups. When such a group becomes executable,
142 * it will evict the group with the lowest non-rt priority if
143 * there's no free group slot available.
144 */
145 PANTHOR_CSG_PRIORITY_RT,
146
147 /** @PANTHOR_CSG_PRIORITY_COUNT: Number of priority levels. */
148 PANTHOR_CSG_PRIORITY_COUNT,
149 };
150
151 /**
152 * struct panthor_scheduler - Object used to manage the scheduler
153 */
154 struct panthor_scheduler {
155 /** @ptdev: Device. */
156 struct panthor_device *ptdev;
157
158 /**
159 * @wq: Workqueue used by our internal scheduler logic and
160 * drm_gpu_scheduler.
161 *
162 * Used for the scheduler tick, group updates, or other kinds of FW
163 * event processing that can't be handled in the threaded interrupt
164 * path. Also passed to the drm_gpu_scheduler instances embedded
165 * in panthor_queue.
166 */
167 struct workqueue_struct *wq;
168
169 /**
170 * @heap_alloc_wq: Workqueue used to schedule tiler_oom work items.
171 *
172 * We have a queue dedicated to heap chunk allocation work to avoid
173 * blocking the rest of the scheduler if the allocation tries to
174 * reclaim memory.
175 */
176 struct workqueue_struct *heap_alloc_wq;
177
178 /** @tick_work: Work executed on a scheduling tick. */
179 struct delayed_work tick_work;
180
181 /**
182 * @sync_upd_work: Work used to process synchronization object updates.
183 *
184 * We use this work to unblock queues/groups that were waiting on a
185 * synchronization object.
186 */
187 struct work_struct sync_upd_work;
188
189 /**
190 * @fw_events_work: Work used to process FW events outside the interrupt path.
191 *
192 * Even if the interrupt is threaded, we need any event processing
193 * that requires taking the panthor_scheduler::lock to be processed
194 * outside the interrupt path so we don't block the tick logic when
195 * it calls panthor_fw_{csg,wait}_wait_acks(). Since most of the
196 * event processing requires taking this lock, we just delegate all
197 * FW event processing to the scheduler workqueue.
198 */
199 struct work_struct fw_events_work;
200
201 /**
202 * @fw_events: Bitmask encoding pending FW events.
203 */
204 atomic_t fw_events;
205
206 /**
207 * @resched_target: When the next tick should occur.
208 *
209 * Expressed in jiffies.
210 */
211 u64 resched_target;
212
213 /**
214 * @last_tick: When the last tick occurred.
215 *
216 * Expressed in jiffies.
217 */
218 u64 last_tick;
219
220 /** @tick_period: Tick period in jiffies. */
221 u64 tick_period;
222
223 /**
224 * @lock: Lock protecting access to all the scheduler fields.
225 *
226 * Should be taken in the tick work, the irq handler, and anywhere the @groups
227 * fields are touched.
228 */
229 struct mutex lock;
230
231 /** @groups: Various lists used to classify groups. */
232 struct {
233 /**
234 * @runnable: Runnable group lists.
235 *
236 * When a group has queues that want to execute something,
237 * its panthor_group::run_node should be inserted here.
238 *
239 * One list per-priority.
240 */
241 struct list_head runnable[PANTHOR_CSG_PRIORITY_COUNT];
242
243 /**
244 * @idle: Idle group lists.
245 *
246 * When all queues of a group are idle (either because they
247 * have nothing to execute, or because they are blocked), the
248 * panthor_group::run_node field should be inserted here.
249 *
250 * One list per-priority.
251 */
252 struct list_head idle[PANTHOR_CSG_PRIORITY_COUNT];
253
254 /**
255 * @waiting: List of groups whose queues are blocked on a
256 * synchronization object.
257 *
258 * Insert panthor_group::wait_node here when a group is waiting
259 * for synchronization objects to be signaled.
260 *
261 * This list is evaluated in the @sync_upd_work work.
262 */
263 struct list_head waiting;
264 } groups;
265
266 /**
267 * @csg_slots: FW command stream group slots.
268 */
269 struct panthor_csg_slot csg_slots[MAX_CSGS];
270
271 /** @csg_slot_count: Number of command stream group slots exposed by the FW. */
272 u32 csg_slot_count;
273
274 /** @cs_slot_count: Number of command stream slots per group slot exposed by the FW. */
275 u32 cs_slot_count;
276
277 /** @as_slot_count: Number of address space slots supported by the MMU. */
278 u32 as_slot_count;
279
280 /** @used_csg_slot_count: Number of command stream group slots currently used. */
281 u32 used_csg_slot_count;
282
283 /** @sb_slot_count: Number of scoreboard slots. */
284 u32 sb_slot_count;
285
286 /**
287 * @might_have_idle_groups: True if an active group might have become idle.
288 *
289 * This will force a tick, so other runnable groups can be scheduled if one
290 * or more active groups became idle.
291 */
292 bool might_have_idle_groups;
293
294 /** @pm: Power management related fields. */
295 struct {
296 /** @has_ref: True if the scheduler owns a runtime PM reference. */
297 bool has_ref;
298 } pm;
299
300 /** @reset: Reset related fields. */
301 struct {
302 /** @lock: Lock protecting the other reset fields. */
303 struct mutex lock;
304
305 /**
306 * @in_progress: True if a reset is in progress.
307 *
308 * Set to true in panthor_sched_pre_reset() and back to false in
309 * panthor_sched_post_reset().
310 */
311 atomic_t in_progress;
312
313 /**
314 * @stopped_groups: List containing all groups that were stopped
315 * before a reset.
316 *
317 * Insert panthor_group::run_node in the pre_reset path.
318 */
319 struct list_head stopped_groups;
320 } reset;
321 };
322
323 /**
324 * struct panthor_syncobj_32b - 32-bit FW synchronization object
325 */
326 struct panthor_syncobj_32b {
327 /** @seqno: Sequence number. */
328 u32 seqno;
329
330 /**
331 * @status: Status.
332 *
333 * Not zero on failure.
334 */
335 u32 status;
336 };
337
338 /**
339 * struct panthor_syncobj_64b - 64-bit FW synchronization object
340 */
341 struct panthor_syncobj_64b {
342 /** @seqno: Sequence number. */
343 u64 seqno;
344
345 /**
346 * @status: Status.
347 *
348 * Not zero on failure.
349 */
350 u32 status;
351
352 /** @pad: MBZ. */
353 u32 pad;
354 };
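/*
 * For illustration only, a minimal sketch (not part of the driver) of how a
 * queue SYNC_WAIT condition could be evaluated against one of the objects
 * above. The helper name syncobj_wait_is_signaled() is hypothetical, and the
 * non-GT comparison is assumed to be the CSF "lower-or-equal" condition.
 *
 *	static bool syncobj_wait_is_signaled(const void *syncobj, u64 ref,
 *					     bool sync64, bool gt)
 *	{
 *		u64 seqno;
 *
 *		if (sync64)
 *			seqno = ((const struct panthor_syncobj_64b *)syncobj)->seqno;
 *		else
 *			seqno = ((const struct panthor_syncobj_32b *)syncobj)->seqno;
 *
 *		// A greater-than wait unblocks once the sync object seqno
 *		// exceeds the reference value.
 *		return gt ? seqno > ref : seqno <= ref;
 *	}
 */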
355
356 /**
357 * struct panthor_queue - Execution queue
358 */
359 struct panthor_queue {
360 /** @scheduler: DRM scheduler used for this queue. */
361 struct drm_gpu_scheduler scheduler;
362
363 /** @entity: DRM scheduling entity used for this queue. */
364 struct drm_sched_entity entity;
365
366 /**
367 * @remaining_time: Time remaining before the job timeout expires.
368 *
369 * The job timeout is suspended when the queue is not scheduled by the
370 * FW. Every time we suspend the timer, we need to save the remaining
371 * time so we can restore it later on.
372 */
373 unsigned long remaining_time;
374
375 /** @timeout_suspended: True if the job timeout was suspended. */
376 bool timeout_suspended;
377
378 /**
379 * @doorbell_id: Doorbell assigned to this queue.
380 *
381 * Right now, all groups share the same doorbell, and the doorbell ID
382 * is assigned to group_slot + 1 when the group is assigned a slot. But
383 * we might decide to provide fine grained doorbell assignment at some
384 * point, so don't have to wake up all queues in a group every time one
385 * of them is updated.
386 */
387 u8 doorbell_id;
388
389 /**
390 * @priority: Priority of the queue inside the group.
391 *
392 * Must be less than 16 (Only 4 bits available).
393 */
394 u8 priority;
395 #define CSF_MAX_QUEUE_PRIO GENMASK(3, 0)
396
397 /** @ringbuf: Command stream ring-buffer. */
398 struct panthor_kernel_bo *ringbuf;
399
400 /** @iface: Firmware interface. */
401 struct {
402 /** @mem: FW memory allocated for this interface. */
403 struct panthor_kernel_bo *mem;
404
405 /** @input: Input interface. */
406 struct panthor_fw_ringbuf_input_iface *input;
407
408 /** @output: Output interface. */
409 const struct panthor_fw_ringbuf_output_iface *output;
410
411 /** @input_fw_va: FW virtual address of the input interface buffer. */
412 u32 input_fw_va;
413
414 /** @output_fw_va: FW virtual address of the output interface buffer. */
415 u32 output_fw_va;
416 } iface;
417
418 /**
419 * @syncwait: Stores information about the synchronization object this
420 * queue is waiting on.
421 */
422 struct {
423 /** @gpu_va: GPU address of the synchronization object. */
424 u64 gpu_va;
425
426 /** @ref: Reference value to compare against. */
427 u64 ref;
428
429 /** @gt: True if this is a greater-than test. */
430 bool gt;
431
432 /** @sync64: True if this is a 64-bit sync object. */
433 bool sync64;
434
435 /** @obj: Buffer object holding the synchronization object. */
436 struct drm_gem_object *obj;
437
438 /** @offset: Offset of the synchronization object inside @obj. */
439 u64 offset;
440
441 /**
442 * @kmap: Kernel mapping of the buffer object holding the
443 * synchronization object.
444 */
445 void *kmap;
446 } syncwait;
447
448 /** @fence_ctx: Fence context fields. */
449 struct {
450 /** @lock: Used to protect access to all fences allocated by this context. */
451 spinlock_t lock;
452
453 /**
454 * @id: Fence context ID.
455 *
456 * Allocated with dma_fence_context_alloc().
457 */
458 u64 id;
459
460 /** @seqno: Sequence number of the last initialized fence. */
461 atomic64_t seqno;
462
463 /**
464 * @last_fence: Fence of the last submitted job.
465 *
466 * We return this fence when we get an empty command stream.
467 * This way, we are guaranteed that all earlier jobs have completed
468 * when drm_sched_job::s_fence::finished is signaled, without having to feed
469 * the CS ring buffer with a dummy job that only signals the fence.
470 */
471 struct dma_fence *last_fence;
472
473 /**
474 * @in_flight_jobs: List containing all in-flight jobs.
475 *
476 * Used to keep track and signal panthor_job::done_fence when the
477 * synchronization object attached to the queue is signaled.
478 */
479 struct list_head in_flight_jobs;
480 } fence_ctx;
481
482 /** @profiling: Job profiling data slots and access information. */
483 struct {
484 /** @slots: Kernel BO holding the slots. */
485 struct panthor_kernel_bo *slots;
486
487 /** @slot_count: Number of jobs the ring buffer can hold at once. */
488 u32 slot_count;
489
490 /** @seqno: Index of the next available profiling information slot. */
491 u32 seqno;
492 } profiling;
493 };
494
495 /**
496 * enum panthor_group_state - Scheduling group state.
497 */
498 enum panthor_group_state {
499 /** @PANTHOR_CS_GROUP_CREATED: Group was created, but not scheduled yet. */
500 PANTHOR_CS_GROUP_CREATED,
501
502 /** @PANTHOR_CS_GROUP_ACTIVE: Group is currently scheduled. */
503 PANTHOR_CS_GROUP_ACTIVE,
504
505 /**
506 * @PANTHOR_CS_GROUP_SUSPENDED: Group was scheduled at least once, but is
507 * inactive/suspended right now.
508 */
509 PANTHOR_CS_GROUP_SUSPENDED,
510
511 /**
512 * @PANTHOR_CS_GROUP_TERMINATED: Group was terminated.
513 *
514 * Can no longer be scheduled. The only allowed action is a destruction.
515 */
516 PANTHOR_CS_GROUP_TERMINATED,
517
518 /**
519 * @PANTHOR_CS_GROUP_UNKNOWN_STATE: Group is in an unknown state.
520 *
521 * The FW returned an inconsistent state. The group is flagged unusable
522 * and can no longer be scheduled. The only allowed action is a
523 * destruction.
524 *
525 * When that happens, we also schedule a FW reset, to start from a fresh
526 * state.
527 */
528 PANTHOR_CS_GROUP_UNKNOWN_STATE,
529 };
530
531 /**
532 * struct panthor_group - Scheduling group object
533 */
534 struct panthor_group {
535 /** @refcount: Reference count */
536 struct kref refcount;
537
538 /** @ptdev: Device. */
539 struct panthor_device *ptdev;
540
541 /** @vm: VM bound to the group. */
542 struct panthor_vm *vm;
543
544 /** @compute_core_mask: Mask of shader cores that can be used for compute jobs. */
545 u64 compute_core_mask;
546
547 /** @fragment_core_mask: Mask of shader cores that can be used for fragment jobs. */
548 u64 fragment_core_mask;
549
550 /** @tiler_core_mask: Mask of tiler cores that can be used for tiler jobs. */
551 u64 tiler_core_mask;
552
553 /** @max_compute_cores: Maximum number of shader cores used for compute jobs. */
554 u8 max_compute_cores;
555
556 /** @max_fragment_cores: Maximum number of shader cores used for fragment jobs. */
557 u8 max_fragment_cores;
558
559 /** @max_tiler_cores: Maximum number of tiler cores used for tiler jobs. */
560 u8 max_tiler_cores;
561
562 /** @priority: Group priority (check panthor_csg_priority). */
563 u8 priority;
564
565 /** @blocked_queues: Bitmask reflecting the blocked queues. */
566 u32 blocked_queues;
567
568 /** @idle_queues: Bitmask reflecting the idle queues. */
569 u32 idle_queues;
570
571 /** @fatal_lock: Lock used to protect access to fatal fields. */
572 spinlock_t fatal_lock;
573
574 /** @fatal_queues: Bitmask reflecting the queues that hit a fatal exception. */
575 u32 fatal_queues;
576
577 /** @tiler_oom: Mask of queues that have a tiler OOM event to process. */
578 atomic_t tiler_oom;
579
580 /** @queue_count: Number of queues in this group. */
581 u32 queue_count;
582
583 /** @queues: Queues owned by this group. */
584 struct panthor_queue *queues[MAX_CS_PER_CSG];
585
586 /**
587 * @csg_id: ID of the FW group slot.
588 *
589 * -1 when the group is not scheduled/active.
590 */
591 int csg_id;
592
593 /**
594 * @destroyed: True when the group has been destroyed.
595 *
596 * If a group is destroyed it becomes useless: no further jobs can be submitted
597 * to its queues. We simply wait for all references to be dropped so we can
598 * release the group object.
599 */
600 bool destroyed;
601
602 /**
603 * @timedout: True when a timeout occurred on any of the queues owned by
604 * this group.
605 *
606 * Timeouts can be reported by drm_sched or by the FW. If a reset is required,
607 * and the group can't be suspended, this also leads to a timeout. In any case,
608 * any timeout situation is unrecoverable, and the group becomes useless. We
609 * simply wait for all references to be dropped so we can release the group
610 * object.
611 */
612 bool timedout;
613
614 /**
615 * @innocent: True when the group becomes unusable because the group suspension
616 * failed during a reset.
617 *
618 * Sometimes the FW was put in a bad state by other groups, causing the group
619 * suspension happening in the reset path to fail. In that case, we consider the
620 * group innocent.
621 */
622 bool innocent;
623
624 /**
625 * @syncobjs: Pool of per-queue synchronization objects.
626 *
627 * One sync object per queue. The position of the sync object is
628 * determined by the queue index.
629 */
630 struct panthor_kernel_bo *syncobjs;
631
632 /** @fdinfo: Per-file info exposed through /proc/<process>/fdinfo */
633 struct {
634 /** @data: Total sampled values for jobs in queues from this group. */
635 struct panthor_gpu_usage data;
636
637 /**
638 * @fdinfo.lock: Spinlock to govern concurrent access from drm file's fdinfo
639 * callback and job post-completion processing function
640 */
641 spinlock_t lock;
642
643 /** @fdinfo.kbo_sizes: Aggregate size of private kernel BO's held by the group. */
644 size_t kbo_sizes;
645 } fdinfo;
646
647 /** @state: Group state. */
648 enum panthor_group_state state;
649
650 /**
651 * @suspend_buf: Suspend buffer.
652 *
653 * Stores the state of the group and its queues when a group is suspended.
654 * Used at resume time to restore the group in its previous state.
655 *
656 * The size of the suspend buffer is exposed through the FW interface.
657 */
658 struct panthor_kernel_bo *suspend_buf;
659
660 /**
661 * @protm_suspend_buf: Protection mode suspend buffer.
662 *
663 * Stores the state of the group and its queues when a group that's in
664 * protection mode is suspended.
665 *
666 * Used at resume time to restore the group in its previous state.
667 *
668 * The size of the protection mode suspend buffer is exposed through the
669 * FW interface.
670 */
671 struct panthor_kernel_bo *protm_suspend_buf;
672
673 /** @sync_upd_work: Work used to check/signal job fences. */
674 struct work_struct sync_upd_work;
675
676 /** @tiler_oom_work: Work used to process tiler OOM events happening on this group. */
677 struct work_struct tiler_oom_work;
678
679 /** @term_work: Work used to finish the group termination procedure. */
680 struct work_struct term_work;
681
682 /**
683 * @release_work: Work used to release group resources.
684 *
685 * We need to postpone the group release to avoid a deadlock when
686 * the last ref is released in the tick work.
687 */
688 struct work_struct release_work;
689
690 /**
691 * @run_node: Node used to insert the group in the
692 * panthor_scheduler::groups::{runnable,idle} and
693 * panthor_scheduler::reset.stopped_groups lists.
694 */
695 struct list_head run_node;
696
697 /**
698 * @wait_node: Node used to insert the group in the
699 * panthor_scheduler::groups::waiting list.
700 */
701 struct list_head wait_node;
702 };
703
704 struct panthor_job_profiling_data {
705 struct {
706 u64 before;
707 u64 after;
708 } cycles;
709
710 struct {
711 u64 before;
712 u64 after;
713 } time;
714 };
715
716 /**
717 * group_queue_work() - Queue a group work
718 * @group: Group to queue the work for.
719 * @wname: Work name.
720 *
721 * Grabs a ref and queues a work item to the scheduler workqueue. If
722 * the work was already queued, we release the reference we grabbed.
723 *
724 * Work callbacks must release the reference we grabbed here.
725 */
726 #define group_queue_work(group, wname) \
727 do { \
728 group_get(group); \
729 if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
730 group_put(group); \
731 } while (0)
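/*
 * Usage example (hypothetical call site): queueing the group termination work
 *
 *	group_queue_work(group, term);
 *
 * expands to roughly
 *
 *	group_get(group);
 *	if (!queue_work(group->ptdev->scheduler->wq, &group->term_work))
 *		group_put(group);
 *
 * and the handler bound to term_work must call group_put() once it is done.
 */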
732
733 /**
734 * sched_queue_work() - Queue a scheduler work.
735 * @sched: Scheduler object.
736 * @wname: Work name.
737 *
738 * Conditionally queues a scheduler work if no reset is pending/in-progress.
739 */
740 #define sched_queue_work(sched, wname) \
741 do { \
742 if (!atomic_read(&(sched)->reset.in_progress) && \
743 !panthor_device_reset_is_pending((sched)->ptdev)) \
744 queue_work((sched)->wq, &(sched)->wname ## _work); \
745 } while (0)
746
747 /**
748 * sched_queue_delayed_work() - Queue a scheduler delayed work.
749 * @sched: Scheduler object.
750 * @wname: Work name.
751 * @delay: Work delay in jiffies.
752 *
753 * Conditionally queues a scheduler delayed work if no reset is
754 * pending/in-progress.
755 */
756 #define sched_queue_delayed_work(sched, wname, delay) \
757 do { \
758 if (!atomic_read(&sched->reset.in_progress) && \
759 !panthor_device_reset_is_pending((sched)->ptdev)) \
760 mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
761 } while (0)
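/*
 * Usage examples: both helpers silently skip the queueing when a reset is
 * pending or in progress.
 *
 *	sched_queue_work(sched, sync_upd);		// queues sched->sync_upd_work
 *	sched_queue_delayed_work(sched, tick, 0);	// (re)arms sched->tick_work immediately
 */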
762
763 /*
764 * We currently set the maximum number of groups per file to an arbitrarily low value.
765 * This can be updated if we need more.
766 */
767 #define MAX_GROUPS_PER_POOL 128
768
769 /**
770 * struct panthor_group_pool - Group pool
771 *
772 * Each file gets assigned a group pool.
773 */
774 struct panthor_group_pool {
775 /** @xa: Xarray used to manage group handles. */
776 struct xarray xa;
777 };
778
779 /**
780 * struct panthor_job - Used to manage GPU job
781 */
782 struct panthor_job {
783 /** @base: Inherit from drm_sched_job. */
784 struct drm_sched_job base;
785
786 /** @refcount: Reference count. */
787 struct kref refcount;
788
789 /** @group: Group of the queue this job will be pushed to. */
790 struct panthor_group *group;
791
792 /** @queue_idx: Index of the queue inside @group. */
793 u32 queue_idx;
794
795 /** @call_info: Information about the userspace command stream call. */
796 struct {
797 /** @start: GPU address of the userspace command stream. */
798 u64 start;
799
800 /** @size: Size of the userspace command stream. */
801 u32 size;
802
803 /**
804 * @latest_flush: Flush ID at the time the userspace command
805 * stream was built.
806 *
807 * Needed for the flush reduction mechanism.
808 */
809 u32 latest_flush;
810 } call_info;
811
812 /** @ringbuf: Position of this job in the ring buffer. */
813 struct {
814 /** @start: Start offset. */
815 u64 start;
816
817 /** @end: End offset. */
818 u64 end;
819 } ringbuf;
820
821 /**
822 * @node: Used to insert the job in the panthor_queue::fence_ctx::in_flight_jobs
823 * list.
824 */
825 struct list_head node;
826
827 /** @done_fence: Fence signaled when the job is finished or cancelled. */
828 struct dma_fence *done_fence;
829
830 /** @profiling: Job profiling information. */
831 struct {
832 /** @mask: Current device job profiling enablement bitmask. */
833 u32 mask;
834
835 /** @slot: Job index in the profiling slots BO. */
836 u32 slot;
837 } profiling;
838 };
839
840 static void
841 panthor_queue_put_syncwait_obj(struct panthor_queue *queue)
842 {
843 if (queue->syncwait.kmap) {
844 struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap);
845
846 drm_gem_vunmap_unlocked(queue->syncwait.obj, &map);
847 queue->syncwait.kmap = NULL;
848 }
849
850 drm_gem_object_put(queue->syncwait.obj);
851 queue->syncwait.obj = NULL;
852 }
853
854 static void *
855 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue)
856 {
857 struct panthor_device *ptdev = group->ptdev;
858 struct panthor_gem_object *bo;
859 struct iosys_map map;
860 int ret;
861
862 if (queue->syncwait.kmap)
863 return queue->syncwait.kmap + queue->syncwait.offset;
864
865 bo = panthor_vm_get_bo_for_va(group->vm,
866 queue->syncwait.gpu_va,
867 &queue->syncwait.offset);
868 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
869 goto err_put_syncwait_obj;
870
871 queue->syncwait.obj = &bo->base.base;
872 ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map);
873 if (drm_WARN_ON(&ptdev->base, ret))
874 goto err_put_syncwait_obj;
875
876 queue->syncwait.kmap = map.vaddr;
877 if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
878 goto err_put_syncwait_obj;
879
880 return queue->syncwait.kmap + queue->syncwait.offset;
881
882 err_put_syncwait_obj:
883 panthor_queue_put_syncwait_obj(queue);
884 return NULL;
885 }
886
887 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue)
888 {
889 if (IS_ERR_OR_NULL(queue))
890 return;
891
892 if (queue->entity.fence_context)
893 drm_sched_entity_destroy(&queue->entity);
894
895 if (queue->scheduler.ops)
896 drm_sched_fini(&queue->scheduler);
897
898 panthor_queue_put_syncwait_obj(queue);
899
900 panthor_kernel_bo_destroy(queue->ringbuf);
901 panthor_kernel_bo_destroy(queue->iface.mem);
902 panthor_kernel_bo_destroy(queue->profiling.slots);
903
904 /* Release the last_fence we were holding, if any. */
905 dma_fence_put(queue->fence_ctx.last_fence);
906
907 kfree(queue);
908 }
909
910 static void group_release_work(struct work_struct *work)
911 {
912 struct panthor_group *group = container_of(work,
913 struct panthor_group,
914 release_work);
915 u32 i;
916
917 for (i = 0; i < group->queue_count; i++)
918 group_free_queue(group, group->queues[i]);
919
920 panthor_kernel_bo_destroy(group->suspend_buf);
921 panthor_kernel_bo_destroy(group->protm_suspend_buf);
922 panthor_kernel_bo_destroy(group->syncobjs);
923
924 panthor_vm_put(group->vm);
925 kfree(group);
926 }
927
928 static void group_release(struct kref *kref)
929 {
930 struct panthor_group *group = container_of(kref,
931 struct panthor_group,
932 refcount);
933 struct panthor_device *ptdev = group->ptdev;
934
935 drm_WARN_ON(&ptdev->base, group->csg_id >= 0);
936 drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node));
937 drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node));
938
939 queue_work(panthor_cleanup_wq, &group->release_work);
940 }
941
942 static void group_put(struct panthor_group *group)
943 {
944 if (group)
945 kref_put(&group->refcount, group_release);
946 }
947
948 static struct panthor_group *
949 group_get(struct panthor_group *group)
950 {
951 if (group)
952 kref_get(&group->refcount);
953
954 return group;
955 }
956
957 /**
958 * group_bind_locked() - Bind a group to a group slot
959 * @group: Group.
960 * @csg_id: Slot.
961 *
962 * Return: 0 on success, a negative error code otherwise.
963 */
964 static int
965 group_bind_locked(struct panthor_group *group, u32 csg_id)
966 {
967 struct panthor_device *ptdev = group->ptdev;
968 struct panthor_csg_slot *csg_slot;
969 int ret;
970
971 lockdep_assert_held(&ptdev->scheduler->lock);
972
973 if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS ||
974 ptdev->scheduler->csg_slots[csg_id].group))
975 return -EINVAL;
976
977 ret = panthor_vm_active(group->vm);
978 if (ret)
979 return ret;
980
981 csg_slot = &ptdev->scheduler->csg_slots[csg_id];
982 group_get(group);
983 group->csg_id = csg_id;
984
985 /* Dummy doorbell allocation: doorbell is assigned to the group and
986 * all queues use the same doorbell.
987 *
988 * TODO: Implement LRU-based doorbell assignment, so the most often
989 * updated queues get their own doorbell, thus avoiding useless checks
990 * on queues belonging to the same group that are rarely updated.
991 */
992 for (u32 i = 0; i < group->queue_count; i++)
993 group->queues[i]->doorbell_id = csg_id + 1;
994
995 csg_slot->group = group;
996
997 return 0;
998 }
999
1000 /**
1001 * group_unbind_locked() - Unbind a group from a slot.
1002 * @group: Group to unbind.
1003 *
1004 * Return: 0 on success, a negative error code otherwise.
1005 */
1006 static int
1007 group_unbind_locked(struct panthor_group *group)
1008 {
1009 struct panthor_device *ptdev = group->ptdev;
1010 struct panthor_csg_slot *slot;
1011
1012 lockdep_assert_held(&ptdev->scheduler->lock);
1013
1014 if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS))
1015 return -EINVAL;
1016
1017 if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE))
1018 return -EINVAL;
1019
1020 slot = &ptdev->scheduler->csg_slots[group->csg_id];
1021 panthor_vm_idle(group->vm);
1022 group->csg_id = -1;
1023
1024 /* Tiler OOM events will be re-issued next time the group is scheduled. */
1025 atomic_set(&group->tiler_oom, 0);
1026 cancel_work(&group->tiler_oom_work);
1027
1028 for (u32 i = 0; i < group->queue_count; i++)
1029 group->queues[i]->doorbell_id = -1;
1030
1031 slot->group = NULL;
1032
1033 group_put(group);
1034 return 0;
1035 }
1036
1037 /**
1038 * cs_slot_prog_locked() - Program a queue slot
1039 * @ptdev: Device.
1040 * @csg_id: Group slot ID.
1041 * @cs_id: Queue slot ID.
1042 *
1043 * Program a queue slot with the queue information so things can start being
1044 * executed on this queue.
1045 *
1046 * The group slot must have a group bound to it already (group_bind_locked()).
1047 */
1048 static void
1049 cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1050 {
1051 struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id];
1052 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1053
1054 lockdep_assert_held(&ptdev->scheduler->lock);
1055
1056 queue->iface.input->extract = queue->iface.output->extract;
1057 drm_WARN_ON(&ptdev->base, queue->iface.input->insert < queue->iface.input->extract);
1058
1059 cs_iface->input->ringbuf_base = panthor_kernel_bo_gpuva(queue->ringbuf);
1060 cs_iface->input->ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
1061 cs_iface->input->ringbuf_input = queue->iface.input_fw_va;
1062 cs_iface->input->ringbuf_output = queue->iface.output_fw_va;
1063 cs_iface->input->config = CS_CONFIG_PRIORITY(queue->priority) |
1064 CS_CONFIG_DOORBELL(queue->doorbell_id);
1065 cs_iface->input->ack_irq_mask = ~0;
1066 panthor_fw_update_reqs(cs_iface, req,
1067 CS_IDLE_SYNC_WAIT |
1068 CS_IDLE_EMPTY |
1069 CS_STATE_START |
1070 CS_EXTRACT_EVENT,
1071 CS_IDLE_SYNC_WAIT |
1072 CS_IDLE_EMPTY |
1073 CS_STATE_MASK |
1074 CS_EXTRACT_EVENT);
1075 if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
1076 drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
1077 queue->timeout_suspended = false;
1078 }
1079 }
1080
1081 /**
1082 * cs_slot_reset_locked() - Reset a queue slot
1083 * @ptdev: Device.
1084 * @csg_id: Group slot.
1085 * @cs_id: Queue slot.
1086 *
1087 * Change the queue slot state to STOP and suspend the queue timeout if
1088 * the queue is not blocked.
1089 *
1090 * The group slot must have a group bound to it (group_bind_locked()).
1091 */
1092 static int
1093 cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1094 {
1095 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1096 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1097 struct panthor_queue *queue = group->queues[cs_id];
1098
1099 lockdep_assert_held(&ptdev->scheduler->lock);
1100
1101 panthor_fw_update_reqs(cs_iface, req,
1102 CS_STATE_STOP,
1103 CS_STATE_MASK);
1104
1105 /* If the queue is blocked, we want to keep the timeout running, so
1106 * we can detect unbounded waits and kill the group when that happens.
1107 */
1108 if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
1109 queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
1110 queue->timeout_suspended = true;
1111 WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
1112 }
1113
1114 return 0;
1115 }
1116
1117 /**
1118 * csg_slot_sync_priority_locked() - Synchronize the group slot priority
1119 * @ptdev: Device.
1120 * @csg_id: Group slot ID.
1121 *
1122 * Group slot priority update happens asynchronously. When we receive a
1123 * %CSG_ENDPOINT_CONFIG acknowledgment, we know the update is effective, and
1124 * can reflect it in our panthor_csg_slot object.
1125 */
1126 static void
1127 csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
1128 {
1129 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1130 struct panthor_fw_csg_iface *csg_iface;
1131
1132 lockdep_assert_held(&ptdev->scheduler->lock);
1133
1134 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1135 csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
1136 }
1137
1138 /**
1139 * cs_slot_sync_queue_state_locked() - Synchronize the queue slot state
1140 * @ptdev: Device.
1141 * @csg_id: Group slot.
1142 * @cs_id: Queue slot.
1143 *
1144 * Queue state is updated on group suspend or STATUS_UPDATE event.
1145 */
1146 static void
1147 cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
1148 {
1149 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group;
1150 struct panthor_queue *queue = group->queues[cs_id];
1151 struct panthor_fw_cs_iface *cs_iface =
1152 panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id);
1153
1154 u32 status_wait_cond;
1155
1156 switch (cs_iface->output->status_blocked_reason) {
1157 case CS_STATUS_BLOCKED_REASON_UNBLOCKED:
1158 if (queue->iface.input->insert == queue->iface.output->extract &&
1159 cs_iface->output->status_scoreboards == 0)
1160 group->idle_queues |= BIT(cs_id);
1161 break;
1162
1163 case CS_STATUS_BLOCKED_REASON_SYNC_WAIT:
1164 if (list_empty(&group->wait_node)) {
1165 list_move_tail(&group->wait_node,
1166 &group->ptdev->scheduler->groups.waiting);
1167 }
1168
1169 /* The queue is only blocked if there's no deferred operation
1170 * pending, which can be checked through the scoreboard status.
1171 */
1172 if (!cs_iface->output->status_scoreboards)
1173 group->blocked_queues |= BIT(cs_id);
1174
1175 queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
1176 queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
1177 status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
1178 queue->syncwait.gt = status_wait_cond == CS_STATUS_WAIT_SYNC_COND_GT;
1179 if (cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_64B) {
1180 u64 sync_val_hi = cs_iface->output->status_wait_sync_value_hi;
1181
1182 queue->syncwait.sync64 = true;
1183 queue->syncwait.ref |= sync_val_hi << 32;
1184 } else {
1185 queue->syncwait.sync64 = false;
1186 }
1187 break;
1188
1189 default:
1190 /* Other reasons are not blocking. Consider the queue as runnable
1191 * in those cases.
1192 */
1193 break;
1194 }
1195 }
1196
1197 static void
1198 csg_slot_sync_queues_state_locked(struct panthor_device *ptdev, u32 csg_id)
1199 {
1200 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1201 struct panthor_group *group = csg_slot->group;
1202 u32 i;
1203
1204 lockdep_assert_held(&ptdev->scheduler->lock);
1205
1206 group->idle_queues = 0;
1207 group->blocked_queues = 0;
1208
1209 for (i = 0; i < group->queue_count; i++) {
1210 if (group->queues[i])
1211 cs_slot_sync_queue_state_locked(ptdev, csg_id, i);
1212 }
1213 }
1214
1215 static void
1216 csg_slot_sync_state_locked(struct panthor_device *ptdev, u32 csg_id)
1217 {
1218 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1219 struct panthor_fw_csg_iface *csg_iface;
1220 struct panthor_group *group;
1221 enum panthor_group_state new_state, old_state;
1222 u32 csg_state;
1223
1224 lockdep_assert_held(&ptdev->scheduler->lock);
1225
1226 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1227 group = csg_slot->group;
1228
1229 if (!group)
1230 return;
1231
1232 old_state = group->state;
1233 csg_state = csg_iface->output->ack & CSG_STATE_MASK;
1234 switch (csg_state) {
1235 case CSG_STATE_START:
1236 case CSG_STATE_RESUME:
1237 new_state = PANTHOR_CS_GROUP_ACTIVE;
1238 break;
1239 case CSG_STATE_TERMINATE:
1240 new_state = PANTHOR_CS_GROUP_TERMINATED;
1241 break;
1242 case CSG_STATE_SUSPEND:
1243 new_state = PANTHOR_CS_GROUP_SUSPENDED;
1244 break;
1245 default:
1246 /* The unknown state might be caused by a FW state corruption,
1247 * which means the group metadata can't be trusted anymore, and
1248 * the SUSPEND operation might propagate the corruption to the
1249 * suspend buffers. Flag the group state as unknown to make
1250 * sure it's unusable after that point.
1251 */
1252 drm_err(&ptdev->base, "Invalid state on CSG %d (state=%d)",
1253 csg_id, csg_state);
1254 new_state = PANTHOR_CS_GROUP_UNKNOWN_STATE;
1255 break;
1256 }
1257
1258 if (old_state == new_state)
1259 return;
1260
1261 /* The unknown state might be caused by a FW issue, reset the FW to
1262 * take a fresh start.
1263 */
1264 if (new_state == PANTHOR_CS_GROUP_UNKNOWN_STATE)
1265 panthor_device_schedule_reset(ptdev);
1266
1267 if (new_state == PANTHOR_CS_GROUP_SUSPENDED)
1268 csg_slot_sync_queues_state_locked(ptdev, csg_id);
1269
1270 if (old_state == PANTHOR_CS_GROUP_ACTIVE) {
1271 u32 i;
1272
1273 /* Reset the queue slots so we start from a clean
1274 * state when starting/resuming a new group on this
1275 * CSG slot. No wait needed here, and no doorbell ring
1276 * either, since the CS slot will only be re-used
1277 * on the next CSG start operation.
1278 */
1279 for (i = 0; i < group->queue_count; i++) {
1280 if (group->queues[i])
1281 cs_slot_reset_locked(ptdev, csg_id, i);
1282 }
1283 }
1284
1285 group->state = new_state;
1286 }
1287
1288 static int
1289 csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
1290 {
1291 struct panthor_fw_csg_iface *csg_iface;
1292 struct panthor_csg_slot *csg_slot;
1293 struct panthor_group *group;
1294 u32 queue_mask = 0, i;
1295
1296 lockdep_assert_held(&ptdev->scheduler->lock);
1297
1298 if (priority > MAX_CSG_PRIO)
1299 return -EINVAL;
1300
1301 if (drm_WARN_ON(&ptdev->base, csg_id >= MAX_CSGS))
1302 return -EINVAL;
1303
1304 csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1305 group = csg_slot->group;
1306 if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE)
1307 return 0;
1308
1309 csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id);
1310
1311 for (i = 0; i < group->queue_count; i++) {
1312 if (group->queues[i]) {
1313 cs_slot_prog_locked(ptdev, csg_id, i);
1314 queue_mask |= BIT(i);
1315 }
1316 }
1317
1318 csg_iface->input->allow_compute = group->compute_core_mask;
1319 csg_iface->input->allow_fragment = group->fragment_core_mask;
1320 csg_iface->input->allow_other = group->tiler_core_mask;
1321 csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
1322 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
1323 CSG_EP_REQ_TILER(group->max_tiler_cores) |
1324 CSG_EP_REQ_PRIORITY(priority);
1325 csg_iface->input->config = panthor_vm_as(group->vm);
1326
1327 if (group->suspend_buf)
1328 csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf);
1329 else
1330 csg_iface->input->suspend_buf = 0;
1331
1332 if (group->protm_suspend_buf) {
1333 csg_iface->input->protm_suspend_buf =
1334 panthor_kernel_bo_gpuva(group->protm_suspend_buf);
1335 } else {
1336 csg_iface->input->protm_suspend_buf = 0;
1337 }
1338
1339 csg_iface->input->ack_irq_mask = ~0;
1340 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, queue_mask);
1341 return 0;
1342 }
1343
1344 static void
1345 cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
1346 u32 csg_id, u32 cs_id)
1347 {
1348 struct panthor_scheduler *sched = ptdev->scheduler;
1349 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1350 struct panthor_group *group = csg_slot->group;
1351 struct panthor_fw_cs_iface *cs_iface;
1352 u32 fatal;
1353 u64 info;
1354
1355 lockdep_assert_held(&sched->lock);
1356
1357 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1358 fatal = cs_iface->output->fatal;
1359 info = cs_iface->output->fatal_info;
1360
1361 if (group)
1362 group->fatal_queues |= BIT(cs_id);
1363
1364 if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
1365 /* If this exception is unrecoverable, queue a reset, and make
1366 * sure we stop scheduling groups until the reset has happened.
1367 */
1368 panthor_device_schedule_reset(ptdev);
1369 cancel_delayed_work(&sched->tick_work);
1370 } else {
1371 sched_queue_delayed_work(sched, tick, 0);
1372 }
1373
1374 drm_warn(&ptdev->base,
1375 "CSG slot %d CS slot: %d\n"
1376 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
1377 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
1378 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
1379 csg_id, cs_id,
1380 (unsigned int)CS_EXCEPTION_TYPE(fatal),
1381 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fatal)),
1382 (unsigned int)CS_EXCEPTION_DATA(fatal),
1383 info);
1384 }
1385
1386 static void
1387 cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
1388 u32 csg_id, u32 cs_id)
1389 {
1390 struct panthor_scheduler *sched = ptdev->scheduler;
1391 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1392 struct panthor_group *group = csg_slot->group;
1393 struct panthor_queue *queue = group && cs_id < group->queue_count ?
1394 group->queues[cs_id] : NULL;
1395 struct panthor_fw_cs_iface *cs_iface;
1396 u32 fault;
1397 u64 info;
1398
1399 lockdep_assert_held(&sched->lock);
1400
1401 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1402 fault = cs_iface->output->fault;
1403 info = cs_iface->output->fault_info;
1404
1405 if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
1406 u64 cs_extract = queue->iface.output->extract;
1407 struct panthor_job *job;
1408
1409 spin_lock(&queue->fence_ctx.lock);
1410 list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
1411 if (cs_extract >= job->ringbuf.end)
1412 continue;
1413
1414 if (cs_extract < job->ringbuf.start)
1415 break;
1416
1417 dma_fence_set_error(job->done_fence, -EINVAL);
1418 }
1419 spin_unlock(&queue->fence_ctx.lock);
1420 }
1421
1422 drm_warn(&ptdev->base,
1423 "CSG slot %d CS slot: %d\n"
1424 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
1425 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
1426 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
1427 csg_id, cs_id,
1428 (unsigned int)CS_EXCEPTION_TYPE(fault),
1429 panthor_exception_name(ptdev, CS_EXCEPTION_TYPE(fault)),
1430 (unsigned int)CS_EXCEPTION_DATA(fault),
1431 info);
1432 }
1433
1434 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id)
1435 {
1436 struct panthor_device *ptdev = group->ptdev;
1437 struct panthor_scheduler *sched = ptdev->scheduler;
1438 u32 renderpasses_in_flight, pending_frag_count;
1439 struct panthor_heap_pool *heaps = NULL;
1440 u64 heap_address, new_chunk_va = 0;
1441 u32 vt_start, vt_end, frag_end;
1442 int ret, csg_id;
1443
1444 mutex_lock(&sched->lock);
1445 csg_id = group->csg_id;
1446 if (csg_id >= 0) {
1447 struct panthor_fw_cs_iface *cs_iface;
1448
1449 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1450 heaps = panthor_vm_get_heap_pool(group->vm, false);
1451 heap_address = cs_iface->output->heap_address;
1452 vt_start = cs_iface->output->heap_vt_start;
1453 vt_end = cs_iface->output->heap_vt_end;
1454 frag_end = cs_iface->output->heap_frag_end;
1455 renderpasses_in_flight = vt_start - frag_end;
1456 pending_frag_count = vt_end - frag_end;
1457 }
1458 mutex_unlock(&sched->lock);
1459
1460 /* The group got scheduled out, we stop here. We will get a new tiler OOM event
1461 * when it's scheduled again.
1462 */
1463 if (unlikely(csg_id < 0))
1464 return 0;
1465
1466 if (IS_ERR(heaps) || frag_end > vt_end || vt_end >= vt_start) {
1467 ret = -EINVAL;
1468 } else {
1469 /* We do the allocation without holding the scheduler lock to avoid
1470 * blocking the scheduling.
1471 */
1472 ret = panthor_heap_grow(heaps, heap_address,
1473 renderpasses_in_flight,
1474 pending_frag_count, &new_chunk_va);
1475 }
1476
1477 /* If the heap context doesn't have memory for us, we want to let the
1478 * FW try to reclaim memory by waiting for fragment jobs to land or by
1479 * executing the tiler OOM exception handler, which is supposed to
1480 * implement incremental rendering.
1481 */
1482 if (ret && ret != -ENOMEM) {
1483 drm_warn(&ptdev->base, "Failed to extend the tiler heap\n");
1484 group->fatal_queues |= BIT(cs_id);
1485 sched_queue_delayed_work(sched, tick, 0);
1486 goto out_put_heap_pool;
1487 }
1488
1489 mutex_lock(&sched->lock);
1490 csg_id = group->csg_id;
1491 if (csg_id >= 0) {
1492 struct panthor_fw_csg_iface *csg_iface;
1493 struct panthor_fw_cs_iface *cs_iface;
1494
1495 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1496 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1497
1498 cs_iface->input->heap_start = new_chunk_va;
1499 cs_iface->input->heap_end = new_chunk_va;
1500 panthor_fw_update_reqs(cs_iface, req, cs_iface->output->ack, CS_TILER_OOM);
1501 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, BIT(cs_id));
1502 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1503 }
1504 mutex_unlock(&sched->lock);
1505
1506 /* We allocated a chunk, but couldn't link it to the heap
1507 * context because the group was scheduled out while we were
1508 * allocating memory. We need to return this chunk to the heap.
1509 */
1510 if (unlikely(csg_id < 0 && new_chunk_va))
1511 panthor_heap_return_chunk(heaps, heap_address, new_chunk_va);
1512
1513 ret = 0;
1514
1515 out_put_heap_pool:
1516 panthor_heap_pool_put(heaps);
1517 return ret;
1518 }
1519
1520 static void group_tiler_oom_work(struct work_struct *work)
1521 {
1522 struct panthor_group *group =
1523 container_of(work, struct panthor_group, tiler_oom_work);
1524 u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0);
1525
1526 while (tiler_oom) {
1527 u32 cs_id = ffs(tiler_oom) - 1;
1528
1529 group_process_tiler_oom(group, cs_id);
1530 tiler_oom &= ~BIT(cs_id);
1531 }
1532
1533 group_put(group);
1534 }
1535
1536 static void
1537 cs_slot_process_tiler_oom_event_locked(struct panthor_device *ptdev,
1538 u32 csg_id, u32 cs_id)
1539 {
1540 struct panthor_scheduler *sched = ptdev->scheduler;
1541 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1542 struct panthor_group *group = csg_slot->group;
1543
1544 lockdep_assert_held(&sched->lock);
1545
1546 if (drm_WARN_ON(&ptdev->base, !group))
1547 return;
1548
1549 atomic_or(BIT(cs_id), &group->tiler_oom);
1550
1551 /* We don't use group_queue_work() here because we want to queue the
1552 * work item to the heap_alloc_wq.
1553 */
1554 group_get(group);
1555 if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work))
1556 group_put(group);
1557 }
1558
1559 static bool cs_slot_process_irq_locked(struct panthor_device *ptdev,
1560 u32 csg_id, u32 cs_id)
1561 {
1562 struct panthor_fw_cs_iface *cs_iface;
1563 u32 req, ack, events;
1564
1565 lockdep_assert_held(&ptdev->scheduler->lock);
1566
1567 cs_iface = panthor_fw_get_cs_iface(ptdev, csg_id, cs_id);
1568 req = cs_iface->input->req;
1569 ack = cs_iface->output->ack;
1570 events = (req ^ ack) & CS_EVT_MASK;
1571
1572 if (events & CS_FATAL)
1573 cs_slot_process_fatal_event_locked(ptdev, csg_id, cs_id);
1574
1575 if (events & CS_FAULT)
1576 cs_slot_process_fault_event_locked(ptdev, csg_id, cs_id);
1577
1578 if (events & CS_TILER_OOM)
1579 cs_slot_process_tiler_oom_event_locked(ptdev, csg_id, cs_id);
1580
1581 /* We don't acknowledge the TILER_OOM event since its handling is
1582 * deferred to a separate work.
1583 */
1584 panthor_fw_update_reqs(cs_iface, req, ack, CS_FATAL | CS_FAULT);
1585
1586 return (events & (CS_FAULT | CS_TILER_OOM)) != 0;
1587 }
1588
1589 static void csg_slot_sync_idle_state_locked(struct panthor_device *ptdev, u32 csg_id)
1590 {
1591 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1592 struct panthor_fw_csg_iface *csg_iface;
1593
1594 lockdep_assert_held(&ptdev->scheduler->lock);
1595
1596 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1597 csg_slot->idle = csg_iface->output->status_state & CSG_STATUS_STATE_IS_IDLE;
1598 }
1599
1600 static void csg_slot_process_idle_event_locked(struct panthor_device *ptdev, u32 csg_id)
1601 {
1602 struct panthor_scheduler *sched = ptdev->scheduler;
1603
1604 lockdep_assert_held(&sched->lock);
1605
1606 sched->might_have_idle_groups = true;
1607
1608 /* Schedule a tick so we can evict idle groups and schedule non-idle
1609 * ones. This will also update runtime PM and devfreq busy/idle states,
1610 * so the device can lower its frequency or get suspended.
1611 */
1612 sched_queue_delayed_work(sched, tick, 0);
1613 }
1614
1615 static void csg_slot_sync_update_locked(struct panthor_device *ptdev,
1616 u32 csg_id)
1617 {
1618 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
1619 struct panthor_group *group = csg_slot->group;
1620
1621 lockdep_assert_held(&ptdev->scheduler->lock);
1622
1623 if (group)
1624 group_queue_work(group, sync_upd);
1625
1626 sched_queue_work(ptdev->scheduler, sync_upd);
1627 }
1628
1629 static void
1630 csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 csg_id)
1631 {
1632 struct panthor_scheduler *sched = ptdev->scheduler;
1633 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
1634 struct panthor_group *group = csg_slot->group;
1635
1636 lockdep_assert_held(&sched->lock);
1637
1638 drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
1639
1640 group = csg_slot->group;
1641 if (!drm_WARN_ON(&ptdev->base, !group))
1642 group->timedout = true;
1643
1644 sched_queue_delayed_work(sched, tick, 0);
1645 }
1646
1647 static void sched_process_csg_irq_locked(struct panthor_device *ptdev, u32 csg_id)
1648 {
1649 u32 req, ack, cs_irq_req, cs_irq_ack, cs_irqs, csg_events;
1650 struct panthor_fw_csg_iface *csg_iface;
1651 u32 ring_cs_db_mask = 0;
1652
1653 lockdep_assert_held(&ptdev->scheduler->lock);
1654
1655 if (drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1656 return;
1657
1658 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1659 req = READ_ONCE(csg_iface->input->req);
1660 ack = READ_ONCE(csg_iface->output->ack);
1661 cs_irq_req = READ_ONCE(csg_iface->output->cs_irq_req);
1662 cs_irq_ack = READ_ONCE(csg_iface->input->cs_irq_ack);
1663 csg_events = (req ^ ack) & CSG_EVT_MASK;
1664
1665 /* There may not be any pending CSG/CS interrupts to process */
1666 if (req == ack && cs_irq_req == cs_irq_ack)
1667 return;
1668
1669 /* Immediately set IRQ_ACK bits to be the same as the IRQ_REQ bits before
1670 * examining the CS_ACK & CS_REQ bits. This ensures the Host doesn't
1671 * miss an interrupt for a CS in the race scenario where, while the
1672 * Host is servicing an interrupt for that CS, the firmware sends
1673 * another interrupt for the same CS.
1674 */
1675 csg_iface->input->cs_irq_ack = cs_irq_req;
1676
1677 panthor_fw_update_reqs(csg_iface, req, ack,
1678 CSG_SYNC_UPDATE |
1679 CSG_IDLE |
1680 CSG_PROGRESS_TIMER_EVENT);
1681
1682 if (csg_events & CSG_IDLE)
1683 csg_slot_process_idle_event_locked(ptdev, csg_id);
1684
1685 if (csg_events & CSG_PROGRESS_TIMER_EVENT)
1686 csg_slot_process_progress_timer_event_locked(ptdev, csg_id);
1687
1688 cs_irqs = cs_irq_req ^ cs_irq_ack;
1689 while (cs_irqs) {
1690 u32 cs_id = ffs(cs_irqs) - 1;
1691
1692 if (cs_slot_process_irq_locked(ptdev, csg_id, cs_id))
1693 ring_cs_db_mask |= BIT(cs_id);
1694
1695 cs_irqs &= ~BIT(cs_id);
1696 }
1697
1698 if (csg_events & CSG_SYNC_UPDATE)
1699 csg_slot_sync_update_locked(ptdev, csg_id);
1700
1701 if (ring_cs_db_mask)
1702 panthor_fw_toggle_reqs(csg_iface, doorbell_req, doorbell_ack, ring_cs_db_mask);
1703
1704 panthor_fw_ring_csg_doorbells(ptdev, BIT(csg_id));
1705 }
1706
1707 static void sched_process_idle_event_locked(struct panthor_device *ptdev)
1708 {
1709 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1710
1711 lockdep_assert_held(&ptdev->scheduler->lock);
1712
1713 /* Acknowledge the idle event and schedule a tick. */
1714 panthor_fw_update_reqs(glb_iface, req, glb_iface->output->ack, GLB_IDLE);
1715 sched_queue_delayed_work(ptdev->scheduler, tick, 0);
1716 }
1717
1718 /**
1719 * sched_process_global_irq_locked() - Process the scheduling part of a global IRQ
1720 * @ptdev: Device.
1721 */
1722 static void sched_process_global_irq_locked(struct panthor_device *ptdev)
1723 {
1724 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
1725 u32 req, ack, evts;
1726
1727 lockdep_assert_held(&ptdev->scheduler->lock);
1728
1729 req = READ_ONCE(glb_iface->input->req);
1730 ack = READ_ONCE(glb_iface->output->ack);
1731 evts = (req ^ ack) & GLB_EVT_MASK;
1732
1733 if (evts & GLB_IDLE)
1734 sched_process_idle_event_locked(ptdev);
1735 }
1736
1737 static void process_fw_events_work(struct work_struct *work)
1738 {
1739 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
1740 fw_events_work);
1741 u32 events = atomic_xchg(&sched->fw_events, 0);
1742 struct panthor_device *ptdev = sched->ptdev;
1743
1744 mutex_lock(&sched->lock);
1745
1746 if (events & JOB_INT_GLOBAL_IF) {
1747 sched_process_global_irq_locked(ptdev);
1748 events &= ~JOB_INT_GLOBAL_IF;
1749 }
1750
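	/* Each remaining bit identifies a CSG slot with pending CS/CSG events. */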
1751 while (events) {
1752 u32 csg_id = ffs(events) - 1;
1753
1754 sched_process_csg_irq_locked(ptdev, csg_id);
1755 events &= ~BIT(csg_id);
1756 }
1757
1758 mutex_unlock(&sched->lock);
1759 }
1760
1761 /**
1762 * panthor_sched_report_fw_events() - Report FW events to the scheduler.
 * @ptdev: Device.
 * @events: Bitmask of FW events to process.
1763 */
1764 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events)
1765 {
1766 if (!ptdev->scheduler)
1767 return;
1768
1769 atomic_or(events, &ptdev->scheduler->fw_events);
1770 sched_queue_work(ptdev->scheduler, fw_events);
1771 }
1772
1773 static const char *fence_get_driver_name(struct dma_fence *fence)
1774 {
1775 return "panthor";
1776 }
1777
1778 static const char *queue_fence_get_timeline_name(struct dma_fence *fence)
1779 {
1780 return "queue-fence";
1781 }
1782
1783 static const struct dma_fence_ops panthor_queue_fence_ops = {
1784 .get_driver_name = fence_get_driver_name,
1785 .get_timeline_name = queue_fence_get_timeline_name,
1786 };
1787
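/* Batching context for CSG slot updates: requests are queued per slot with
 * csgs_upd_ctx_queue_reqs() and then written, doorbelled and acked in one go
 * by csgs_upd_ctx_apply_locked().
 */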
1788 struct panthor_csg_slots_upd_ctx {
1789 u32 update_mask;
1790 u32 timedout_mask;
1791 struct {
1792 u32 value;
1793 u32 mask;
1794 } requests[MAX_CSGS];
1795 };
1796
1797 static void csgs_upd_ctx_init(struct panthor_csg_slots_upd_ctx *ctx)
1798 {
1799 memset(ctx, 0, sizeof(*ctx));
1800 }
1801
1802 static void csgs_upd_ctx_queue_reqs(struct panthor_device *ptdev,
1803 struct panthor_csg_slots_upd_ctx *ctx,
1804 u32 csg_id, u32 value, u32 mask)
1805 {
1806 if (drm_WARN_ON(&ptdev->base, !mask) ||
1807 drm_WARN_ON(&ptdev->base, csg_id >= ptdev->scheduler->csg_slot_count))
1808 return;
1809
1810 ctx->requests[csg_id].value = (ctx->requests[csg_id].value & ~mask) | (value & mask);
1811 ctx->requests[csg_id].mask |= mask;
1812 ctx->update_mask |= BIT(csg_id);
1813 }
1814
1815 static int csgs_upd_ctx_apply_locked(struct panthor_device *ptdev,
1816 struct panthor_csg_slots_upd_ctx *ctx)
1817 {
1818 struct panthor_scheduler *sched = ptdev->scheduler;
1819 u32 update_slots = ctx->update_mask;
1820
1821 lockdep_assert_held(&sched->lock);
1822
1823 if (!ctx->update_mask)
1824 return 0;
1825
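	/* First pass: write the queued request bits to every targeted CSG
	 * interface, then ring all the CSG doorbells at once.
	 */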
1826 while (update_slots) {
1827 struct panthor_fw_csg_iface *csg_iface;
1828 u32 csg_id = ffs(update_slots) - 1;
1829
1830 update_slots &= ~BIT(csg_id);
1831 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1832 panthor_fw_update_reqs(csg_iface, req,
1833 ctx->requests[csg_id].value,
1834 ctx->requests[csg_id].mask);
1835 }
1836
1837 panthor_fw_ring_csg_doorbells(ptdev, ctx->update_mask);
1838
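	/* Second pass: wait for the FW to ack each request and synchronize our
	 * software view of the slot state with what was acked.
	 */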
1839 update_slots = ctx->update_mask;
1840 while (update_slots) {
1841 struct panthor_fw_csg_iface *csg_iface;
1842 u32 csg_id = ffs(update_slots) - 1;
1843 u32 req_mask = ctx->requests[csg_id].mask, acked;
1844 int ret;
1845
1846 update_slots &= ~BIT(csg_id);
1847 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
1848
1849 ret = panthor_fw_csg_wait_acks(ptdev, csg_id, req_mask, &acked, 100);
1850
1851 if (acked & CSG_ENDPOINT_CONFIG)
1852 csg_slot_sync_priority_locked(ptdev, csg_id);
1853
1854 if (acked & CSG_STATE_MASK)
1855 csg_slot_sync_state_locked(ptdev, csg_id);
1856
1857 if (acked & CSG_STATUS_UPDATE) {
1858 csg_slot_sync_queues_state_locked(ptdev, csg_id);
1859 csg_slot_sync_idle_state_locked(ptdev, csg_id);
1860 }
1861
1862 if (ret && acked != req_mask &&
1863 ((csg_iface->input->req ^ csg_iface->output->ack) & req_mask) != 0) {
1864 drm_err(&ptdev->base, "CSG %d update request timedout", csg_id);
1865 ctx->timedout_mask |= BIT(csg_id);
1866 }
1867 }
1868
1869 if (ctx->timedout_mask)
1870 return -ETIMEDOUT;
1871
1872 return 0;
1873 }
1874
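/* Per-tick scheduling context: old_groups[] collects the groups currently
 * bound to CSG slots, groups[] the groups picked for the next timeslice,
 * both indexed by group priority.
 */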
1875 struct panthor_sched_tick_ctx {
1876 struct list_head old_groups[PANTHOR_CSG_PRIORITY_COUNT];
1877 struct list_head groups[PANTHOR_CSG_PRIORITY_COUNT];
1878 u32 idle_group_count;
1879 u32 group_count;
1880 enum panthor_csg_priority min_priority;
1881 struct panthor_vm *vms[MAX_CS_PER_CSG];
1882 u32 as_count;
1883 bool immediate_tick;
1884 u32 csg_upd_failed_mask;
1885 };
1886
1887 static bool
1888 tick_ctx_is_full(const struct panthor_scheduler *sched,
1889 const struct panthor_sched_tick_ctx *ctx)
1890 {
1891 return ctx->group_count == sched->csg_slot_count;
1892 }
1893
1894 static bool
1895 group_is_idle(struct panthor_group *group)
1896 {
1897 struct panthor_device *ptdev = group->ptdev;
1898 u32 inactive_queues;
1899
1900 if (group->csg_id >= 0)
1901 return ptdev->scheduler->csg_slots[group->csg_id].idle;
1902
1903 inactive_queues = group->idle_queues | group->blocked_queues;
1904 return hweight32(inactive_queues) == group->queue_count;
1905 }
1906
1907 static bool
1908 group_can_run(struct panthor_group *group)
1909 {
1910 return group->state != PANTHOR_CS_GROUP_TERMINATED &&
1911 group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
1912 !group->destroyed && group->fatal_queues == 0 &&
1913 !group->timedout;
1914 }
1915
1916 static void
1917 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
1918 struct panthor_sched_tick_ctx *ctx,
1919 struct list_head *queue,
1920 bool skip_idle_groups,
1921 bool owned_by_tick_ctx)
1922 {
1923 struct panthor_group *group, *tmp;
1924
1925 if (tick_ctx_is_full(sched, ctx))
1926 return;
1927
1928 list_for_each_entry_safe(group, tmp, queue, run_node) {
1929 u32 i;
1930
1931 if (!group_can_run(group))
1932 continue;
1933
1934 if (skip_idle_groups && group_is_idle(group))
1935 continue;
1936
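		/* Skip this group if its VM would need a new AS slot while all
		 * AS slots are already claimed by the groups picked so far.
		 */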
1937 for (i = 0; i < ctx->as_count; i++) {
1938 if (ctx->vms[i] == group->vm)
1939 break;
1940 }
1941
1942 if (i == ctx->as_count && ctx->as_count == sched->as_slot_count)
1943 continue;
1944
1945 if (!owned_by_tick_ctx)
1946 group_get(group);
1947
1948 list_move_tail(&group->run_node, &ctx->groups[group->priority]);
1949 ctx->group_count++;
1950 if (group_is_idle(group))
1951 ctx->idle_group_count++;
1952
1953 if (i == ctx->as_count)
1954 ctx->vms[ctx->as_count++] = group->vm;
1955
1956 if (ctx->min_priority > group->priority)
1957 ctx->min_priority = group->priority;
1958
1959 if (tick_ctx_is_full(sched, ctx))
1960 return;
1961 }
1962 }
1963
1964 static void
1965 tick_ctx_insert_old_group(struct panthor_scheduler *sched,
1966 struct panthor_sched_tick_ctx *ctx,
1967 struct panthor_group *group,
1968 bool full_tick)
1969 {
1970 struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id];
1971 struct panthor_group *other_group;
1972
1973 if (!full_tick) {
1974 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1975 return;
1976 }
1977
1978 /* Rotate to make sure groups with lower CSG slot
1979 * priorities have a chance to get a higher CSG slot
1980 * priority next time they get picked. This priority
1981 * has an impact on resource request ordering, so it's
1982 * important to make sure we don't let one group starve
1983 * all other groups with the same group priority.
1984 */
1985 list_for_each_entry(other_group,
1986 &ctx->old_groups[csg_slot->group->priority],
1987 run_node) {
1988 struct panthor_csg_slot *other_csg_slot = &sched->csg_slots[other_group->csg_id];
1989
1990 if (other_csg_slot->priority > csg_slot->priority) {
1991 list_add_tail(&csg_slot->group->run_node, &other_group->run_node);
1992 return;
1993 }
1994 }
1995
1996 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]);
1997 }
1998
1999 static void
2000 tick_ctx_init(struct panthor_scheduler *sched,
2001 struct panthor_sched_tick_ctx *ctx,
2002 bool full_tick)
2003 {
2004 struct panthor_device *ptdev = sched->ptdev;
2005 struct panthor_csg_slots_upd_ctx upd_ctx;
2006 int ret;
2007 u32 i;
2008
2009 memset(ctx, 0, sizeof(*ctx));
2010 csgs_upd_ctx_init(&upd_ctx);
2011
2012 ctx->min_priority = PANTHOR_CSG_PRIORITY_COUNT;
2013 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2014 INIT_LIST_HEAD(&ctx->groups[i]);
2015 INIT_LIST_HEAD(&ctx->old_groups[i]);
2016 }
2017
2018 for (i = 0; i < sched->csg_slot_count; i++) {
2019 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2020 struct panthor_group *group = csg_slot->group;
2021 struct panthor_fw_csg_iface *csg_iface;
2022
2023 if (!group)
2024 continue;
2025
2026 csg_iface = panthor_fw_get_csg_iface(ptdev, i);
2027 group_get(group);
2028
2029 /* If there were unhandled faults on the VM, force processing of
2030 * CSG IRQs, so we can flag the faulty queue.
2031 */
2032 if (panthor_vm_has_unhandled_faults(group->vm)) {
2033 sched_process_csg_irq_locked(ptdev, i);
2034
2035 /* No fatal fault reported, flag all queues as faulty. */
2036 if (!group->fatal_queues)
2037 group->fatal_queues |= GENMASK(group->queue_count - 1, 0);
2038 }
2039
2040 tick_ctx_insert_old_group(sched, ctx, group, full_tick);
2041 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2042 csg_iface->output->ack ^ CSG_STATUS_UPDATE,
2043 CSG_STATUS_UPDATE);
2044 }
2045
2046 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2047 if (ret) {
2048 panthor_device_schedule_reset(ptdev);
2049 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2050 }
2051 }
2052
2053 static void
2054 group_term_post_processing(struct panthor_group *group)
2055 {
2056 struct panthor_job *job, *tmp;
2057 LIST_HEAD(faulty_jobs);
2058 bool cookie;
2059 u32 i = 0;
2060
2061 if (drm_WARN_ON(&group->ptdev->base, group_can_run(group)))
2062 return;
2063
2064 cookie = dma_fence_begin_signalling();
2065 for (i = 0; i < group->queue_count; i++) {
2066 struct panthor_queue *queue = group->queues[i];
2067 struct panthor_syncobj_64b *syncobj;
2068 int err;
2069
2070 if (group->fatal_queues & BIT(i))
2071 err = -EINVAL;
2072 else if (group->timedout)
2073 err = -ETIMEDOUT;
2074 else
2075 err = -ECANCELED;
2076
2077 if (!queue)
2078 continue;
2079
2080 spin_lock(&queue->fence_ctx.lock);
2081 list_for_each_entry_safe(job, tmp, &queue->fence_ctx.in_flight_jobs, node) {
2082 list_move_tail(&job->node, &faulty_jobs);
2083 dma_fence_set_error(job->done_fence, err);
2084 dma_fence_signal_locked(job->done_fence);
2085 }
2086 spin_unlock(&queue->fence_ctx.lock);
2087
2088 /* Manually update the syncobj seqno to unblock waiters. */
2089 syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj));
2090 syncobj->status = ~0;
2091 syncobj->seqno = atomic64_read(&queue->fence_ctx.seqno);
2092 sched_queue_work(group->ptdev->scheduler, sync_upd);
2093 }
2094 dma_fence_end_signalling(cookie);
2095
2096 list_for_each_entry_safe(job, tmp, &faulty_jobs, node) {
2097 list_del_init(&job->node);
2098 panthor_job_put(&job->base);
2099 }
2100 }
2101
2102 static void group_term_work(struct work_struct *work)
2103 {
2104 struct panthor_group *group =
2105 container_of(work, struct panthor_group, term_work);
2106
2107 group_term_post_processing(group);
2108 group_put(group);
2109 }
2110
2111 static void
2112 tick_ctx_cleanup(struct panthor_scheduler *sched,
2113 struct panthor_sched_tick_ctx *ctx)
2114 {
2115 struct panthor_device *ptdev = sched->ptdev;
2116 struct panthor_group *group, *tmp;
2117 u32 i;
2118
2119 for (i = 0; i < ARRAY_SIZE(ctx->old_groups); i++) {
2120 list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) {
2121 /* If everything went fine, we should only have groups
2122 * to be terminated in the old_groups lists.
2123 */
2124 drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
2125 group_can_run(group));
2126
2127 if (!group_can_run(group)) {
2128 list_del_init(&group->run_node);
2129 list_del_init(&group->wait_node);
2130 group_queue_work(group, term);
2131 } else if (group->csg_id >= 0) {
2132 list_del_init(&group->run_node);
2133 } else {
2134 list_move(&group->run_node,
2135 group_is_idle(group) ?
2136 &sched->groups.idle[group->priority] :
2137 &sched->groups.runnable[group->priority]);
2138 }
2139 group_put(group);
2140 }
2141 }
2142
2143 for (i = 0; i < ARRAY_SIZE(ctx->groups); i++) {
2144 /* If everything went fine, the groups to schedule lists should
2145 * be empty.
2146 */
2147 drm_WARN_ON(&ptdev->base,
2148 !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
2149
2150 list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
2151 if (group->csg_id >= 0) {
2152 list_del_init(&group->run_node);
2153 } else {
2154 list_move(&group->run_node,
2155 group_is_idle(group) ?
2156 &sched->groups.idle[group->priority] :
2157 &sched->groups.runnable[group->priority]);
2158 }
2159 group_put(group);
2160 }
2161 }
2162 }
2163
2164 static void
2165 tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *ctx)
2166 {
2167 struct panthor_group *group, *tmp;
2168 struct panthor_device *ptdev = sched->ptdev;
2169 struct panthor_csg_slot *csg_slot;
2170 int prio, new_csg_prio = MAX_CSG_PRIO, i;
2171 u32 free_csg_slots = 0;
2172 struct panthor_csg_slots_upd_ctx upd_ctx;
2173 int ret;
2174
2175 csgs_upd_ctx_init(&upd_ctx);
2176
2177 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2178 /* Suspend or terminate evicted groups. */
2179 list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2180 bool term = !group_can_run(group);
2181 int csg_id = group->csg_id;
2182
2183 if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2184 continue;
2185
2186 csg_slot = &sched->csg_slots[csg_id];
2187 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2188 term ? CSG_STATE_TERMINATE : CSG_STATE_SUSPEND,
2189 CSG_STATE_MASK);
2190 }
2191
2192 /* Update priorities on already running groups. */
2193 list_for_each_entry(group, &ctx->groups[prio], run_node) {
2194 struct panthor_fw_csg_iface *csg_iface;
2195 int csg_id = group->csg_id;
2196
2197 if (csg_id < 0) {
2198 new_csg_prio--;
2199 continue;
2200 }
2201
2202 csg_slot = &sched->csg_slots[csg_id];
2203 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2204 if (csg_slot->priority == new_csg_prio) {
2205 new_csg_prio--;
2206 continue;
2207 }
2208
2209 panthor_fw_update_reqs(csg_iface, endpoint_req,
2210 CSG_EP_REQ_PRIORITY(new_csg_prio),
2211 CSG_EP_REQ_PRIORITY_MASK);
2212 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2213 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2214 CSG_ENDPOINT_CONFIG);
2215 new_csg_prio--;
2216 }
2217 }
2218
2219 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2220 if (ret) {
2221 panthor_device_schedule_reset(ptdev);
2222 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2223 return;
2224 }
2225
2226 /* Unbind evicted groups. */
2227 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2228 list_for_each_entry(group, &ctx->old_groups[prio], run_node) {
2229 /* This group is gone. Process interrupts to clear
2230 * any pending interrupts before we start the new
2231 * group.
2232 */
2233 if (group->csg_id >= 0)
2234 sched_process_csg_irq_locked(ptdev, group->csg_id);
2235
2236 group_unbind_locked(group);
2237 }
2238 }
2239
2240 for (i = 0; i < sched->csg_slot_count; i++) {
2241 if (!sched->csg_slots[i].group)
2242 free_csg_slots |= BIT(i);
2243 }
2244
2245 csgs_upd_ctx_init(&upd_ctx);
2246 new_csg_prio = MAX_CSG_PRIO;
2247
2248 /* Start new groups. */
2249 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2250 list_for_each_entry(group, &ctx->groups[prio], run_node) {
2251 int csg_id = group->csg_id;
2252 struct panthor_fw_csg_iface *csg_iface;
2253
2254 if (csg_id >= 0) {
2255 new_csg_prio--;
2256 continue;
2257 }
2258
2259 csg_id = ffs(free_csg_slots) - 1;
2260 if (drm_WARN_ON(&ptdev->base, csg_id < 0))
2261 break;
2262
2263 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
2264 csg_slot = &sched->csg_slots[csg_id];
2265 group_bind_locked(group, csg_id);
2266 csg_slot_prog_locked(ptdev, csg_id, new_csg_prio--);
2267 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2268 group->state == PANTHOR_CS_GROUP_SUSPENDED ?
2269 CSG_STATE_RESUME : CSG_STATE_START,
2270 CSG_STATE_MASK);
2271 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2272 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
2273 CSG_ENDPOINT_CONFIG);
2274 free_csg_slots &= ~BIT(csg_id);
2275 }
2276 }
2277
2278 ret = csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2279 if (ret) {
2280 panthor_device_schedule_reset(ptdev);
2281 ctx->csg_upd_failed_mask |= upd_ctx.timedout_mask;
2282 return;
2283 }
2284
2285 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
2286 list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) {
2287 list_del_init(&group->run_node);
2288
2289 /* If the group has been destroyed while we were
2290 * scheduling, ask for an immediate tick to
2291 * re-evaluate as soon as possible and get rid of
2292 * this dangling group.
2293 */
2294 if (group->destroyed)
2295 ctx->immediate_tick = true;
2296 group_put(group);
2297 }
2298
2299 /* Return evicted groups to the idle or run queues. Groups
2300 * that can no longer be run (because they've been destroyed
2301 * or experienced an unrecoverable error) will be scheduled
2302 * for destruction in tick_ctx_cleanup().
2303 */
2304 list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) {
2305 if (!group_can_run(group))
2306 continue;
2307
2308 if (group_is_idle(group))
2309 list_move_tail(&group->run_node, &sched->groups.idle[prio]);
2310 else
2311 list_move_tail(&group->run_node, &sched->groups.runnable[prio]);
2312 group_put(group);
2313 }
2314 }
2315
2316 sched->used_csg_slot_count = ctx->group_count;
2317 sched->might_have_idle_groups = ctx->idle_group_count > 0;
2318 }
2319
2320 static u64
2321 tick_ctx_update_resched_target(struct panthor_scheduler *sched,
2322 const struct panthor_sched_tick_ctx *ctx)
2323 {
2324 /* We had space left, no need to reschedule until some external event happens. */
2325 if (!tick_ctx_is_full(sched, ctx))
2326 goto no_tick;
2327
2328 /* If idle groups were scheduled, no need to wake up until some external
2329 * event happens (group unblocked, new job submitted, ...).
2330 */
2331 if (ctx->idle_group_count)
2332 goto no_tick;
2333
2334 if (drm_WARN_ON(&sched->ptdev->base, ctx->min_priority >= PANTHOR_CSG_PRIORITY_COUNT))
2335 goto no_tick;
2336
2337 /* If there are groups of the same priority waiting, we need to
2338 * keep the scheduler ticking, otherwise, we'll just wait for
2339 * new groups with higher priority to be queued.
2340 */
2341 if (!list_empty(&sched->groups.runnable[ctx->min_priority])) {
2342 u64 resched_target = sched->last_tick + sched->tick_period;
2343
2344 if (time_before64(sched->resched_target, sched->last_tick) ||
2345 time_before64(resched_target, sched->resched_target))
2346 sched->resched_target = resched_target;
2347
2348 return sched->resched_target - sched->last_tick;
2349 }
2350
2351 no_tick:
2352 sched->resched_target = U64_MAX;
2353 return U64_MAX;
2354 }
2355
2356 static void tick_work(struct work_struct *work)
2357 {
2358 struct panthor_scheduler *sched = container_of(work, struct panthor_scheduler,
2359 tick_work.work);
2360 struct panthor_device *ptdev = sched->ptdev;
2361 struct panthor_sched_tick_ctx ctx;
2362 u64 remaining_jiffies = 0, resched_delay;
2363 u64 now = get_jiffies_64();
2364 int prio, ret, cookie;
2365
2366 if (!drm_dev_enter(&ptdev->base, &cookie))
2367 return;
2368
2369 ret = panthor_device_resume_and_get(ptdev);
2370 if (drm_WARN_ON(&ptdev->base, ret))
2371 goto out_dev_exit;
2372
2373 if (time_before64(now, sched->resched_target))
2374 remaining_jiffies = sched->resched_target - now;
2375
2376 mutex_lock(&sched->lock);
2377 if (panthor_device_reset_is_pending(sched->ptdev))
2378 goto out_unlock;
2379
2380 tick_ctx_init(sched, &ctx, remaining_jiffies != 0);
2381 if (ctx.csg_upd_failed_mask)
2382 goto out_cleanup_ctx;
2383
2384 if (remaining_jiffies) {
2385 /* Scheduling forced in the middle of a tick. Only RT groups
2386 * can preempt non-RT ones. Currently running RT groups can't be
2387 * preempted.
2388 */
2389 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2390 prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2391 prio--) {
2392 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio],
2393 true, true);
2394 if (prio == PANTHOR_CSG_PRIORITY_RT) {
2395 tick_ctx_pick_groups_from_list(sched, &ctx,
2396 &sched->groups.runnable[prio],
2397 true, false);
2398 }
2399 }
2400 }
2401
2402 /* First pick non-idle groups */
2403 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2404 prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2405 prio--) {
2406 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.runnable[prio],
2407 true, false);
2408 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], true, true);
2409 }
2410
2411 /* If we have free CSG slots left, pick idle groups */
2412 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1;
2413 prio >= 0 && !tick_ctx_is_full(sched, &ctx);
2414 prio--) {
2415 /* Check the old_group queue first to avoid reprogramming the slots */
2416 tick_ctx_pick_groups_from_list(sched, &ctx, &ctx.old_groups[prio], false, true);
2417 tick_ctx_pick_groups_from_list(sched, &ctx, &sched->groups.idle[prio],
2418 false, false);
2419 }
2420
2421 tick_ctx_apply(sched, &ctx);
2422 if (ctx.csg_upd_failed_mask)
2423 goto out_cleanup_ctx;
2424
2425 if (ctx.idle_group_count == ctx.group_count) {
2426 panthor_devfreq_record_idle(sched->ptdev);
2427 if (sched->pm.has_ref) {
2428 pm_runtime_put_autosuspend(ptdev->base.dev);
2429 sched->pm.has_ref = false;
2430 }
2431 } else {
2432 panthor_devfreq_record_busy(sched->ptdev);
2433 if (!sched->pm.has_ref) {
2434 pm_runtime_get(ptdev->base.dev);
2435 sched->pm.has_ref = true;
2436 }
2437 }
2438
2439 sched->last_tick = now;
2440 resched_delay = tick_ctx_update_resched_target(sched, &ctx);
2441 if (ctx.immediate_tick)
2442 resched_delay = 0;
2443
2444 if (resched_delay != U64_MAX)
2445 sched_queue_delayed_work(sched, tick, resched_delay);
2446
2447 out_cleanup_ctx:
2448 tick_ctx_cleanup(sched, &ctx);
2449
2450 out_unlock:
2451 mutex_unlock(&sched->lock);
2452 pm_runtime_mark_last_busy(ptdev->base.dev);
2453 pm_runtime_put_autosuspend(ptdev->base.dev);
2454
2455 out_dev_exit:
2456 drm_dev_exit(cookie);
2457 }
2458
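/* Returns a negative error code if the syncwait object can't be retrieved,
 * 0 if the SYNC_WAIT condition is still unmet, and a non-zero value once the
 * queue can be unblocked.
 */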
2459 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx)
2460 {
2461 struct panthor_queue *queue = group->queues[queue_idx];
2462 union {
2463 struct panthor_syncobj_64b sync64;
2464 struct panthor_syncobj_32b sync32;
2465 } *syncobj;
2466 bool result;
2467 u64 value;
2468
2469 syncobj = panthor_queue_get_syncwait_obj(group, queue);
2470 if (!syncobj)
2471 return -EINVAL;
2472
2473 value = queue->syncwait.sync64 ?
2474 syncobj->sync64.seqno :
2475 syncobj->sync32.seqno;
2476
2477 if (queue->syncwait.gt)
2478 result = value > queue->syncwait.ref;
2479 else
2480 result = value <= queue->syncwait.ref;
2481
2482 if (result)
2483 panthor_queue_put_syncwait_obj(queue);
2484
2485 return result;
2486 }
2487
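/* Re-evaluate the SYNC_WAIT conditions of all blocked queues, unblock those
 * whose condition is now met, and kick the scheduler if an RT group got
 * unblocked.
 */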
2488 static void sync_upd_work(struct work_struct *work)
2489 {
2490 struct panthor_scheduler *sched = container_of(work,
2491 struct panthor_scheduler,
2492 sync_upd_work);
2493 struct panthor_group *group, *tmp;
2494 bool immediate_tick = false;
2495
2496 mutex_lock(&sched->lock);
2497 list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) {
2498 u32 tested_queues = group->blocked_queues;
2499 u32 unblocked_queues = 0;
2500
2501 while (tested_queues) {
2502 u32 cs_id = ffs(tested_queues) - 1;
2503 int ret;
2504
2505 ret = panthor_queue_eval_syncwait(group, cs_id);
2506 drm_WARN_ON(&group->ptdev->base, ret < 0);
2507 if (ret)
2508 unblocked_queues |= BIT(cs_id);
2509
2510 tested_queues &= ~BIT(cs_id);
2511 }
2512
2513 if (unblocked_queues) {
2514 group->blocked_queues &= ~unblocked_queues;
2515
2516 if (group->csg_id < 0) {
2517 list_move(&group->run_node,
2518 &sched->groups.runnable[group->priority]);
2519 if (group->priority == PANTHOR_CSG_PRIORITY_RT)
2520 immediate_tick = true;
2521 }
2522 }
2523
2524 if (!group->blocked_queues)
2525 list_del_init(&group->wait_node);
2526 }
2527 mutex_unlock(&sched->lock);
2528
2529 if (immediate_tick)
2530 sched_queue_delayed_work(sched, tick, 0);
2531 }
2532
2533 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
2534 {
2535 struct panthor_device *ptdev = group->ptdev;
2536 struct panthor_scheduler *sched = ptdev->scheduler;
2537 struct list_head *queue = &sched->groups.runnable[group->priority];
2538 u64 delay_jiffies = 0;
2539 bool was_idle;
2540 u64 now;
2541
2542 if (!group_can_run(group))
2543 return;
2544
2545 /* All updated queues are blocked, no need to wake up the scheduler. */
2546 if ((queue_mask & group->blocked_queues) == queue_mask)
2547 return;
2548
2549 was_idle = group_is_idle(group);
2550 group->idle_queues &= ~queue_mask;
2551
2552 /* Don't mess with the lists if we're in the middle of a reset. */
2553 if (atomic_read(&sched->reset.in_progress))
2554 return;
2555
2556 if (was_idle && !group_is_idle(group))
2557 list_move_tail(&group->run_node, queue);
2558
2559 /* RT groups are preemptive. */
2560 if (group->priority == PANTHOR_CSG_PRIORITY_RT) {
2561 sched_queue_delayed_work(sched, tick, 0);
2562 return;
2563 }
2564
2565 /* Some groups might be idle, force an immediate tick to
2566 * re-evaluate.
2567 */
2568 if (sched->might_have_idle_groups) {
2569 sched_queue_delayed_work(sched, tick, 0);
2570 return;
2571 }
2572
2573 /* Scheduler is ticking, nothing to do. */
2574 if (sched->resched_target != U64_MAX) {
2575 /* If there are free slots, force an immediate tick. */
2576 if (sched->used_csg_slot_count < sched->csg_slot_count)
2577 sched_queue_delayed_work(sched, tick, 0);
2578
2579 return;
2580 }
2581
2582 /* Scheduler tick was off, recalculate the resched_target based on the
2583 * last tick event, and queue the scheduler work.
2584 */
2585 now = get_jiffies_64();
2586 sched->resched_target = sched->last_tick + sched->tick_period;
2587 if (sched->used_csg_slot_count == sched->csg_slot_count &&
2588 time_before64(now, sched->resched_target))
2589 delay_jiffies = min_t(unsigned long, sched->resched_target - now, ULONG_MAX);
2590
2591 sched_queue_delayed_work(sched, tick, delay_jiffies);
2592 }
2593
2594 static void queue_stop(struct panthor_queue *queue,
2595 struct panthor_job *bad_job)
2596 {
2597 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
2598 }
2599
2600 static void queue_start(struct panthor_queue *queue)
2601 {
2602 struct panthor_job *job;
2603
2604 /* Re-assign the parent fences. */
2605 list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
2606 job->base.s_fence->parent = dma_fence_get(job->done_fence);
2607
2608 drm_sched_start(&queue->scheduler, 0);
2609 }
2610
2611 static void panthor_group_stop(struct panthor_group *group)
2612 {
2613 struct panthor_scheduler *sched = group->ptdev->scheduler;
2614
2615 lockdep_assert_held(&sched->reset.lock);
2616
2617 for (u32 i = 0; i < group->queue_count; i++)
2618 queue_stop(group->queues[i], NULL);
2619
2620 group_get(group);
2621 list_move_tail(&group->run_node, &sched->reset.stopped_groups);
2622 }
2623
2624 static void panthor_group_start(struct panthor_group *group)
2625 {
2626 struct panthor_scheduler *sched = group->ptdev->scheduler;
2627
2628 lockdep_assert_held(&group->ptdev->scheduler->reset.lock);
2629
2630 for (u32 i = 0; i < group->queue_count; i++)
2631 queue_start(group->queues[i]);
2632
2633 if (group_can_run(group)) {
2634 list_move_tail(&group->run_node,
2635 group_is_idle(group) ?
2636 &sched->groups.idle[group->priority] :
2637 &sched->groups.runnable[group->priority]);
2638 } else {
2639 list_del_init(&group->run_node);
2640 list_del_init(&group->wait_node);
2641 group_queue_work(group, term);
2642 }
2643
2644 group_put(group);
2645 }
2646
2647 static void panthor_sched_immediate_tick(struct panthor_device *ptdev)
2648 {
2649 struct panthor_scheduler *sched = ptdev->scheduler;
2650
2651 sched_queue_delayed_work(sched, tick, 0);
2652 }
2653
2654 /**
2655 * panthor_sched_report_mmu_fault() - Report MMU faults to the scheduler.
 * @ptdev: Device.
2656 */
2657 void panthor_sched_report_mmu_fault(struct panthor_device *ptdev)
2658 {
2659 /* Force a tick to immediately kill faulty groups. */
2660 if (ptdev->scheduler)
2661 panthor_sched_immediate_tick(ptdev);
2662 }
2663
2664 void panthor_sched_resume(struct panthor_device *ptdev)
2665 {
2666 /* Force a tick to re-evaluate after a resume. */
2667 panthor_sched_immediate_tick(ptdev);
2668 }
2669
2670 void panthor_sched_suspend(struct panthor_device *ptdev)
2671 {
2672 struct panthor_scheduler *sched = ptdev->scheduler;
2673 struct panthor_csg_slots_upd_ctx upd_ctx;
2674 struct panthor_group *group;
2675 u32 suspended_slots;
2676 u32 i;
2677
2678 mutex_lock(&sched->lock);
2679 csgs_upd_ctx_init(&upd_ctx);
2680 for (i = 0; i < sched->csg_slot_count; i++) {
2681 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2682
2683 if (csg_slot->group) {
2684 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, i,
2685 group_can_run(csg_slot->group) ?
2686 CSG_STATE_SUSPEND : CSG_STATE_TERMINATE,
2687 CSG_STATE_MASK);
2688 }
2689 }
2690
2691 suspended_slots = upd_ctx.update_mask;
2692
2693 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2694 suspended_slots &= ~upd_ctx.timedout_mask;
2695
2696 if (upd_ctx.timedout_mask) {
2697 u32 slot_mask = upd_ctx.timedout_mask;
2698
2699 drm_err(&ptdev->base, "CSG suspend failed, escalating to termination");
2700 csgs_upd_ctx_init(&upd_ctx);
2701 while (slot_mask) {
2702 u32 csg_id = ffs(slot_mask) - 1;
2703 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2704
2705 /* If the group was still usable before that point, we consider
2706 * it innocent.
2707 */
2708 if (group_can_run(csg_slot->group))
2709 csg_slot->group->innocent = true;
2710
2711 /* We consider group suspension failures as fatal and flag the
2712 * group as unusable by setting timedout=true.
2713 */
2714 csg_slot->group->timedout = true;
2715
2716 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
2717 CSG_STATE_TERMINATE,
2718 CSG_STATE_MASK);
2719 slot_mask &= ~BIT(csg_id);
2720 }
2721
2722 csgs_upd_ctx_apply_locked(ptdev, &upd_ctx);
2723
2724 slot_mask = upd_ctx.timedout_mask;
2725 while (slot_mask) {
2726 u32 csg_id = ffs(slot_mask) - 1;
2727 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2728
2729 /* The terminate command timed out, but the soft-reset will
2730 * automatically terminate all active groups, so let's
2731 * force the state to terminated here.
2732 */
2733 if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
2734 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2735 slot_mask &= ~BIT(csg_id);
2736 }
2737 }
2738
2739 /* Flush L2 and LSC caches to make sure suspend state is up-to-date.
2740 * If the flush fails, flag all queues for termination.
2741 */
2742 if (suspended_slots) {
2743 bool flush_caches_failed = false;
2744 u32 slot_mask = suspended_slots;
2745
2746 if (panthor_gpu_flush_caches(ptdev, CACHE_CLEAN, CACHE_CLEAN, 0))
2747 flush_caches_failed = true;
2748
2749 while (slot_mask) {
2750 u32 csg_id = ffs(slot_mask) - 1;
2751 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
2752
2753 if (flush_caches_failed)
2754 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
2755 else
2756 csg_slot_sync_update_locked(ptdev, csg_id);
2757
2758 slot_mask &= ~BIT(csg_id);
2759 }
2760 }
2761
2762 for (i = 0; i < sched->csg_slot_count; i++) {
2763 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
2764
2765 group = csg_slot->group;
2766 if (!group)
2767 continue;
2768
2769 group_get(group);
2770
2771 if (group->csg_id >= 0)
2772 sched_process_csg_irq_locked(ptdev, group->csg_id);
2773
2774 group_unbind_locked(group);
2775
2776 drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node));
2777
2778 if (group_can_run(group)) {
2779 list_add(&group->run_node,
2780 &sched->groups.idle[group->priority]);
2781 } else {
2782 /* We don't bother stopping the scheduler if the group is
2783 * faulty, the group termination work will finish the job.
2784 */
2785 list_del_init(&group->wait_node);
2786 group_queue_work(group, term);
2787 }
2788 group_put(group);
2789 }
2790 mutex_unlock(&sched->lock);
2791 }
2792
2793 void panthor_sched_pre_reset(struct panthor_device *ptdev)
2794 {
2795 struct panthor_scheduler *sched = ptdev->scheduler;
2796 struct panthor_group *group, *group_tmp;
2797 u32 i;
2798
2799 mutex_lock(&sched->reset.lock);
2800 atomic_set(&sched->reset.in_progress, true);
2801
2802 /* Cancel all scheduler works. Once this is done, these works can't be
2803 * scheduled again until the reset operation is complete.
2804 */
2805 cancel_work_sync(&sched->sync_upd_work);
2806 cancel_delayed_work_sync(&sched->tick_work);
2807
2808 panthor_sched_suspend(ptdev);
2809
2810 /* Stop all groups that might still accept jobs, so we don't get passed
2811 * new jobs while we're resetting.
2812 */
2813 for (i = 0; i < ARRAY_SIZE(sched->groups.runnable); i++) {
2814 /* All groups should be in the idle lists. */
2815 drm_WARN_ON(&ptdev->base, !list_empty(&sched->groups.runnable[i]));
2816 list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node)
2817 panthor_group_stop(group);
2818 }
2819
2820 for (i = 0; i < ARRAY_SIZE(sched->groups.idle); i++) {
2821 list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node)
2822 panthor_group_stop(group);
2823 }
2824
2825 mutex_unlock(&sched->reset.lock);
2826 }
2827
2828 void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
2829 {
2830 struct panthor_scheduler *sched = ptdev->scheduler;
2831 struct panthor_group *group, *group_tmp;
2832
2833 mutex_lock(&sched->reset.lock);
2834
2835 list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
2836 /* Consider all previously running groups as terminated if the
2837 * reset failed.
2838 */
2839 if (reset_failed)
2840 group->state = PANTHOR_CS_GROUP_TERMINATED;
2841
2842 panthor_group_start(group);
2843 }
2844
2845 /* We're done resetting the GPU, clear the reset.in_progress bit so we can
2846 * kick the scheduler.
2847 */
2848 atomic_set(&sched->reset.in_progress, false);
2849 mutex_unlock(&sched->reset.lock);
2850
2851 /* No need to queue a tick and update syncs if the reset failed. */
2852 if (!reset_failed) {
2853 sched_queue_delayed_work(sched, tick, 0);
2854 sched_queue_work(sched, sync_upd);
2855 }
2856 }
2857
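/* Fold the cycle/timestamp deltas sampled around the job's CS call into the
 * group's fdinfo counters.
 */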
2858 static void update_fdinfo_stats(struct panthor_job *job)
2859 {
2860 struct panthor_group *group = job->group;
2861 struct panthor_queue *queue = group->queues[job->queue_idx];
2862 struct panthor_gpu_usage *fdinfo = &group->fdinfo.data;
2863 struct panthor_job_profiling_data *slots = queue->profiling.slots->kmap;
2864 struct panthor_job_profiling_data *data = &slots[job->profiling.slot];
2865
2866 scoped_guard(spinlock, &group->fdinfo.lock) {
2867 if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_CYCLES)
2868 fdinfo->cycles += data->cycles.after - data->cycles.before;
2869 if (job->profiling.mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP)
2870 fdinfo->time += data->time.after - data->time.before;
2871 }
2872 }
2873
2874 void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
2875 {
2876 struct panthor_group_pool *gpool = pfile->groups;
2877 struct panthor_group *group;
2878 unsigned long i;
2879
2880 if (IS_ERR_OR_NULL(gpool))
2881 return;
2882
2883 xa_lock(&gpool->xa);
2884 xa_for_each(&gpool->xa, i, group) {
2885 guard(spinlock)(&group->fdinfo.lock);
2886 pfile->stats.cycles += group->fdinfo.data.cycles;
2887 pfile->stats.time += group->fdinfo.data.time;
2888 group->fdinfo.data.cycles = 0;
2889 group->fdinfo.data.time = 0;
2890 }
2891 xa_unlock(&gpool->xa);
2892 }
2893
2894 static void group_sync_upd_work(struct work_struct *work)
2895 {
2896 struct panthor_group *group =
2897 container_of(work, struct panthor_group, sync_upd_work);
2898 struct panthor_job *job, *job_tmp;
2899 LIST_HEAD(done_jobs);
2900 u32 queue_idx;
2901 bool cookie;
2902
2903 cookie = dma_fence_begin_signalling();
2904 for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
2905 struct panthor_queue *queue = group->queues[queue_idx];
2906 struct panthor_syncobj_64b *syncobj;
2907
2908 if (!queue)
2909 continue;
2910
2911 syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
2912
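		/* In-flight jobs are kept in submission order: stop at the
		 * first job whose seqno the syncobj hasn't reached yet.
		 */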
2913 spin_lock(&queue->fence_ctx.lock);
2914 list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
2915 if (syncobj->seqno < job->done_fence->seqno)
2916 break;
2917
2918 list_move_tail(&job->node, &done_jobs);
2919 dma_fence_signal_locked(job->done_fence);
2920 }
2921 spin_unlock(&queue->fence_ctx.lock);
2922 }
2923 dma_fence_end_signalling(cookie);
2924
2925 list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
2926 if (job->profiling.mask)
2927 update_fdinfo_stats(job);
2928 list_del_init(&job->node);
2929 panthor_job_put(&job->base);
2930 }
2931
2932 group_put(group);
2933 }
2934
2935 struct panthor_job_ringbuf_instrs {
2936 u64 buffer[MAX_INSTRS_PER_JOB];
2937 u32 count;
2938 };
2939
2940 struct panthor_job_instr {
2941 u32 profile_mask;
2942 u64 instr;
2943 };
2944
2945 #define JOB_INSTR(__prof, __instr) \
2946 { \
2947 .profile_mask = __prof, \
2948 .instr = __instr, \
2949 }
2950
2951 static void
2952 copy_instrs_to_ringbuf(struct panthor_queue *queue,
2953 struct panthor_job *job,
2954 struct panthor_job_ringbuf_instrs *instrs)
2955 {
2956 u64 ringbuf_size = panthor_kernel_bo_size(queue->ringbuf);
2957 u64 start = job->ringbuf.start & (ringbuf_size - 1);
2958 u64 size, written;
2959
2960 /*
2961 * We need to write a whole slot, including any trailing zeroes
2962 * that may come at the end of it. Also, because instrs.buffer has
2963 * been zero-initialised, there's no need to pad it with 0's
2964 */
2965 instrs->count = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
2966 size = instrs->count * sizeof(u64);
2967 WARN_ON(size > ringbuf_size);
2968 written = min(ringbuf_size - start, size);
2969
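	/* Copy what fits before the end of the ring buffer, then wrap around
	 * and copy the remainder at the start.
	 */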
2970 memcpy(queue->ringbuf->kmap + start, instrs->buffer, written);
2971
2972 if (written < size)
2973 memcpy(queue->ringbuf->kmap,
2974 &instrs->buffer[written / sizeof(u64)],
2975 size - written);
2976 }
2977
2978 struct panthor_job_cs_params {
2979 u32 profile_mask;
2980 u64 addr_reg; u64 val_reg;
2981 u64 cycle_reg; u64 time_reg;
2982 u64 sync_addr; u64 times_addr;
2983 u64 cs_start; u64 cs_size;
2984 u32 last_flush; u32 waitall_mask;
2985 };
2986
2987 static void
2988 get_job_cs_params(struct panthor_job *job, struct panthor_job_cs_params *params)
2989 {
2990 struct panthor_group *group = job->group;
2991 struct panthor_queue *queue = group->queues[job->queue_idx];
2992 struct panthor_device *ptdev = group->ptdev;
2993 struct panthor_scheduler *sched = ptdev->scheduler;
2994
2995 params->addr_reg = ptdev->csif_info.cs_reg_count -
2996 ptdev->csif_info.unpreserved_cs_reg_count;
2997 params->val_reg = params->addr_reg + 2;
2998 params->cycle_reg = params->addr_reg;
2999 params->time_reg = params->val_reg;
3000
3001 params->sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) +
3002 job->queue_idx * sizeof(struct panthor_syncobj_64b);
3003 params->times_addr = panthor_kernel_bo_gpuva(queue->profiling.slots) +
3004 (job->profiling.slot * sizeof(struct panthor_job_profiling_data));
3005 params->waitall_mask = GENMASK(sched->sb_slot_count - 1, 0);
3006
3007 params->cs_start = job->call_info.start;
3008 params->cs_size = job->call_info.size;
3009 params->last_flush = job->call_info.latest_flush;
3010
3011 params->profile_mask = job->profiling.mask;
3012 }
3013
3014 #define JOB_INSTR_ALWAYS(instr) \
3015 JOB_INSTR(PANTHOR_DEVICE_PROFILING_DISABLED, (instr))
3016 #define JOB_INSTR_TIMESTAMP(instr) \
3017 JOB_INSTR(PANTHOR_DEVICE_PROFILING_TIMESTAMP, (instr))
3018 #define JOB_INSTR_CYCLES(instr) \
3019 JOB_INSTR(PANTHOR_DEVICE_PROFILING_CYCLES, (instr))
3020
3021 static void
3022 prepare_job_instrs(const struct panthor_job_cs_params *params,
3023 struct panthor_job_ringbuf_instrs *instrs)
3024 {
3025 const struct panthor_job_instr instr_seq[] = {
3026 /* MOV32 rX+2, cs.latest_flush */
3027 JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->last_flush),
3028 /* FLUSH_CACHE2.clean_inv_all.no_wait.signal(0) rX+2 */
3029 JOB_INSTR_ALWAYS((36ull << 56) | (0ull << 48) | (params->val_reg << 40) |
3030 (0 << 16) | 0x233),
3031 /* MOV48 rX:rX+1, cycles_offset */
3032 JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3033 (params->times_addr +
3034 offsetof(struct panthor_job_profiling_data, cycles.before))),
3035 /* STORE_STATE cycles */
3036 JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3037 /* MOV48 rX:rX+1, time_offset */
3038 JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3039 (params->times_addr +
3040 offsetof(struct panthor_job_profiling_data, time.before))),
3041 /* STORE_STATE timer */
3042 JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3043 /* MOV48 rX:rX+1, cs.start */
3044 JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->cs_start),
3045 /* MOV32 rX+2, cs.size */
3046 JOB_INSTR_ALWAYS((2ull << 56) | (params->val_reg << 48) | params->cs_size),
3047 /* WAIT(0) => waits for FLUSH_CACHE2 instruction */
3048 JOB_INSTR_ALWAYS((3ull << 56) | (1 << 16)),
3049 /* CALL rX:rX+1, rX+2 */
3050 JOB_INSTR_ALWAYS((32ull << 56) | (params->addr_reg << 40) |
3051 (params->val_reg << 32)),
3052 /* MOV48 rX:rX+1, cycles_offset */
3053 JOB_INSTR_CYCLES((1ull << 56) | (params->cycle_reg << 48) |
3054 (params->times_addr +
3055 offsetof(struct panthor_job_profiling_data, cycles.after))),
3056 /* STORE_STATE cycles */
3057 JOB_INSTR_CYCLES((40ull << 56) | (params->cycle_reg << 40) | (1ll << 32)),
3058 /* MOV48 rX:rX+1, time_offset */
3059 JOB_INSTR_TIMESTAMP((1ull << 56) | (params->time_reg << 48) |
3060 (params->times_addr +
3061 offsetof(struct panthor_job_profiling_data, time.after))),
3062 /* STORE_STATE timer */
3063 JOB_INSTR_TIMESTAMP((40ull << 56) | (params->time_reg << 40) | (0ll << 32)),
3064 /* MOV48 rX:rX+1, sync_addr */
3065 JOB_INSTR_ALWAYS((1ull << 56) | (params->addr_reg << 48) | params->sync_addr),
3066 /* MOV48 rX+2, #1 */
3067 JOB_INSTR_ALWAYS((1ull << 56) | (params->val_reg << 48) | 1),
3068 /* WAIT(all) */
3069 JOB_INSTR_ALWAYS((3ull << 56) | (params->waitall_mask << 16)),
3070 /* SYNC_ADD64.system_scope.propagate_err.nowait rX:rX+1, rX+2 */
3071 JOB_INSTR_ALWAYS((51ull << 56) | (0ull << 48) | (params->addr_reg << 40) |
3072 (params->val_reg << 32) | (0 << 16) | 1),
3073 /* ERROR_BARRIER, so we can recover from faults at job boundaries. */
3074 JOB_INSTR_ALWAYS((47ull << 56)),
3075 };
3076 u32 pad;
3077
3078 instrs->count = 0;
3079
3080 /* NEED to be cacheline aligned to please the prefetcher. */
3081 static_assert(sizeof(instrs->buffer) % 64 == 0,
3082 "panthor_job_ringbuf_instrs::buffer is not aligned on a cacheline");
3083
3084 /* Make sure we have enough storage to store the whole sequence. */
3085 static_assert(ALIGN(ARRAY_SIZE(instr_seq), NUM_INSTRS_PER_CACHE_LINE) ==
3086 ARRAY_SIZE(instrs->buffer),
3087 "instr_seq vs panthor_job_ringbuf_instrs::buffer size mismatch");
3088
3089 for (u32 i = 0; i < ARRAY_SIZE(instr_seq); i++) {
3090 /* If the profile mask of this instruction is not enabled, skip it. */
3091 if (instr_seq[i].profile_mask &&
3092 !(instr_seq[i].profile_mask & params->profile_mask))
3093 continue;
3094
3095 instrs->buffer[instrs->count++] = instr_seq[i].instr;
3096 }
3097
3098 pad = ALIGN(instrs->count, NUM_INSTRS_PER_CACHE_LINE);
3099 memset(&instrs->buffer[instrs->count], 0,
3100 (pad - instrs->count) * sizeof(instrs->buffer[0]));
3101 instrs->count = pad;
3102 }
3103
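/* Number of ringbuffer instructions (64-bit words) consumed by a job with the
 * given profiling mask.
 */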
3104 static u32 calc_job_credits(u32 profile_mask)
3105 {
3106 struct panthor_job_ringbuf_instrs instrs;
3107 struct panthor_job_cs_params params = {
3108 .profile_mask = profile_mask,
3109 };
3110
3111 prepare_job_instrs(&params, &instrs);
3112 return instrs.count;
3113 }
3114
3115 static struct dma_fence *
3116 queue_run_job(struct drm_sched_job *sched_job)
3117 {
3118 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3119 struct panthor_group *group = job->group;
3120 struct panthor_queue *queue = group->queues[job->queue_idx];
3121 struct panthor_device *ptdev = group->ptdev;
3122 struct panthor_scheduler *sched = ptdev->scheduler;
3123 struct panthor_job_ringbuf_instrs instrs;
3124 struct panthor_job_cs_params cs_params;
3125 struct dma_fence *done_fence;
3126 int ret;
3127
3128 /* Stream size is zero, nothing to do except making sure all previously
3129 * submitted jobs are done before we signal the
3130 * drm_sched_job::s_fence::finished fence.
3131 */
3132 if (!job->call_info.size) {
3133 job->done_fence = dma_fence_get(queue->fence_ctx.last_fence);
3134 return dma_fence_get(job->done_fence);
3135 }
3136
3137 ret = panthor_device_resume_and_get(ptdev);
3138 if (drm_WARN_ON(&ptdev->base, ret))
3139 return ERR_PTR(ret);
3140
3141 mutex_lock(&sched->lock);
3142 if (!group_can_run(group)) {
3143 done_fence = ERR_PTR(-ECANCELED);
3144 goto out_unlock;
3145 }
3146
3147 dma_fence_init(job->done_fence,
3148 &panthor_queue_fence_ops,
3149 &queue->fence_ctx.lock,
3150 queue->fence_ctx.id,
3151 atomic64_inc_return(&queue->fence_ctx.seqno));
3152
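	/* Grab the next profiling slot, wrapping around when the end of the
	 * profiling ring is reached.
	 */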
3153 job->profiling.slot = queue->profiling.seqno++;
3154 if (queue->profiling.seqno == queue->profiling.slot_count)
3155 queue->profiling.seqno = 0;
3156
3157 job->ringbuf.start = queue->iface.input->insert;
3158
3159 get_job_cs_params(job, &cs_params);
3160 prepare_job_instrs(&cs_params, &instrs);
3161 copy_instrs_to_ringbuf(queue, job, &instrs);
3162
3163 job->ringbuf.end = job->ringbuf.start + (instrs.count * sizeof(u64));
3164
3165 panthor_job_get(&job->base);
3166 spin_lock(&queue->fence_ctx.lock);
3167 list_add_tail(&job->node, &queue->fence_ctx.in_flight_jobs);
3168 spin_unlock(&queue->fence_ctx.lock);
3169
3170 /* Make sure the ring buffer is updated before the INSERT
3171 * register.
3172 */
3173 wmb();
3174
3175 queue->iface.input->extract = queue->iface.output->extract;
3176 queue->iface.input->insert = job->ringbuf.end;
3177
3178 if (group->csg_id < 0) {
3179 /* If the queue is blocked, we want to keep the timeout running, so we
3180 * can detect unbounded waits and kill the group when that happens.
3181 * Otherwise, we suspend the timeout so the time we spend waiting for
3182 * a CSG slot is not counted.
3183 */
3184 if (!(group->blocked_queues & BIT(job->queue_idx)) &&
3185 !queue->timeout_suspended) {
3186 queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
3187 queue->timeout_suspended = true;
3188 }
3189
3190 group_schedule_locked(group, BIT(job->queue_idx));
3191 } else {
3192 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
3193 if (!sched->pm.has_ref &&
3194 !(group->blocked_queues & BIT(job->queue_idx))) {
3195 pm_runtime_get(ptdev->base.dev);
3196 sched->pm.has_ref = true;
3197 }
3198 panthor_devfreq_record_busy(sched->ptdev);
3199 }
3200
3201 /* Update the last fence. */
3202 dma_fence_put(queue->fence_ctx.last_fence);
3203 queue->fence_ctx.last_fence = dma_fence_get(job->done_fence);
3204
3205 done_fence = dma_fence_get(job->done_fence);
3206
3207 out_unlock:
3208 mutex_unlock(&sched->lock);
3209 pm_runtime_mark_last_busy(ptdev->base.dev);
3210 pm_runtime_put_autosuspend(ptdev->base.dev);
3211
3212 return done_fence;
3213 }
3214
3215 static enum drm_gpu_sched_stat
3216 queue_timedout_job(struct drm_sched_job *sched_job)
3217 {
3218 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3219 struct panthor_group *group = job->group;
3220 struct panthor_device *ptdev = group->ptdev;
3221 struct panthor_scheduler *sched = ptdev->scheduler;
3222 struct panthor_queue *queue = group->queues[job->queue_idx];
3223
3224 drm_warn(&ptdev->base, "job timeout\n");
3225
3226 drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
3227
3228 queue_stop(queue, job);
3229
3230 mutex_lock(&sched->lock);
3231 group->timedout = true;
3232 if (group->csg_id >= 0) {
3233 sched_queue_delayed_work(ptdev->scheduler, tick, 0);
3234 } else {
3235 /* Remove from the run queues, so the scheduler can't
3236 * pick the group on the next tick.
3237 */
3238 list_del_init(&group->run_node);
3239 list_del_init(&group->wait_node);
3240
3241 group_queue_work(group, term);
3242 }
3243 mutex_unlock(&sched->lock);
3244
3245 queue_start(queue);
3246
3247 return DRM_GPU_SCHED_STAT_NOMINAL;
3248 }
3249
3250 static void queue_free_job(struct drm_sched_job *sched_job)
3251 {
3252 drm_sched_job_cleanup(sched_job);
3253 panthor_job_put(sched_job);
3254 }
3255
3256 static const struct drm_sched_backend_ops panthor_queue_sched_ops = {
3257 .run_job = queue_run_job,
3258 .timedout_job = queue_timedout_job,
3259 .free_job = queue_free_job,
3260 };
3261
3262 static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
3263 u32 cs_ringbuf_size)
3264 {
3265 u32 min_profiled_job_instrs = U32_MAX;
3266 u32 last_flag = fls(PANTHOR_DEVICE_PROFILING_ALL);
3267
3268 /*
3269 * We want to calculate the minimum size of a profiled job's CS,
3270 * because profiled jobs need additional instructions for the sampling
3271 * of performance metrics and thus take up more slots in
3272 * the queue's ringbuffer. This means we might not need as many job
3273 * slots for keeping track of their profiling information. What we
3274 * need is the maximum number of slots we should allocate to this end,
3275 * which matches the maximum number of profiled jobs we can place
3276 * simultaneously in the queue's ring buffer.
3277 * That has to be calculated separately for every single job profiling
3278 * flag, but not in the case job profiling is disabled, since unprofiled
3279 * jobs don't need to keep track of this at all.
3280 */
3281 for (u32 i = 0; i < last_flag; i++) {
3282 min_profiled_job_instrs =
3283 min(min_profiled_job_instrs, calc_job_credits(BIT(i)));
3284 }
3285
3286 return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
3287 }
3288
3289 static struct panthor_queue *
3290 group_create_queue(struct panthor_group *group,
3291 const struct drm_panthor_queue_create *args)
3292 {
3293 struct drm_gpu_scheduler *drm_sched;
3294 struct panthor_queue *queue;
3295 int ret;
3296
3297 if (args->pad[0] || args->pad[1] || args->pad[2])
3298 return ERR_PTR(-EINVAL);
3299
3300 if (args->ringbuf_size < SZ_4K || args->ringbuf_size > SZ_64K ||
3301 !is_power_of_2(args->ringbuf_size))
3302 return ERR_PTR(-EINVAL);
3303
3304 if (args->priority > CSF_MAX_QUEUE_PRIO)
3305 return ERR_PTR(-EINVAL);
3306
3307 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
3308 if (!queue)
3309 return ERR_PTR(-ENOMEM);
3310
3311 queue->fence_ctx.id = dma_fence_context_alloc(1);
3312 spin_lock_init(&queue->fence_ctx.lock);
3313 INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
3314
3315 queue->priority = args->priority;
3316
3317 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
3318 args->ringbuf_size,
3319 DRM_PANTHOR_BO_NO_MMAP,
3320 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3321 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3322 PANTHOR_VM_KERNEL_AUTO_VA);
3323 if (IS_ERR(queue->ringbuf)) {
3324 ret = PTR_ERR(queue->ringbuf);
3325 goto err_free_queue;
3326 }
3327
3328 ret = panthor_kernel_bo_vmap(queue->ringbuf);
3329 if (ret)
3330 goto err_free_queue;
3331
3332 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev,
3333 &queue->iface.input,
3334 &queue->iface.output,
3335 &queue->iface.input_fw_va,
3336 &queue->iface.output_fw_va);
3337 if (IS_ERR(queue->iface.mem)) {
3338 ret = PTR_ERR(queue->iface.mem);
3339 goto err_free_queue;
3340 }
3341
3342 queue->profiling.slot_count =
3343 calc_profiling_ringbuf_num_slots(group->ptdev, args->ringbuf_size);
3344
3345 queue->profiling.slots =
3346 panthor_kernel_bo_create(group->ptdev, group->vm,
3347 queue->profiling.slot_count *
3348 sizeof(struct panthor_job_profiling_data),
3349 DRM_PANTHOR_BO_NO_MMAP,
3350 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3351 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3352 PANTHOR_VM_KERNEL_AUTO_VA);
3353
3354 if (IS_ERR(queue->profiling.slots)) {
3355 ret = PTR_ERR(queue->profiling.slots);
3356 goto err_free_queue;
3357 }
3358
3359 ret = panthor_kernel_bo_vmap(queue->profiling.slots);
3360 if (ret)
3361 goto err_free_queue;
3362
3363 /*
3364 * Credit limit argument tells us the total number of instructions
3365 * across all CS slots in the ringbuffer, with some jobs requiring
3366 * twice as many as others, depending on their profiling status.
3367 */
3368 ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops,
3369 group->ptdev->scheduler->wq, 1,
3370 args->ringbuf_size / sizeof(u64),
3371 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
3372 group->ptdev->reset.wq,
3373 NULL, "panthor-queue", group->ptdev->base.dev);
3374 if (ret)
3375 goto err_free_queue;
3376
3377 drm_sched = &queue->scheduler;
3378 ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
 if (ret)
 goto err_free_queue;
3379
3380 return queue;
3381
3382 err_free_queue:
3383 group_free_queue(group, queue);
3384 return ERR_PTR(ret);
3385 }
3386
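/* Account all kernel BOs backing the group (suspend buffers, syncobjs, and the
 * per-queue ring buffers, interface memory and profiling slots) in
 * group->fdinfo.kbo_sizes.
 */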
3387 static void add_group_kbo_sizes(struct panthor_device *ptdev,
3388 struct panthor_group *group)
3389 {
3390 struct panthor_queue *queue;
3391 int i;
3392
3393 if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(group)))
3394 return;
3395 if (drm_WARN_ON(&ptdev->base, ptdev != group->ptdev))
3396 return;
3397
3398 group->fdinfo.kbo_sizes += group->suspend_buf->obj->size;
3399 group->fdinfo.kbo_sizes += group->protm_suspend_buf->obj->size;
3400 group->fdinfo.kbo_sizes += group->syncobjs->obj->size;
3401
3402 for (i = 0; i < group->queue_count; i++) {
3403 queue = group->queues[i];
3404 group->fdinfo.kbo_sizes += queue->ringbuf->obj->size;
3405 group->fdinfo.kbo_sizes += queue->iface.mem->obj->size;
3406 group->fdinfo.kbo_sizes += queue->profiling.slots->obj->size;
3407 }
3408 }
3409
3410 #define MAX_GROUPS_PER_POOL 128
3411
3412 int panthor_group_create(struct panthor_file *pfile,
3413 const struct drm_panthor_group_create *group_args,
3414 const struct drm_panthor_queue_create *queue_args)
3415 {
3416 struct panthor_device *ptdev = pfile->ptdev;
3417 struct panthor_group_pool *gpool = pfile->groups;
3418 struct panthor_scheduler *sched = ptdev->scheduler;
3419 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3420 struct panthor_group *group = NULL;
3421 u32 gid, i, suspend_size;
3422 int ret;
3423
3424 if (group_args->pad)
3425 return -EINVAL;
3426
3427 if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
3428 return -EINVAL;
3429
3430 if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
3431 (group_args->fragment_core_mask & ~ptdev->gpu_info.shader_present) ||
3432 (group_args->tiler_core_mask & ~ptdev->gpu_info.tiler_present))
3433 return -EINVAL;
3434
3435 if (hweight64(group_args->compute_core_mask) < group_args->max_compute_cores ||
3436 hweight64(group_args->fragment_core_mask) < group_args->max_fragment_cores ||
3437 hweight64(group_args->tiler_core_mask) < group_args->max_tiler_cores)
3438 return -EINVAL;
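/*
 * Example of the consistency rule enforced just above (hypothetical
 * values): compute_core_mask = 0xf selects four shader cores
 * (hweight64(0xf) == 4), so max_compute_cores may not exceed 4; the same
 * relation holds for the fragment and tiler masks.
 */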
3439
3440 group = kzalloc(sizeof(*group), GFP_KERNEL);
3441 if (!group)
3442 return -ENOMEM;
3443
3444 spin_lock_init(&group->fatal_lock);
3445 kref_init(&group->refcount);
3446 group->state = PANTHOR_CS_GROUP_CREATED;
3447 group->csg_id = -1;
3448
3449 group->ptdev = ptdev;
3450 group->max_compute_cores = group_args->max_compute_cores;
3451 group->compute_core_mask = group_args->compute_core_mask;
3452 group->max_fragment_cores = group_args->max_fragment_cores;
3453 group->fragment_core_mask = group_args->fragment_core_mask;
3454 group->max_tiler_cores = group_args->max_tiler_cores;
3455 group->tiler_core_mask = group_args->tiler_core_mask;
3456 group->priority = group_args->priority;
3457
3458 INIT_LIST_HEAD(&group->wait_node);
3459 INIT_LIST_HEAD(&group->run_node);
3460 INIT_WORK(&group->term_work, group_term_work);
3461 INIT_WORK(&group->sync_upd_work, group_sync_upd_work);
3462 INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work);
3463 INIT_WORK(&group->release_work, group_release_work);
3464
3465 group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id);
3466 if (!group->vm) {
3467 ret = -EINVAL;
3468 goto err_put_group;
3469 }
3470
3471 suspend_size = csg_iface->control->suspend_size;
3472 group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3473 if (IS_ERR(group->suspend_buf)) {
3474 ret = PTR_ERR(group->suspend_buf);
3475 group->suspend_buf = NULL;
3476 goto err_put_group;
3477 }
3478
3479 suspend_size = csg_iface->control->protm_suspend_size;
3480 group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size);
3481 if (IS_ERR(group->protm_suspend_buf)) {
3482 ret = PTR_ERR(group->protm_suspend_buf);
3483 group->protm_suspend_buf = NULL;
3484 goto err_put_group;
3485 }
3486
3487 group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
3488 group_args->queues.count *
3489 sizeof(struct panthor_syncobj_64b),
3490 DRM_PANTHOR_BO_NO_MMAP,
3491 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
3492 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
3493 PANTHOR_VM_KERNEL_AUTO_VA);
3494 if (IS_ERR(group->syncobjs)) {
3495 ret = PTR_ERR(group->syncobjs);
3496 goto err_put_group;
3497 }
3498
3499 ret = panthor_kernel_bo_vmap(group->syncobjs);
3500 if (ret)
3501 goto err_put_group;
3502
3503 memset(group->syncobjs->kmap, 0,
3504 group_args->queues.count * sizeof(struct panthor_syncobj_64b));
3505
3506 for (i = 0; i < group_args->queues.count; i++) {
3507 group->queues[i] = group_create_queue(group, &queue_args[i]);
3508 if (IS_ERR(group->queues[i])) {
3509 ret = PTR_ERR(group->queues[i]);
3510 group->queues[i] = NULL;
3511 goto err_put_group;
3512 }
3513
3514 group->queue_count++;
3515 }
3516
3517 group->idle_queues = GENMASK(group->queue_count - 1, 0);
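/* e.g. with queue_count == 3, GENMASK(2, 0) == 0b111 marks all three queues idle. */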
3518
3519 ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
3520 if (ret)
3521 goto err_put_group;
3522
3523 mutex_lock(&sched->reset.lock);
3524 if (atomic_read(&sched->reset.in_progress)) {
3525 panthor_group_stop(group);
3526 } else {
3527 mutex_lock(&sched->lock);
3528 list_add_tail(&group->run_node,
3529 &sched->groups.idle[group->priority]);
3530 mutex_unlock(&sched->lock);
3531 }
3532 mutex_unlock(&sched->reset.lock);
3533
3534 add_group_kbo_sizes(group->ptdev, group);
3535 spin_lock_init(&group->fdinfo.lock);
3536
3537 return gid;
3538
3539 err_put_group:
3540 group_put(group);
3541 return ret;
3542 }
3543
3544 int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
3545 {
3546 struct panthor_group_pool *gpool = pfile->groups;
3547 struct panthor_device *ptdev = pfile->ptdev;
3548 struct panthor_scheduler *sched = ptdev->scheduler;
3549 struct panthor_group *group;
3550
3551 group = xa_erase(&gpool->xa, group_handle);
3552 if (!group)
3553 return -EINVAL;
3554
3555 for (u32 i = 0; i < group->queue_count; i++) {
3556 if (group->queues[i])
3557 drm_sched_entity_destroy(&group->queues[i]->entity);
3558 }
3559
3560 mutex_lock(&sched->reset.lock);
3561 mutex_lock(&sched->lock);
3562 group->destroyed = true;
3563 if (group->csg_id >= 0) {
3564 sched_queue_delayed_work(sched, tick, 0);
3565 } else if (!atomic_read(&sched->reset.in_progress)) {
3566 /* Remove from the run queues, so the scheduler can't
3567 * pick the group on the next tick.
3568 */
3569 list_del_init(&group->run_node);
3570 list_del_init(&group->wait_node);
3571 group_queue_work(group, term);
3572 }
3573 mutex_unlock(&sched->lock);
3574 mutex_unlock(&sched->reset.lock);
3575
3576 group_put(group);
3577 return 0;
3578 }
3579
3580 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool,
3581 u32 group_handle)
3582 {
3583 struct panthor_group *group;
3584
3585 xa_lock(&pool->xa);
3586 group = group_get(xa_load(&pool->xa, group_handle));
3587 xa_unlock(&pool->xa);
3588
3589 return group;
3590 }
3591
3592 int panthor_group_get_state(struct panthor_file *pfile,
3593 struct drm_panthor_group_get_state *get_state)
3594 {
3595 struct panthor_group_pool *gpool = pfile->groups;
3596 struct panthor_device *ptdev = pfile->ptdev;
3597 struct panthor_scheduler *sched = ptdev->scheduler;
3598 struct panthor_group *group;
3599
3600 if (get_state->pad)
3601 return -EINVAL;
3602
3603 group = group_from_handle(gpool, get_state->group_handle);
3604 if (!group)
3605 return -EINVAL;
3606
3607 memset(get_state, 0, sizeof(*get_state));
3608
3609 mutex_lock(&sched->lock);
3610 if (group->timedout)
3611 get_state->state |= DRM_PANTHOR_GROUP_STATE_TIMEDOUT;
3612 if (group->fatal_queues) {
3613 get_state->state |= DRM_PANTHOR_GROUP_STATE_FATAL_FAULT;
3614 get_state->fatal_queues = group->fatal_queues;
3615 }
3616 if (group->innocent)
3617 get_state->state |= DRM_PANTHOR_GROUP_STATE_INNOCENT;
3618 mutex_unlock(&sched->lock);
3619
3620 group_put(group);
3621 return 0;
3622 }
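
/*
 * Note on consuming the state returned above (descriptive, not part of the
 * uAPI docs): DRM_PANTHOR_GROUP_STATE_TIMEDOUT reflects group->timedout,
 * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT comes with fatal_queues set to the
 * bitmask of queues that triggered a fatal fault, and
 * DRM_PANTHOR_GROUP_STATE_INNOCENT reflects group->innocent, all sampled
 * under sched->lock.
 */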
3623
3624 int panthor_group_pool_create(struct panthor_file *pfile)
3625 {
3626 struct panthor_group_pool *gpool;
3627
3628 gpool = kzalloc(sizeof(*gpool), GFP_KERNEL);
3629 if (!gpool)
3630 return -ENOMEM;
3631
3632 xa_init_flags(&gpool->xa, XA_FLAGS_ALLOC1);
3633 pfile->groups = gpool;
3634 return 0;
3635 }
3636
3637 void panthor_group_pool_destroy(struct panthor_file *pfile)
3638 {
3639 struct panthor_group_pool *gpool = pfile->groups;
3640 struct panthor_group *group;
3641 unsigned long i;
3642
3643 if (IS_ERR_OR_NULL(gpool))
3644 return;
3645
3646 xa_for_each(&gpool->xa, i, group)
3647 panthor_group_destroy(pfile, i);
3648
3649 xa_destroy(&gpool->xa);
3650 kfree(gpool);
3651 pfile->groups = NULL;
3652 }
3653
3654 /**
3655 * panthor_fdinfo_gather_group_mem_info() - Retrieve the aggregate size of all private kernel BOs
3656 * belonging to all the groups owned by an open Panthor file
3657 * @pfile: File.
3658 * @stats: Memory statistics to be updated.
3660 */
3661 void
3662 panthor_fdinfo_gather_group_mem_info(struct panthor_file *pfile,
3663 struct drm_memory_stats *stats)
3664 {
3665 struct panthor_group_pool *gpool = pfile->groups;
3666 struct panthor_group *group;
3667 unsigned long i;
3668
3669 if (IS_ERR_OR_NULL(gpool))
3670 return;
3671
3672 xa_lock(&gpool->xa);
3673 xa_for_each(&gpool->xa, i, group) {
3674 stats->resident += group->fdinfo.kbo_sizes;
3675 if (group->csg_id >= 0)
3676 stats->active += group->fdinfo.kbo_sizes;
3677 }
3678 xa_unlock(&gpool->xa);
3679 }
3680
3681 static void job_release(struct kref *ref)
3682 {
3683 struct panthor_job *job = container_of(ref, struct panthor_job, refcount);
3684
3685 drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node));
3686
3687 if (job->base.s_fence)
3688 drm_sched_job_cleanup(&job->base);
3689
3690 if (job->done_fence && job->done_fence->ops)
3691 dma_fence_put(job->done_fence);
3692 else
3693 dma_fence_free(job->done_fence);
3694
3695 group_put(job->group);
3696
3697 kfree(job);
3698 }
3699
3700 struct drm_sched_job *panthor_job_get(struct drm_sched_job *sched_job)
3701 {
3702 if (sched_job) {
3703 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3704
3705 kref_get(&job->refcount);
3706 }
3707
3708 return sched_job;
3709 }
3710
3711 void panthor_job_put(struct drm_sched_job *sched_job)
3712 {
3713 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3714
3715 if (sched_job)
3716 kref_put(&job->refcount, job_release);
3717 }
3718
3719 struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
3720 {
3721 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3722
3723 return job->group->vm;
3724 }
3725
3726 struct drm_sched_job *
3727 panthor_job_create(struct panthor_file *pfile,
3728 u16 group_handle,
3729 const struct drm_panthor_queue_submit *qsubmit)
3730 {
3731 struct panthor_group_pool *gpool = pfile->groups;
3732 struct panthor_job *job;
3733 u32 credits;
3734 int ret;
3735
3736 if (qsubmit->pad)
3737 return ERR_PTR(-EINVAL);
3738
3739 /* If stream_addr is zero, stream_size must be zero too (and vice versa). */
3740 if ((qsubmit->stream_size == 0) != (qsubmit->stream_addr == 0))
3741 return ERR_PTR(-EINVAL);
3742
3743 /* Make sure the address is aligned on a 64-byte boundary (cacheline) and the
3744 * size is a multiple of 8 bytes (instruction size).
3745 */
3746 if ((qsubmit->stream_addr & 63) || (qsubmit->stream_size & 7))
3747 return ERR_PTR(-EINVAL);
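/* e.g. stream_addr == 0x100040 (64-byte aligned) and stream_size == 256
 * (a multiple of 8) would satisfy both constraints; hypothetical values.
 */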
3748
3749 /* bits 24:30 must be zero. */
3750 if (qsubmit->latest_flush & GENMASK(30, 24))
3751 return ERR_PTR(-EINVAL);
3752
3753 job = kzalloc(sizeof(*job), GFP_KERNEL);
3754 if (!job)
3755 return ERR_PTR(-ENOMEM);
3756
3757 kref_init(&job->refcount);
3758 job->queue_idx = qsubmit->queue_index;
3759 job->call_info.size = qsubmit->stream_size;
3760 job->call_info.start = qsubmit->stream_addr;
3761 job->call_info.latest_flush = qsubmit->latest_flush;
3762 INIT_LIST_HEAD(&job->node);
3763
3764 job->group = group_from_handle(gpool, group_handle);
3765 if (!job->group) {
3766 ret = -EINVAL;
3767 goto err_put_job;
3768 }
3769
3770 if (!group_can_run(job->group)) {
3771 ret = -EINVAL;
3772 goto err_put_job;
3773 }
3774
3775 if (job->queue_idx >= job->group->queue_count ||
3776 !job->group->queues[job->queue_idx]) {
3777 ret = -EINVAL;
3778 goto err_put_job;
3779 }
3780
3781 /* Empty command streams don't need a fence, they'll pick the one from
3782 * the previously submitted job.
3783 */
3784 if (job->call_info.size) {
3785 job->done_fence = kzalloc(sizeof(*job->done_fence), GFP_KERNEL);
3786 if (!job->done_fence) {
3787 ret = -ENOMEM;
3788 goto err_put_job;
3789 }
3790 }
3791
3792 job->profiling.mask = pfile->ptdev->profile_mask;
3793 credits = calc_job_credits(job->profiling.mask);
3794 if (credits == 0) {
3795 ret = -EINVAL;
3796 goto err_put_job;
3797 }
3798
3799 ret = drm_sched_job_init(&job->base,
3800 &job->group->queues[job->queue_idx]->entity,
3801 credits, job->group);
3802 if (ret)
3803 goto err_put_job;
3804
3805 return &job->base;
3806
3807 err_put_job:
3808 panthor_job_put(&job->base);
3809 return ERR_PTR(ret);
3810 }
3811
3812 void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched_job)
3813 {
3814 struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
3815
3816 panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
3817 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
3818 }
3819
3820 void panthor_sched_unplug(struct panthor_device *ptdev)
3821 {
3822 struct panthor_scheduler *sched = ptdev->scheduler;
3823
3824 cancel_delayed_work_sync(&sched->tick_work);
3825
3826 mutex_lock(&sched->lock);
3827 if (sched->pm.has_ref) {
3828 pm_runtime_put(ptdev->base.dev);
3829 sched->pm.has_ref = false;
3830 }
3831 mutex_unlock(&sched->lock);
3832 }
3833
3834 static void panthor_sched_fini(struct drm_device *ddev, void *res)
3835 {
3836 struct panthor_scheduler *sched = res;
3837 int prio;
3838
3839 if (!sched || !sched->csg_slot_count)
3840 return;
3841
3842 cancel_delayed_work_sync(&sched->tick_work);
3843
3844 if (sched->wq)
3845 destroy_workqueue(sched->wq);
3846
3847 if (sched->heap_alloc_wq)
3848 destroy_workqueue(sched->heap_alloc_wq);
3849
3850 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3851 drm_WARN_ON(ddev, !list_empty(&sched->groups.runnable[prio]));
3852 drm_WARN_ON(ddev, !list_empty(&sched->groups.idle[prio]));
3853 }
3854
3855 drm_WARN_ON(ddev, !list_empty(&sched->groups.waiting));
3856 }
3857
3858 int panthor_sched_init(struct panthor_device *ptdev)
3859 {
3860 struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
3861 struct panthor_fw_csg_iface *csg_iface = panthor_fw_get_csg_iface(ptdev, 0);
3862 struct panthor_fw_cs_iface *cs_iface = panthor_fw_get_cs_iface(ptdev, 0, 0);
3863 struct panthor_scheduler *sched;
3864 u32 gpu_as_count, num_groups;
3865 int prio, ret;
3866
3867 sched = drmm_kzalloc(&ptdev->base, sizeof(*sched), GFP_KERNEL);
3868 if (!sched)
3869 return -ENOMEM;
3870
3871 /* The highest bit in JOB_INT_* is reserved for global IRQs. That
3872 * leaves 31 bits for CSG IRQs, hence the MAX_CSGS clamp here.
3873 */
3874 num_groups = min_t(u32, MAX_CSGS, glb_iface->control->group_num);
3875
3876 /* The FW-side scheduler might deadlock if two groups with the same
3877 * priority try to access a set of resources that overlaps, with part
3878 * of the resources being allocated to one group and the other part to
3879 * the other group, both groups waiting for the remaining resources to
3880 * be allocated. To avoid that, it is recommended to assign each CSG a
3881 * different priority. In theory we could allow several groups to have
3882 * the same CSG priority if they don't request the same resources, but
3883 * that makes the scheduling logic more complicated, so let's clamp
3884 * the number of CSG slots to MAX_CSG_PRIO + 1 for now.
3885 */
3886 num_groups = min_t(u32, MAX_CSG_PRIO + 1, num_groups);
3887
3888 /* We need at least one AS for the MCU and one for the GPU contexts. */
3889 gpu_as_count = hweight32(ptdev->gpu_info.as_present & GENMASK(31, 1));
3890 if (!gpu_as_count) {
3891 drm_err(&ptdev->base, "Not enough AS (%d, expected at least 2)",
3892 gpu_as_count + 1);
3893 return -EINVAL;
3894 }
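
/*
 * Illustrative example: as_present == 0xff exposes eight address spaces;
 * AS0 is kept for the MCU, so hweight32(0xff & GENMASK(31, 1)) == 7
 * address spaces remain for GPU contexts.
 */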
3895
3896 sched->ptdev = ptdev;
3897 sched->sb_slot_count = CS_FEATURES_SCOREBOARDS(cs_iface->control->features);
3898 sched->csg_slot_count = num_groups;
3899 sched->cs_slot_count = csg_iface->control->stream_num;
3900 sched->as_slot_count = gpu_as_count;
3901 ptdev->csif_info.csg_slot_count = sched->csg_slot_count;
3902 ptdev->csif_info.cs_slot_count = sched->cs_slot_count;
3903 ptdev->csif_info.scoreboard_slot_count = sched->sb_slot_count;
3904
3905 sched->last_tick = 0;
3906 sched->resched_target = U64_MAX;
3907 sched->tick_period = msecs_to_jiffies(10);
3908 INIT_DELAYED_WORK(&sched->tick_work, tick_work);
3909 INIT_WORK(&sched->sync_upd_work, sync_upd_work);
3910 INIT_WORK(&sched->fw_events_work, process_fw_events_work);
3911
3912 ret = drmm_mutex_init(&ptdev->base, &sched->lock);
3913 if (ret)
3914 return ret;
3915
3916 for (prio = PANTHOR_CSG_PRIORITY_COUNT - 1; prio >= 0; prio--) {
3917 INIT_LIST_HEAD(&sched->groups.runnable[prio]);
3918 INIT_LIST_HEAD(&sched->groups.idle[prio]);
3919 }
3920 INIT_LIST_HEAD(&sched->groups.waiting);
3921
3922 ret = drmm_mutex_init(&ptdev->base, &sched->reset.lock);
3923 if (ret)
3924 return ret;
3925
3926 INIT_LIST_HEAD(&sched->reset.stopped_groups);
3927
3928 /* sched->heap_alloc_wq will be used for heap chunk allocation on
3929 * tiler OOM events, which means we can't use the same workqueue for
3930 * the scheduler because work items queued by the scheduler are in
3931 * the dma-signalling path. Allocate a dedicated heap_alloc_wq to
3932 * work around this limitation.
3933 *
3934 * FIXME: Ultimately, what we need is a failable/non-blocking GEM
3935 * allocation path that we can call when a heap OOM is reported. The
3936 * FW is smart enough to fall back on other methods if the kernel can't
3937 * allocate memory, and fail the tiling job if none of these
3938 * countermeasures worked.
3939 *
3940 * Set WQ_MEM_RECLAIM on sched->wq to unblock the situation when the
3941 * system is running out of memory.
3942 */
3943 sched->heap_alloc_wq = alloc_workqueue("panthor-heap-alloc", WQ_UNBOUND, 0);
3944 sched->wq = alloc_workqueue("panthor-csf-sched", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
3945 if (!sched->wq || !sched->heap_alloc_wq) {
3946 panthor_sched_fini(&ptdev->base, sched);
3947 drm_err(&ptdev->base, "Failed to allocate the workqueues");
3948 return -ENOMEM;
3949 }
3950
3951 ret = drmm_add_action_or_reset(&ptdev->base, panthor_sched_fini, sched);
3952 if (ret)
3953 return ret;
3954
3955 ptdev->scheduler = sched;
3956 return 0;
3957 }
3958