Lines matching "parent" and "locked" in drivers/gpu/drm/i915/i915_scheduler.c
(fragments grouped by function; elided context is marked with ...)

/* SPDX-License-Identifier: MIT */

In assert_priolists():

	GEM_BUG_ON(rb_first_cached(&sched_engine->queue) !=
		   rb_first(&sched_engine->queue.rb_root));
	...
	for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
		...
		GEM_BUG_ON(p->priority > last_prio);
		last_prio = p->priority;
	}

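The `p` in that loop comes from a to_priolist() accessor (named per the upstream file, as far as I recall), the usual rb_entry() wrapper. A sketch of it, assuming struct i915_priolist embeds its rb_node as ->node, which the rb_link_node(&p->node, ...) call further down implies:

	static inline struct i915_priolist *to_priolist(struct rb_node *rb)
	{
		return rb_entry(rb, struct i915_priolist, node);
	}
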
In i915_sched_lookup_priolist():

	struct rb_node **parent, *rb;
	bool first = true;
	...
	lockdep_assert_held(&sched_engine->lock);
	...
	if (unlikely(sched_engine->no_priolist))
		prio = I915_PRIORITY_NORMAL;
	...
	parent = &sched_engine->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			return &p->requests;
		}
	}
	...
	p = &sched_engine->default_priolist;
	...
	/* on allocation failure, fall back to a single default list */
	sched_engine->no_priolist = true;
	...
	p->priority = prio;
	INIT_LIST_HEAD(&p->requests);

	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &sched_engine->queue, first);

	return &p->requests;

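The walk above is the textbook find-or-insert on an rbtree, in its cached-leftmost variant so the highest-priority bucket stays reachable in O(1) via rb_first_cached(). A self-contained sketch of the same pattern, using a hypothetical prionode type rather than the driver's structs (needs <linux/rbtree.h>, <linux/slab.h>, <linux/list.h>):

	struct prionode {
		struct rb_node node;
		int priority;
		struct list_head requests;	/* fifo of equal-priority work */
	};

	static struct list_head *
	prionode_lookup(struct rb_root_cached *root, int prio)
	{
		struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
		bool first = true;	/* would a new node be the leftmost? */
		struct prionode *p;

		while (*link) {
			parent = *link;
			p = rb_entry(parent, struct prionode, node);
			if (prio > p->priority) {
				link = &parent->rb_left;	/* higher prio sorts left */
			} else if (prio < p->priority) {
				link = &parent->rb_right;
				first = false;	/* something sorts before us */
			} else {
				return &p->requests;	/* equal prio: fifo */
			}
		}

		p = kmalloc(sizeof(*p), GFP_ATOMIC);
		if (!p)
			return NULL;

		p->priority = prio;
		INIT_LIST_HEAD(&p->requests);
		rb_link_node(&p->node, parent, link);
		rb_insert_color_cached(&p->node, root, first);
		return &p->requests;
	}

Where this sketch returns NULL, the driver instead degrades gracefully: the no_priolist/default_priolist fragments above collapse everything onto one embedded list at I915_PRIORITY_NORMAL after an atomic allocation failure.
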
In lock_sched_engine() (locked is the sched_engine whose lock is currently held):

	GEM_BUG_ON(!locked);

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
		spin_unlock(&locked->lock);
		...
		spin_lock(&sched_engine->lock);
		locked = sched_engine;
	}

	GEM_BUG_ON(locked != sched_engine);
	return locked;

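This is a lock-chasing loop: rq->engine may be retargeted (the virtual-engine case the comment describes) whenever the corresponding engine lock is not held, so the code locks whatever the pointer currently says and then revalidates. A stripped-down sketch of the shape, with hypothetical engine/request types (needs <linux/spinlock.h>):

	struct engine {
		spinlock_t lock;
	};

	struct request {
		struct engine *engine;	/* stable only under engine->lock */
	};

	static struct engine *
	lock_engine_of(struct request *rq, struct engine *locked)
	{
		struct engine *e;

		/* Lock first, recheck second: if the request migrated while
		 * we were unlocked, drop the stale lock and chase again. */
		while (locked != (e = READ_ONCE(rq->engine))) {
			spin_unlock(&locked->lock);
			spin_lock(&e->lock);
			locked = e;
		}
		return locked;	/* rq->engine now matches the lock we hold */
	}

Each pass either confirms the lock already held or drops it and follows the newest pointer, so the function returns holding the lock that matches rq->engine at that instant.
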
In __i915_schedule():

	const int prio = max(attr->priority, node->attr.priority);
	...
	/*
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	...
	 * }
	 */
	...
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;
		...
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			...
			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}
	...
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;
	}
	...
	sched_engine = node_to_request(node)->engine->sched_engine;
	spin_lock(&sched_engine->lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		struct i915_request *from = container_of(dep->signaler,
							 struct i915_request,
							 sched);
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		...
		lockdep_assert_held(&sched_engine->lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		GEM_BUG_ON(node_to_request(node)->engine->sched_engine !=
			   sched_engine);

		if (sched_engine->bump_inflight_request_prio)
			sched_engine->bump_inflight_request_prio(from, prio);

		WRITE_ONCE(node->attr.priority, prio);
		...
		/*
		 * ...
		 * See engine->submit_request()
		 */
		if (list_empty(&node->link))
			continue;
		...
		list_move_tail(&node->link, cache.priolist);
		...
		if (sched_engine->kick_backend)
			sched_engine->kick_backend(node_to_request(node), prio);
	}

	spin_unlock(&sched_engine->lock);

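Both halves of __i915_schedule() lean on a property of list_for_each_entry(): it re-reads ->next on every step, so appending work behind the cursor is safe. That is what lets the driver flatten the signaler graph onto the dfs list instead of recursing (the danger the update_priorities() comment illustrates). A toy version of the trick, with hypothetical node/edge types (needs <linux/list.h>):

	struct tnode {
		struct list_head dfs_link;	/* INIT_LIST_HEAD'd while unqueued */
		struct list_head children;	/* list of struct tedge */
	};

	struct tedge {
		struct list_head link;		/* on parent->children */
		struct tnode *child;
	};

	/* Visit everything reachable from root without recursion: each step
	 * may append more work behind the cursor, and list_for_each_entry()
	 * picks it up because ->next is re-read on every iteration. */
	static void flatten(struct tnode *root, struct list_head *dfs)
	{
		struct tnode *n;

		list_add(&root->dfs_link, dfs);
		list_for_each_entry(n, dfs, dfs_link) {
			struct tedge *e;

			list_for_each_entry(e, &n->children, link)
				if (list_empty(&e->child->dfs_link))
					list_add_tail(&e->child->dfs_link, dfs);
		}
	}

The driver queues the dependency edges (dep->dfs_link) rather than the nodes, and uses list_move_tail() so a rediscovered edge is re-queued behind the cursor instead of duplicated; walking the result in reverse then requeues the deepest signalers first, which is how fifo insertion ends up executing dependencies before their waiters.
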
In i915_schedule():

	__i915_schedule(&rq->sched, attr);

In i915_sched_node_init():

	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);

In i915_sched_node_reinit():

	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;
	...
	GEM_BUG_ON(!list_empty(&node->signalers_list));
	GEM_BUG_ON(!list_empty(&node->waiters_list));
	GEM_BUG_ON(!list_empty(&node->link));

In __i915_sched_node_add_dependency():

	INIT_LIST_HEAD(&dep->dfs_link);
	dep->signaler = signal;
	dep->waiter = node;
	dep->flags = flags;
	...
	list_add_rcu(&dep->signal_link, &node->signalers_list);
	list_add_rcu(&dep->wait_link, &signal->waiters_list);
	...
	node->flags |= signal->flags;

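The two list_add_rcu() calls publish one dependency edge on two lists at once: the waiter finds it via its signalers_list, the signaler via its waiters_list. A sketch of the edge layout these fragments imply; the real struct i915_dependency lives in i915_scheduler_types.h and may differ in detail:

	struct dependency_sketch {
		struct i915_sched_node *signaler;	/* whom we wait on */
		struct i915_sched_node *waiter;		/* who is waiting */
		struct list_head signal_link;	/* on waiter->signalers_list */
		struct list_head wait_link;	/* on signaler->waiters_list */
		struct list_head dfs_link;	/* scratch for __i915_schedule() */
		unsigned long flags;		/* e.g. I915_DEPENDENCY_ALLOC */
	};

I915_DEPENDENCY_ALLOC, tested in i915_sched_node_fini() below, is how teardown distinguishes slab-allocated edges (which must be freed) from edges embedded in a longer-lived object.
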
In i915_sched_node_add_dependency():

	return -ENOMEM;	/* dependency slab allocation failed */

In i915_sched_node_fini():

	/*
	 * ... retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->signalers_list);

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del_rcu(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}
	INIT_LIST_HEAD(&node->waiters_list);

In i915_request_show_with_schedule():

	const struct i915_request *signaler =
		node_to_request(dep->signaler);
	...
	/* Dependencies along the same timeline are expected */
	if (signaler->timeline == rq->timeline)
		continue;

In default_destroy():

	tasklet_kill(&sched_engine->tasklet); /* flush the callback */

In i915_sched_engine_create():

	kref_init(&sched_engine->ref);
	...
	sched_engine->queue = RB_ROOT_CACHED;
	sched_engine->queue_priority_hint = INT_MIN;
	sched_engine->destroy = default_destroy;
	sched_engine->disabled = default_disabled;
	...
	INIT_LIST_HEAD(&sched_engine->requests);
	INIT_LIST_HEAD(&sched_engine->hold);
	...
	spin_lock_init(&sched_engine->lock);
	lockdep_set_subclass(&sched_engine->lock, subclass);
	...
	lock_map_acquire(&sched_engine->lock.dep_map);
	lock_map_release(&sched_engine->lock.dep_map);

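A new sched_engine therefore starts refcounted at one, with an empty cached rbtree, a queue_priority_hint of INT_MIN (nothing queued), the default destroy/disabled hooks, and a lock that is immediately acquired and released once so lockdep registers the subclass right away. A hedged sketch of how a backend might wire one up, assuming the ENGINE_PHYSICAL subclass constant, the ->private_data back-pointer, and the i915_sched_engine_put() kref helper used elsewhere in the driver:

	struct i915_sched_engine *se;

	se = i915_sched_engine_create(ENGINE_PHYSICAL);
	if (!se)
		return -ENOMEM;

	se->private_data = engine;	/* backend back-pointer (assumed field) */
	engine->sched_engine = se;

	/* on teardown: drops the ref taken by kref_init() above; the
	 * default_destroy() hook kills the tasklet before freeing */
	i915_sched_engine_put(engine->sched_engine);
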
In i915_scheduler_module_init():

	/* two slab caches back this file; either failing aborts init */
	if (!slab_dependencies)
		return -ENOMEM;
	...
	if (!slab_priorities)
		goto err_priorities;
	...
err_priorities:
	kmem_cache_destroy(slab_dependencies);
	return -ENOMEM;