Lines matching "fence" in the i915 GGTT fence register handling code:
/**
 * DOC: fence register handling
 *
 * ...
 * dynamically associated with objects. Furthermore fence state is committed to
 * ...
 * cases the fence can be removed forcefully with i915_gem_object_put_fence().
 */
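Everything below operates on struct i915_fence_reg. As a reading aid, here is a minimal sketch of its bookkeeping fields, inferred purely from the accesses in these excerpts; the real definition (i915_fence_reg.h in current trees) is authoritative and may differ:

/* Sketch only: fields inferred from the usage below, not the full definition. */
struct i915_fence_reg {
        struct list_head link;          /* LRU position on ggtt->fence_list */
        struct i915_ggtt *ggtt;         /* backpointer used by fence_to_i915() */
        struct i915_vma *vma;           /* vma currently mapped through this register */
        struct i915_active active;      /* tracks GPU activity through the fence */
        atomic_t pin_count;             /* pinned fences cannot be stolen */
        int id;                         /* hardware fence register index */
        u64 start;                      /* GGTT offset of the fenced range */
        u64 size;                       /* size of the fenced range */
        u32 stride;                     /* tiling stride of the backing object */
        u32 tiling;                     /* I915_TILING_NONE/X/Y */
        bool dirty;                     /* register needs rewriting on next pin */
};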
static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
{
        return fence->ggtt->vm.i915;
}
static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
{
        return fence->ggtt->vm.gt->uncore;
}
static void i965_write_fence_reg(struct i915_fence_reg *fence)
{
        ...
        if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
                fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
                fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
        ...
                fence_reg_lo = FENCE_REG_965_LO(fence->id);
                fence_reg_hi = FENCE_REG_965_HI(fence->id);
        ...
        if (fence->tiling) {
                unsigned int stride = fence->stride;
                ...
                val = fence->start + fence->size - I965_FENCE_PAGE;
                ...
                val |= fence->start;
                ...
                if (fence->tiling == I915_TILING_Y)
        ...
        struct intel_uncore *uncore = fence_to_uncore(fence);

        /*
         * ...
         * for a partial fence not to be evaluated between writes, we
         * precede the update with write to turn off the fence register,
         * and only enable the fence as the last step.
         * ...
         */
        ...
}
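The comment describes a workaround for non-atomic 64-bit register updates: the fence is disabled first, both halves of the new value are written, and only the final write re-enables it. A minimal sketch of that ordering, assuming the fence_reg_lo/fence_reg_hi/val computed above and the raw accessors intel_uncore_write_fw()/intel_uncore_posting_read_fw():

        /* Disable the fence first, so a partially-written value is never live. */
        intel_uncore_write_fw(uncore, fence_reg_lo, 0);
        intel_uncore_posting_read_fw(uncore, fence_reg_lo);

        /* Land the high half, then re-enable with the low half as the last step. */
        intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
        intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
        intel_uncore_posting_read_fw(uncore, fence_reg_lo);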
static void i915_write_fence_reg(struct i915_fence_reg *fence)
{
        ...
        if (fence->tiling) {
                unsigned int stride = fence->stride;
                unsigned int tiling = fence->tiling;
                ...
                if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
                ...
                val = fence->start;
                ...
                val |= I915_FENCE_SIZE_BITS(fence->size);
        ...
        struct intel_uncore *uncore = fence_to_uncore(fence);
        i915_reg_t reg = FENCE_REG(fence->id);
        ...
}
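The elided middle of the tiled branch normalizes the stride to a tile count and packs it into the register value. A hedged reconstruction (the I830_FENCE_* shift and valid-bit names are assumptions carried over from the i830 path below):

                /* Sketch of the elided packing; bit/shift names assumed. */
                bool is_y_tiled = tiling == I915_TILING_Y;

                if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
                        stride /= 128;  /* 128-byte Y-major tiles */
                else
                        stride /= 512;  /* 512-byte X-major tiles */
                GEM_BUG_ON(!is_power_of_2(stride));

                val = fence->start;
                if (is_y_tiled)
                        val |= BIT(I830_FENCE_TILING_Y_SHIFT);
                val |= I915_FENCE_SIZE_BITS(fence->size);
                val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;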
static void i830_write_fence_reg(struct i915_fence_reg *fence)
{
        ...
        if (fence->tiling) {
                unsigned int stride = fence->stride;
                ...
                val = fence->start;
                if (fence->tiling == I915_TILING_Y)
                ...
                val |= I830_FENCE_SIZE_BITS(fence->size);
        ...
        struct intel_uncore *uncore = fence_to_uncore(fence);
        i915_reg_t reg = FENCE_REG(fence->id);
        ...
}
static void fence_write(struct i915_fence_reg *fence)
{
        struct drm_i915_private *i915 = fence_to_i915(fence);

        /*
         * Previous access through the fence register is marshalled by
         * ...
         */
        ...
                i830_write_fence_reg(fence);
        ...
                i915_write_fence_reg(fence);
        ...
                i965_write_fence_reg(fence);
        ...
}
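The elided conditions pick the per-generation writer. Since gpu_uses_fence_registers() below treats everything before gen4 as the legacy case, the dispatch is presumably keyed on the graphics version, along these lines:

        /* Assumed dispatch: gen2 -> i830, gen3 -> i915, gen4+ -> i965. */
        if (GRAPHICS_VER(i915) == 2)
                i830_write_fence_reg(fence);
        else if (GRAPHICS_VER(i915) == 3)
                i915_write_fence_reg(fence);
        else
                i965_write_fence_reg(fence);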
static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
{
        return GRAPHICS_VER(fence_to_i915(fence)) < 4;
}
static int fence_update(struct i915_fence_reg *fence,
                        struct i915_vma *vma)
{
        struct i915_ggtt *ggtt = fence->ggtt;
        struct intel_uncore *uncore = fence_to_uncore(fence);
        ...
        fence->tiling = 0;
        ...
        if (gpu_uses_fence_registers(fence)) {
        ...
        fence->start = i915_ggtt_offset(vma);
        fence->size = vma->fence_size;
        fence->stride = i915_gem_object_get_stride(vma->obj);
        fence->tiling = i915_gem_object_get_tiling(vma->obj);
        ...
        WRITE_ONCE(fence->dirty, false);

        old = xchg(&fence->vma, NULL);
        ...
        ret = i915_active_wait(&fence->active);
        ...
        fence->vma = old;
        ...
        /*
         * ...
         * stealing the fence.
         */
        GEM_BUG_ON(old->fence != fence);
        ...
        old->fence = NULL;
        ...
        list_move(&fence->link, &ggtt->fence_list);
        ...
        /*
         * ...
         * This only works for removing the fence register, on acquisition
         * the caller must hold the rpm wakeref. The fence register must
         * ...
         */
        ...
        WRITE_ONCE(fence->vma, vma);
        fence_write(fence);
        ...
        vma->fence = fence;
        list_move_tail(&fence->link, &ggtt->fence_list);
        ...
}
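Reading through the elisions, the protocol is: snapshot the new tiling parameters, detach whichever vma currently holds the register (waiting out any activity tracked on fence->active, and undoing the steal on error), then write the hardware state and attach the new vma. A condensed, hedged sketch of the detach step under those assumptions:

        old = xchg(&fence->vma, NULL);
        if (old) {
                ret = i915_active_wait(&fence->active);
                if (ret) {
                        fence->vma = old;       /* undo the steal on error */
                        return ret;
                }

                if (old != vma) {
                        GEM_BUG_ON(old->fence != fence);
                        old->fence = NULL;      /* sever the old vma's link */
                }

                list_move(&fence->link, &ggtt->fence_list);
        }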
/**
 * i915_vma_revoke_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is useful
 * ...
 */
void i915_vma_revoke_fence(struct i915_vma *vma)
{
        struct i915_fence_reg *fence = vma->fence;
        ...
        if (!fence)
                return;

        GEM_BUG_ON(fence->vma != vma);
        i915_active_wait(&fence->active);
        GEM_BUG_ON(!i915_active_is_idle(&fence->active));
        GEM_BUG_ON(atomic_read(&fence->pin_count));

        fence->tiling = 0;
        WRITE_ONCE(fence->vma, NULL);
        vma->fence = NULL;

        /*
         * ...
         * (powered down) and we skip clearing the fence register, the HW is
         * ...
         */
        with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
                fence_write(fence);
}
static bool fence_is_active(const struct i915_fence_reg *fence)
{
        return fence->vma && i915_vma_is_active(fence->vma);
}
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
        ...
        struct i915_fence_reg *fence, *fn;

        list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
                GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

                if (fence == active) /* now seen this fence twice */
                ...
                if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
                        ...
                        active = fence;
                        ...
                        list_move_tail(&fence->link, &ggtt->fence_list);
                ...
                if (atomic_read(&fence->pin_count))
                ...
                return fence;
        }
        ...
}
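Filling the elided branches, the scan appears to work like this: pinned fences are skipped outright, active fences are rotated to the back of the LRU (remembering the first one so a second encounter means a full lap with nothing idle), and the first idle, unpinned fence wins. A hedged reconstruction; the fallback error code is an assumption:

        struct i915_fence_reg *active = NULL;

        list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
                if (fence == active) /* now seen this fence twice */
                        active = ERR_PTR(-EAGAIN);

                /* Prefer idle fences so we do not have to wait on the GPU. */
                if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
                        if (!active)
                                active = fence;

                        list_move_tail(&fence->link, &ggtt->fence_list);
                        continue;
                }

                if (atomic_read(&fence->pin_count))
                        continue;

                return fence;
        }

        return ERR_PTR(-ENOSPC);        /* assumed: every fence was busy or pinned */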
static int __i915_vma_pin_fence(struct i915_vma *vma)
{
        ...
        struct i915_fence_reg *fence;
        struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
        ...
        /* Just update our place in the LRU if our fence is getting reused. */
        if (vma->fence) {
                fence = vma->fence;
                GEM_BUG_ON(fence->vma != vma);
                atomic_inc(&fence->pin_count);
                if (!fence->dirty) {
                        list_move_tail(&fence->link, &ggtt->fence_list);
        ...
                fence = fence_find(ggtt);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);

                GEM_BUG_ON(atomic_read(&fence->pin_count));
                atomic_inc(&fence->pin_count);
        ...
        err = fence_update(fence, set);
        ...
        GEM_BUG_ON(fence->vma != set);
        GEM_BUG_ON(vma->fence != (set ? fence : NULL));
        ...
        atomic_dec(&fence->pin_count);
        ...
}
/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 * ...
 * This function walks the fence regs looking for a free one for @obj,
 * ...
 * For an untiled surface, this removes any existing fence.
 * ...
 */
int i915_vma_pin_fence(struct i915_vma *vma)
{
        ...
        if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
                return 0;

        /*
         * ...
         * must keep the device awake whilst using the fence.
         * ...
         */
        ...
}
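A typical caller pairs this with i915_vma_unpin_fence(); an illustrative fragment, assuming the required locking and GGTT pinning are already in place:

        /* Illustrative only: vma is assumed already pinned in the GGTT. */
        int err;

        err = i915_vma_pin_fence(vma);
        if (err)
                return err;

        if (vma->fence) {
                /*
                 * Detiled access through the mapping is now safe, and the
                 * fence cannot be stolen until it is unpinned again.
                 */
        }

        i915_vma_unpin_fence(vma);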
/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @ggtt: Global GTT
 *
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU to use.
 */
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
        struct i915_fence_reg *fence;
        ...
        /* Keep at least one fence available for the display engine. */
        count = 0;
        list_for_each_entry(fence, &ggtt->fence_list, link)
                count += !atomic_read(&fence->pin_count);
        ...
        fence = fence_find(ggtt);
        if (IS_ERR(fence))
                return fence;

        if (fence->vma) {
                /* Force-remove fence from VMA */
                ret = fence_update(fence, NULL);
                ...
        }

        list_del(&fence->link);

        return fence;
}
/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a reserved fence register from vGPU back to the fence_list.
 */
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
        struct i915_ggtt *ggtt = fence->ggtt;
        ...
        list_add(&fence->link, &ggtt->fence_list);
}
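An illustrative pairing of the two vGPU entry points; the error handling and the hand-off to the guest are sketched here, not taken from the source:

        /* Illustrative: take one register out of normal LRU rotation. */
        struct i915_fence_reg *reg;

        reg = i915_reserve_fence(ggtt);
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        /* ... expose reg->id to the vGPU guest ... */

        i915_unreserve_fence(reg);      /* put it back on the LRU when done */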
/**
 * intel_ggtt_restore_fences - restore fence state
 * @ggtt: Global GTT
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume.
 */
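The body itself contains no line matching the filter; given the init loop below, it is presumably just a replay of the software-tracked state into every register, along these lines:

void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
        int i;

        /* Rewrite each register from its software-tracked state. */
        for (i = 0; i < ggtt->num_fences; i++)
                fence_write(&ggtt->fence_regs[i]);
}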
void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
{
        ...
        /* Initialize fence registers to zero */
        for (i = 0; i < num_fences; i++) {
                struct i915_fence_reg *fence = &ggtt->fence_regs[i];

                i915_active_init(&fence->active, NULL, NULL, 0);
                fence->ggtt = ggtt;
                fence->id = i;
                list_add_tail(&fence->link, &ggtt->fence_list);
        }
        ...
}
void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
{
        ...
        for (i = 0; i < ggtt->num_fences; i++) {
                struct i915_fence_reg *fence = &ggtt->fence_regs[i];

                i915_active_fini(&fence->active);
        }
}