Lines Matching +full:10 +full:base +full:- +full:t1
drivers/gpu/drm/i915/gt/selftest_migrate.c
1 // SPDX-License-Identifier: MIT
18 CHUNK_SZ - SZ_4K,
44 struct drm_i915_private *i915 = migrate->context->engine->i915; in copy()
56 sz = src->base.size; in copy()
94 if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS) in copy()
110 err = -ETIME; in copy()
122 err = -EINVAL; in copy()
151 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm); in intel_context_copy_ccs()
154 GEM_BUG_ON(ce->ring->size < SZ_64K); in intel_context_copy_ccs()
157 if (HAS_64K_PAGES(ce->engine->i915)) in intel_context_copy_ccs()
174 if (rq->engine->emit_init_breadcrumb) { in intel_context_copy_ccs()
175 err = rq->engine->emit_init_breadcrumb(rq); in intel_context_copy_ccs()
194 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_copy_ccs()
203 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE); in intel_context_copy_ccs()
205 /* Arbitration is re-enabled between requests. */ in intel_context_copy_ccs()
234 if (!m->context) in intel_migrate_ccs_copy()
235 return -ENODEV; in intel_migrate_ccs_copy()
239 ce = intel_context_get(m->context); in intel_migrate_ccs_copy()
263 struct drm_i915_private *i915 = migrate->context->engine->i915; in clear()
277 sz = obj->base.size; in clear()
302 obj->mm.pages->sgl, in clear()
303 obj->pat_index, in clear()
309 err = -ETIME; in clear()
322 err = -ETIME; in clear()
341 err = -EINVAL; in clear()
353 obj->mm.pages->sgl, in clear()
354 obj->pat_index, in clear()
360 err = -ETIME; in clear()
375 int ccs_bytes_left = (ccs_bytes - i * PAGE_SIZE) / sizeof(u32); in clear()
385 err = -EINVAL; in clear()
396 if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS) in clear()
398 if (rq && err != -EINVAL) { in clear()
417 src->mm.pages->sgl, src->pat_index, in __migrate_copy()
419 dst->mm.pages->sgl, dst->pat_index, in __migrate_copy()
430 return intel_context_migrate_copy(migrate->context, NULL, in __global_copy()
431 src->mm.pages->sgl, src->pat_index, in __global_copy()
433 dst->mm.pages->sgl, dst->pat_index, in __global_copy()
457 obj->mm.pages->sgl, in __migrate_clear()
458 obj->pat_index, in __migrate_clear()
469 return intel_context_migrate_clear(migrate->context, NULL, in __global_clear()
470 obj->mm.pages->sgl, in __global_clear()
471 obj->pat_index, in __global_clear()
491 struct intel_migrate *migrate = &gt->migrate; in live_migrate_copy()
492 struct drm_i915_private *i915 = migrate->context->engine->i915; in live_migrate_copy()
513 struct intel_migrate *migrate = &gt->migrate; in live_migrate_clear()
514 struct drm_i915_private *i915 = migrate->context->engine->i915; in live_migrate_clear()
542 igt_spinner_end(&st->spin); in spinner_kill()
549 struct intel_migrate *migrate = &gt->migrate; in live_emit_pte_full_ring()
550 struct drm_i915_private *i915 = migrate->context->engine->i915; in live_emit_pte_full_ring()
561 * rq->reserved_space when returning from emit_pte(), if the ring is in live_emit_pte_full_ring()
566 return -ENOMEM; in live_emit_pte_full_ring()
584 ce->ring_size = SZ_4K; /* Not too big */ in live_emit_pte_full_ring()
598 err = -EIO; in live_emit_pte_full_ring()
604 * ring->reserved_space at the end. To actually emit the PTEs we require in live_emit_pte_full_ring()
622 sz = (rq->ring->space - rq->reserved_space) / sizeof(u32) - in live_emit_pte_full_ring()
624 sz = min_t(u32, sz, (SZ_1K - rq->reserved_space) / sizeof(u32) - in live_emit_pte_full_ring()
636 pr_info("%s emit=%u sz=%d\n", __func__, rq->ring->emit, sz); in live_emit_pte_full_ring()
639 } while (rq->ring->space > (rq->reserved_space + in live_emit_pte_full_ring()
649 pr_info("%s emite_pte ring space=%u\n", __func__, rq->ring->space); in live_emit_pte_full_ring()
650 it = sg_sgt(obj->mm.pages->sgl); in live_emit_pte_full_ring()
651 len = emit_pte(rq, &it, obj->pat_index, false, 0, CHUNK_SZ); in live_emit_pte_full_ring()
653 err = -EINVAL; in live_emit_pte_full_ring()
662 i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */ in live_emit_pte_full_ring()
703 tsk = kthread_run(fn, &thread[i], "igt-%d", i); in threaded_migrate()
713 msleep(10 * n_cpus); /* start all threads before we kthread_stop() */ in threaded_migrate()
735 return migrate_copy(tm->migrate, 2 * CHUNK_SZ, &tm->prng); in __thread_migrate_copy()
741 struct intel_migrate *migrate = &gt->migrate; in thread_migrate_copy()
750 return global_copy(tm->migrate, 2 * CHUNK_SZ, &tm->prng); in __thread_global_copy()
756 struct intel_migrate *migrate = &gt->migrate; in thread_global_copy()
765 return migrate_clear(tm->migrate, 2 * CHUNK_SZ, &tm->prng); in __thread_migrate_clear()
772 return global_clear(tm->migrate, 2 * CHUNK_SZ, &tm->prng); in __thread_global_clear()
778 struct intel_migrate *migrate = &gt->migrate; in thread_migrate_clear()
786 struct intel_migrate *migrate = &gt->migrate; in thread_global_clear()
804 if (!gt->migrate.context) in intel_migrate_live_selftests()
817 obj = i915_gem_object_create_lmem(gt->i915, sz, 0); in create_init_lmem_internal()
820 obj = i915_gem_object_create_internal(gt->i915, sz); in create_init_lmem_internal()
855 ktime_t t0, t1; in __perf_clear_blt() local
863 err = -EIO; in __perf_clear_blt()
869 t1 = ktime_get(); in __perf_clear_blt()
870 t[pass] = ktime_sub(t1, t0); in __perf_clear_blt()
877 ce->engine->name, sz >> 10, in __perf_clear_blt()
903 err = __perf_clear_blt(gt->migrate.context, in perf_clear_blt()
904 dst->mm.pages->sgl, in perf_clear_blt()
905 i915_gem_get_pat_index(gt->i915, in perf_clear_blt()
934 ktime_t t0, t1; in __perf_copy_blt() local
946 err = -EIO; in __perf_copy_blt()
952 t1 = ktime_get(); in __perf_copy_blt()
953 t[pass] = ktime_sub(t1, t0); in __perf_copy_blt()
960 ce->engine->name, sz >> 10, in __perf_copy_blt()
987 sz = src->base.size; in perf_copy_blt()
994 err = __perf_copy_blt(gt->migrate.context, in perf_copy_blt()
995 src->mm.pages->sgl, in perf_copy_blt()
996 i915_gem_get_pat_index(gt->i915, in perf_copy_blt()
999 dst->mm.pages->sgl, in perf_copy_blt()
1000 i915_gem_get_pat_index(gt->i915, in perf_copy_blt()
1028 if (!gt->migrate.context) in intel_migrate_perf_selftests()
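
For orientation, the t0/t1 pairs matched in __perf_clear_blt() and __perf_copy_blt() bracket each blitter pass and store the per-pass delta in t[pass], with the result reported against sz >> 10 (KiB). A minimal userspace sketch of that timing pattern, assuming clock_gettime() in place of the kernel's ktime_get() and a plain memset() as a stand-in for the timed blitter work (neither is the driver's code):

/* Hypothetical userspace sketch of the per-pass timing pattern seen in
 * __perf_clear_blt(): take t0, run the work, take t1, store the delta,
 * then report the median pass against the size in KiB (sz >> 10).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <time.h>

#define PASSES 5
#define SZ (4u << 20)	/* 4 MiB buffer, an arbitrary test size */

static int cmp_u64(const void *a, const void *b)
{
	uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;

	return (x > y) - (x < y);
}

int main(void)
{
	uint64_t t[PASSES];
	struct timespec t0, t1;
	void *buf = malloc(SZ);
	int pass;

	if (!buf)
		return 1;

	for (pass = 0; pass < PASSES; pass++) {
		clock_gettime(CLOCK_MONOTONIC, &t0);
		memset(buf, 0, SZ);		/* stand-in for the blitter clear */
		clock_gettime(CLOCK_MONOTONIC, &t1);
		t[pass] = (t1.tv_sec - t0.tv_sec) * 1000000000ull +
			  (t1.tv_nsec - t0.tv_nsec);
	}

	/* Sort the passes and report the median one. */
	qsort(t, PASSES, sizeof(t[0]), cmp_u64);
	printf("clear %u KiB: median %llu ns\n",
	       SZ >> 10, (unsigned long long)t[PASSES / 2]);

	free(buf);
	return 0;
}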