/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "igt.h"
#include "igt_dummyload.h"
#include "i915/gem_vm.h"

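/*
 * Thin wrappers around the raw VM_CREATE / VM_DESTROY / CONTEXT_CREATE_EXT
 * ioctls that return -errno on failure, so the tests below can assert on
 * exact error codes instead of aborting inside the library helpers.
 */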
static int vm_create_ioctl(int i915, struct drm_i915_gem_vm_control *ctl)
{
	int err = 0;
	if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_VM_CREATE, ctl)) {
		err = -errno;
		igt_assume(err);
	}
	errno = 0;
	return err;
}

static int vm_destroy_ioctl(int i915, struct drm_i915_gem_vm_control *ctl)
{
	int err = 0;
	if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_VM_DESTROY, ctl)) {
		err = -errno;
		igt_assume(err);
	}
	errno = 0;
	return err;
}

static int ctx_create_ioctl(int i915,
			    struct drm_i915_gem_context_create_ext *arg)
{
	int err = 0;
	if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, arg)) {
		err = -errno;
		igt_assume(err);
	}
	errno = 0;
	return err;
}

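/* Probe whether the kernel supports GEM_VM_CREATE (requires full-ppgtt). */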
static bool has_vm(int i915)
{
	struct drm_i915_gem_vm_control ctl = {};
	int err;

	err = vm_create_ioctl(i915, &ctl);
	switch (err) {
	case -EINVAL: /* unknown ioctl */
	case -ENODEV: /* !full-ppgtt */
		return false;

	case 0:
		gem_vm_destroy(i915, ctl.vm_id);
		return true;

	default:
		igt_fail_on_f(err, "Unknown response from VM_CREATE\n");
		return false;
	}
}

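/*
 * invalid-create: VM_CREATE must ignore a stale vm_id in the argument,
 * reject unknown flags with -EINVAL, reject an unreadable extension chain
 * with -EFAULT, and reject an unknown extension name with -EINVAL.
 */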
static void invalid_create(int i915)
{
	struct drm_i915_gem_vm_control ctl = {};
	struct i915_user_extension ext = { .name = -1 };

	igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
	gem_vm_destroy(i915, ctl.vm_id);

	ctl.vm_id = 0xdeadbeef;
	igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
	gem_vm_destroy(i915, ctl.vm_id);
	ctl.vm_id = 0;

	ctl.flags = -1;
	igt_assert_eq(vm_create_ioctl(i915, &ctl), -EINVAL);
	ctl.flags = 0;

	ctl.extensions = -1;
	igt_assert_eq(vm_create_ioctl(i915, &ctl), -EFAULT);
	ctl.extensions = to_user_pointer(&ext);
	igt_assert_eq(vm_create_ioctl(i915, &ctl), -EINVAL);
	ctl.extensions = 0;
}

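/*
 * invalid-destroy: VM_DESTROY must return -ENOENT for an unknown or
 * already destroyed vm_id, and -EINVAL for unknown flags or extensions.
 */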
static void invalid_destroy(int i915)
{
	struct drm_i915_gem_vm_control ctl = {};

	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -ENOENT);

	igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), 0);
	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -ENOENT);

	igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
	ctl.vm_id = ctl.vm_id + 1; /* assumes no one else allocated */
	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -ENOENT);
	ctl.vm_id = ctl.vm_id - 1;
	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), 0);

	igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
	ctl.flags = -1;
	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -EINVAL);
	ctl.flags = 0;
	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), 0);

	igt_assert_eq(vm_create_ioctl(i915, &ctl), 0);
	ctl.extensions = -1;
	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), -EINVAL);
	ctl.extensions = 0;
	igt_assert_eq(vm_destroy_ioctl(i915, &ctl), 0);
}

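/* Create a batch buffer containing only MI_BATCH_BUFFER_END at @offset. */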
static uint32_t __batch_create(int i915, uint32_t offset)
{
	const uint32_t bbe = MI_BATCH_BUFFER_END;
	uint32_t handle;

	handle = gem_create(i915, ALIGN(offset + 4, 4096));
	gem_write(i915, handle, offset, &bbe, sizeof(bbe));

	return handle;
}

static uint32_t batch_create(int i915)
{
	return __batch_create(i915, 0);
}

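/*
 * Submit the same batch from two contexts that are expected to share a VM:
 * softpin it at 48M from ctx_a, then resubmit from ctx_b without a
 * requested offset and check that the already bound VMA keeps 48M.
 */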
static void check_same_vm(int i915, uint32_t ctx_a, uint32_t ctx_b)
{
	struct drm_i915_gem_exec_object2 batch = {
		.handle = batch_create(i915),
	};
	struct drm_i915_gem_execbuffer2 eb = {
		.buffers_ptr = to_user_pointer(&batch),
		.buffer_count = 1,
	};

	/* First verify that we try to use "softpinning" by default */
	batch.offset = 48 << 20;
	eb.rsvd1 = ctx_a;
	gem_execbuf(i915, &eb);
	igt_assert_eq_u64(batch.offset, 48 << 20);

	/* An already active VMA will try to keep its offset */
	batch.offset = 0;
	eb.rsvd1 = ctx_b;
	gem_execbuf(i915, &eb);
	igt_assert_eq_u64(batch.offset, 48 << 20);

	gem_sync(i915, batch.handle);
	gem_close(i915, batch.handle);
}

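/*
 * create-ext: create two contexts with the same explicit VM via the
 * SETPARAM extension of CONTEXT_CREATE_EXT, then verify they share an
 * address space even after the vm_id itself has been dropped.
 */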
static void create_ext(int i915)
{
	struct drm_i915_gem_context_create_ext_setparam ext = {
		{ .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		{ .param = I915_CONTEXT_PARAM_VM }
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS
	};
	uint32_t ctx[2];

	igt_require(ctx_create_ioctl(i915, &create) == 0);
	gem_context_destroy(i915, create.ctx_id);

	create.extensions = to_user_pointer(&ext);

	ext.param.value = gem_vm_create(i915);

	igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
	ctx[0] = create.ctx_id;

	igt_assert_eq(ctx_create_ioctl(i915, &create), 0);
	ctx[1] = create.ctx_id;

	gem_vm_destroy(i915, ext.param.value);

	check_same_vm(i915, ctx[0], ctx[1]);

	gem_context_destroy(i915, ctx[1]);
	gem_context_destroy(i915, ctx[0]);
}

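/*
 * execbuf: replace the VM of the default context via I915_CONTEXT_PARAM_VM
 * between execbufs, checking that the presumed offset carried in the exec
 * object is reused in each freshly assigned VM.
 */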
static void execbuf(int i915)
{
	struct drm_i915_gem_exec_object2 batch = {
		.handle = batch_create(i915),
	};
	struct drm_i915_gem_execbuffer2 eb = {
		.buffers_ptr = to_user_pointer(&batch),
		.buffer_count = 1,
	};
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_VM,
	};

	/* First verify that we try to use "softpinning" by default */
	batch.offset = 48 << 20;
	gem_execbuf(i915, &eb);
	igt_assert_eq_u64(batch.offset, 48 << 20);

	arg.value = gem_vm_create(i915);
	gem_context_set_param(i915, &arg);
	gem_execbuf(i915, &eb);
	igt_assert_eq_u64(batch.offset, 48 << 20);
	gem_vm_destroy(i915, arg.value);

	arg.value = gem_vm_create(i915);
	gem_context_set_param(i915, &arg);
	batch.offset = 0;
	gem_execbuf(i915, &eb);
	igt_assert_eq_u64(batch.offset, 0);
	gem_vm_destroy(i915, arg.value);

	gem_sync(i915, batch.handle);
	gem_close(i915, batch.handle);
}

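/*
 * Emit an MI_STORE_DWORD_IMM from @ctx that writes @value to GTT address
 * @addr, using the generation specific layout of the command.
 */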
static void
write_to_address(int fd, uint32_t ctx, uint64_t addr, uint32_t value)
{
	const int gen = intel_gen(intel_get_drm_devid(fd));
	struct drm_i915_gem_exec_object2 batch = {
		.handle = gem_create(fd, 4096)
	};
	struct drm_i915_gem_execbuffer2 eb = {
		.buffers_ptr = to_user_pointer(&batch),
		.buffer_count = 1,
		.rsvd1 = ctx,
	};
	uint32_t cs[16];
	int i;

	i = 0;
	cs[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
	if (gen >= 8) {
		cs[++i] = addr;
		cs[++i] = addr >> 32;
	} else if (gen >= 4) {
		cs[++i] = 0;
		cs[++i] = addr;
	} else {
		cs[i]--;
		cs[++i] = addr;
	}
	cs[++i] = value;
	cs[++i] = MI_BATCH_BUFFER_END;
	gem_write(fd, batch.handle, 0, cs, sizeof(cs));

	gem_execbuf(fd, &eb);
	igt_assert(batch.offset != addr);

	gem_sync(fd, batch.handle);
	gem_close(fd, batch.handle);
}

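/*
 * isolation: vm_ids are local to their fd. Assign numerically identical
 * vm_ids to contexts on two different fds, bind an object at a known
 * address in the first VM, then check that a store from the second fd to
 * the same address does not land in the first fd's object.
 */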
static void isolation(int i915)
{
	struct drm_i915_gem_exec_object2 obj[2] = {
		{
			.handle = gem_create(i915, 4096),
			.offset = 1 << 20
		},
		{ .handle = batch_create(i915), }
	};
	struct drm_i915_gem_execbuffer2 eb = {
		.buffers_ptr = to_user_pointer(obj),
		.buffer_count = 2,
	};
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_VM,
	};
	int other = gem_reopen_driver(i915);
	uint32_t ctx[2], vm[2], result;
	int loops = 4096;

	/* A vm_id on one fd is not the same on another fd */
	igt_assert_neq(i915, other);

	ctx[0] = gem_context_create(i915);
	ctx[1] = gem_context_create(other);

	vm[0] = gem_vm_create(i915);
	do {
		vm[1] = gem_vm_create(other);
	} while (vm[1] != vm[0] && loops-- > 0);
	igt_assert(loops);

	arg.ctx_id = ctx[0];
	arg.value = vm[0];
	gem_context_set_param(i915, &arg);

	arg.ctx_id = ctx[1];
	arg.value = vm[1];
	gem_context_set_param(other, &arg);

	eb.rsvd1 = ctx[0];
	gem_execbuf(i915, &eb); /* bind object into vm[0] */

	/* Verify the trick with the assumed target address works */
	write_to_address(i915, ctx[0], obj[0].offset, 1);
	gem_read(i915, obj[0].handle, 0, &result, sizeof(result));
	igt_assert_eq(result, 1);

	/* Now check that we can't write to vm[0] from the second fd/vm */
	write_to_address(other, ctx[1], obj[0].offset, 2);
	gem_read(i915, obj[0].handle, 0, &result, sizeof(result));
	igt_assert_eq(result, 1);

	close(other);

	gem_close(i915, obj[1].handle);
	gem_close(i915, obj[0].handle);

	gem_context_destroy(i915, ctx[0]);
	gem_vm_destroy(i915, vm[0]);
}

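/*
 * async-destroy: destroy the VM and the context while spinning batches are
 * still executing on them; both spinners are expected to be able to run to
 * completion afterwards.
 */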
static void async_destroy(int i915)
{
	struct drm_i915_gem_context_param arg = {
		.ctx_id = gem_context_create(i915),
		.value = gem_vm_create(i915),
		.param = I915_CONTEXT_PARAM_VM,
	};
	igt_spin_t *spin[2];

	spin[0] = igt_spin_new(i915,
			       .ctx = arg.ctx_id,
			       .flags = IGT_SPIN_POLL_RUN);
	igt_spin_busywait_until_started(spin[0]);

	gem_context_set_param(i915, &arg);
	spin[1] = __igt_spin_new(i915, .ctx = arg.ctx_id);

	igt_spin_end(spin[0]);
	gem_sync(i915, spin[0]->handle);

	gem_vm_destroy(i915, arg.value);
	gem_context_destroy(i915, arg.ctx_id);

	igt_spin_end(spin[1]);
	gem_sync(i915, spin[1]->handle);

	for (int i = 0; i < ARRAY_SIZE(spin); i++)
		igt_spin_free(i915, spin[i]);
}

igt_main
{
	int i915 = -1;

	igt_fixture {
		i915 = drm_open_driver(DRIVER_INTEL);
		igt_require_gem(i915);
		igt_require(has_vm(i915));
	}

	igt_subtest("invalid-create")
		invalid_create(i915);

	igt_subtest("invalid-destroy")
		invalid_destroy(i915);

	igt_subtest_group {
		igt_fixture {
			gem_context_require_param(i915, I915_CONTEXT_PARAM_VM);
		}

		igt_subtest("execbuf")
			execbuf(i915);

		igt_subtest("isolation")
			isolation(i915);

		igt_subtest("create-ext")
			create_ext(i915);

		igt_subtest("async-destroy")
			async_destroy(i915);
	}

	igt_fixture {
		close(i915);
	}
}