/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
*/

#include <stdio.h>
#include <inttypes.h>

#include "CUnit/Basic.h"

#include "util_math.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

#include "vce_ib.h"
#include "frame.h"

#define IB_SIZE		4096
#define MAX_RESOURCES	16
#define FW_53_0_03 ((53 << 24) | (0 << 16) | (03 << 8))

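/* A buffer object together with its GPU VA mapping and CPU pointer. */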
struct amdgpu_vce_bo {
	amdgpu_bo_handle handle;
	amdgpu_va_handle va_handle;
	uint64_t addr;
	uint64_t size;
	uint8_t *ptr;
};

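/*
 * Encode session state: input picture buffer (vbuf), bitstream buffers (bs),
 * feedback buffers (fb), encoder context buffer (cpb) and, for the MV dump
 * test, the MV reference picture and MV output buffers.
 */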
struct amdgpu_vce_encode {
	unsigned width;
	unsigned height;
	struct amdgpu_vce_bo vbuf;
	struct amdgpu_vce_bo bs[2];
	struct amdgpu_vce_bo fb[2];
	struct amdgpu_vce_bo cpb;
	unsigned ib_len;
	bool two_instance;
	struct amdgpu_vce_bo mvrefbuf;
	struct amdgpu_vce_bo mvb;
	unsigned mvbuf_size;
};

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;
static uint32_t family_id;
static uint32_t vce_harvest_config;
static uint32_t chip_rev;
static uint32_t chip_id;
static uint32_t ids_flags;
static bool is_mv_supported = true;

static amdgpu_context_handle context_handle;
static amdgpu_bo_handle ib_handle;
static amdgpu_va_handle ib_va_handle;
static uint64_t ib_mc_address;
static uint32_t *ib_cpu;

static struct amdgpu_vce_encode enc;
static amdgpu_bo_handle resources[MAX_RESOURCES];
static unsigned num_resources;

static void amdgpu_cs_vce_create(void);
static void amdgpu_cs_vce_encode(void);
static void amdgpu_cs_vce_encode_mv(void);
static void amdgpu_cs_vce_destroy(void);

CU_TestInfo vce_tests[] = {
	{ "VCE create",  amdgpu_cs_vce_create },
	{ "VCE encode",  amdgpu_cs_vce_encode },
	{ "VCE MV dump",  amdgpu_cs_vce_encode_mv },
	{ "VCE destroy",  amdgpu_cs_vce_destroy },
	CU_TEST_INFO_NULL,
};

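/*
 * Decide whether this ASIC and its VCE firmware can run the suite at all,
 * and whether the "VCE MV dump" test should stay enabled.
 */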
CU_BOOL suite_vce_tests_enable(void)
{
	uint32_t version, feature;
	CU_BOOL ret_mv = CU_FALSE;

	if (amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle))
		return CU_FALSE;

	family_id = device_handle->info.family_id;
	chip_rev = device_handle->info.chip_rev;
	chip_id = device_handle->info.chip_external_rev;
	ids_flags = device_handle->info.ids_flags;

	amdgpu_query_firmware_version(device_handle, AMDGPU_INFO_FW_VCE, 0,
				      0, &version, &feature);

	if (amdgpu_device_deinitialize(device_handle))
		return CU_FALSE;

	if (family_id >= AMDGPU_FAMILY_RV || family_id == AMDGPU_FAMILY_SI ||
		asic_is_gfx_pipe_removed(family_id, chip_id, chip_rev)) {
		printf("\n\nThe ASIC does NOT support VCE, suite disabled\n");
		return CU_FALSE;
	}

	if (!(chip_id == (chip_rev + 0x3C) || /* FIJI */
			chip_id == (chip_rev + 0x50) || /* Polaris 10 */
			chip_id == (chip_rev + 0x5A) || /* Polaris 11 */
			chip_id == (chip_rev + 0x64) || /* Polaris 12 */
			(family_id >= AMDGPU_FAMILY_AI && !ids_flags))) /* dGPU > Polaris */
		printf("\n\nThe ASIC does NOT support VCE MV, suite disabled\n");
	else if (FW_53_0_03 > version)
		printf("\n\nThe ASIC FW version does NOT support VCE MV, suite disabled\n");
	else
		ret_mv = CU_TRUE;

	if (ret_mv == CU_FALSE) {
		amdgpu_set_test_active("VCE Tests", "VCE MV dump", ret_mv);
		is_mv_supported = false;
	}

	return CU_TRUE;
}

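/* Suite setup: open the device, create a GPU context and map the IB buffer. */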
int suite_vce_tests_init(void)
{
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				     &minor_version, &device_handle);
	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError: %s. "
				"Hint: Try to run this test program as root.",
				strerror(errno));

		return CUE_SINIT_FAILED;
	}

	family_id = device_handle->info.family_id;
	vce_harvest_config = device_handle->info.vce_harvest_config;

	r = amdgpu_cs_ctx_create(device_handle, &context_handle);
	if (r)
		return CUE_SINIT_FAILED;

	r = amdgpu_bo_alloc_and_map(device_handle, IB_SIZE, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &ib_handle, (void**)&ib_cpu,
				    &ib_mc_address, &ib_va_handle);
	if (r)
		return CUE_SINIT_FAILED;

	memset(&enc, 0, sizeof(struct amdgpu_vce_encode));

	return CUE_SUCCESS;
}

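/* Suite teardown: release the IB buffer, the GPU context and the device. */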
int suite_vce_tests_clean(void)
{
	int r;

	r = amdgpu_bo_unmap_and_free(ib_handle, ib_va_handle,
				     ib_mc_address, IB_SIZE);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_cs_ctx_free(context_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_device_deinitialize(device_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	return CUE_SUCCESS;
}

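/*
 * Submit the first 'ndw' dwords of the IB as a single command buffer on the
 * given HW IP, attach the global resource list and wait for the fence.
 */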
static int submit(unsigned ndw, unsigned ip)
{
	struct amdgpu_cs_request ibs_request = {0};
	struct amdgpu_cs_ib_info ib_info = {0};
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t expired;
	int r;

	ib_info.ib_mc_address = ib_mc_address;
	ib_info.size = ndw;

	ibs_request.ip_type = ip;

	r = amdgpu_bo_list_create(device_handle, num_resources, resources,
				  NULL, &ibs_request.resources);
	if (r)
		return r;

	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.fence_info.handle = NULL;

	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	if (r)
		return r;

	r = amdgpu_bo_list_destroy(ibs_request.resources);
	if (r)
		return r;

	fence_status.context = context_handle;
	fence_status.ip_type = ip;
	fence_status.fence = ibs_request.seq_no;

	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	if (r)
		return r;

	return 0;
}

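/* Allocate a buffer in 'domain', map it into the GPU VA space and clear it. */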
static void alloc_resource(struct amdgpu_vce_bo *vce_bo, unsigned size, unsigned domain)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	amdgpu_va_handle va_handle;
	uint64_t va = 0;
	int r;

	req.alloc_size = ALIGN(size, 4096);
	req.preferred_heap = domain;
	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  req.alloc_size, 1, 0, &va,
				  &va_handle, 0);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_va_op(buf_handle, 0, req.alloc_size, va, 0,
			    AMDGPU_VA_OP_MAP);
	CU_ASSERT_EQUAL(r, 0);
	vce_bo->addr = va;
	vce_bo->handle = buf_handle;
	vce_bo->size = req.alloc_size;
	vce_bo->va_handle = va_handle;
	r = amdgpu_bo_cpu_map(vce_bo->handle, (void **)&vce_bo->ptr);
	CU_ASSERT_EQUAL(r, 0);
	memset(vce_bo->ptr, 0, size);
	r = amdgpu_bo_cpu_unmap(vce_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
}

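/* Unmap and free a buffer previously set up by alloc_resource(). */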
static void free_resource(struct amdgpu_vce_bo *vce_bo)
{
	int r;

	r = amdgpu_bo_va_op(vce_bo->handle, 0, vce_bo->size,
			    vce_bo->addr, 0, AMDGPU_VA_OP_UNMAP);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_va_range_free(vce_bo->va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_free(vce_bo->handle);
	CU_ASSERT_EQUAL(r, 0);
	memset(vce_bo, 0, sizeof(*vce_bo));
}

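/* Test: create a VCE session from the canned vce_create command stream. */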
static void amdgpu_cs_vce_create(void)
{
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int len, r;

	enc.width = vce_create[6];
	enc.height = vce_create[7];

	num_resources  = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	resources[num_resources++] = ib_handle;

	len = 0;
	memcpy(ib_cpu, vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_create, sizeof(vce_create));
	ib_cpu[len + 8] = ALIGN(enc.width, align);
	ib_cpu[len + 9] = ALIGN(enc.width, align);
	if (is_mv_supported == true) { /* disableTwoInstance */
		if (family_id >= AMDGPU_FAMILY_AI)
			ib_cpu[len + 11] = 0x01000001;
		else
			ib_cpu[len + 11] = 0x01000201;
	}
	len += sizeof(vce_create) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc.fb[0].addr >> 32;
	ib_cpu[len + 3] = enc.fb[0].addr;
	len += sizeof(vce_feedback) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);

	free_resource(&enc.fb[0]);
}

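/*
 * Push the encoder configuration: rate control, config extension, motion
 * estimation, RDO and picture control packages.
 */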
static void amdgpu_cs_vce_config(void)
{
	int len = 0, r;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	ib_cpu[len + 3] = 2;
	ib_cpu[len + 6] = 0xffffffff;
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_rate_ctrl, sizeof(vce_rate_ctrl));
	len += sizeof(vce_rate_ctrl) / 4;
	memcpy((ib_cpu + len), vce_config_ext, sizeof(vce_config_ext));
	len += sizeof(vce_config_ext) / 4;
	memcpy((ib_cpu + len), vce_motion_est, sizeof(vce_motion_est));
	len += sizeof(vce_motion_est) / 4;
	memcpy((ib_cpu + len), vce_rdo, sizeof(vce_rdo));
	len += sizeof(vce_rdo) / 4;
	memcpy((ib_cpu + len), vce_pic_ctrl, sizeof(vce_pic_ctrl));
	if (is_mv_supported == true)
		ib_cpu[len + 27] = 0x00000001; /* encSliceMode */
	len += sizeof(vce_pic_ctrl) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

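/*
 * Build the IB for an IDR frame encode; in the two-instance case the IB is
 * left pending so the following P frame can be appended and submitted with it.
 */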
static void amdgpu_cs_vce_encode_idr(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
	int len = 0, i, r;

	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[0].addr >> 32;
	ib_cpu[len + 3] = enc->bs[0].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[0].addr >> 32;
	ib_cpu[len + 3] = enc->fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	ib_cpu[len + 73] = luma_size * 1.5;
	ib_cpu[len + 74] = luma_size * 2.5;
	len += sizeof(vce_encode) / 4;
	enc->ib_len = len;
	if (!enc->two_instance) {
		r = submit(len, AMDGPU_HW_IP_VCE);
		CU_ASSERT_EQUAL(r, 0);
	}
}

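/*
 * Build and submit the IB for a P frame encode; in two-instance mode it is
 * appended to the pending IDR IB and both are submitted together.
 */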
static void amdgpu_cs_vce_encode_p(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	int len, i, r;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);

	len = (enc->two_instance) ? enc->ib_len : 0;
	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;

	if (!enc->two_instance) {
		memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
		len += sizeof(vce_session) / 4;
	}
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[1].addr >> 32;
	ib_cpu[len + 3] = enc->bs[1].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[1].addr >> 32;
	ib_cpu[len + 3] = enc->fb[1].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 2] = 0;
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	ib_cpu[len + 18] = 0;
	ib_cpu[len + 19] = 0;
	ib_cpu[len + 56] = 3;
	ib_cpu[len + 57] = 0;
	ib_cpu[len + 58] = 0;
	ib_cpu[len + 59] = luma_size * 1.5;
	ib_cpu[len + 60] = luma_size * 2.5;
	ib_cpu[len + 73] = 0;
	ib_cpu[len + 74] = luma_size;
	ib_cpu[len + 81] = 1;
	ib_cpu[len + 82] = 1;
	len += sizeof(vce_encode) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

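/* Checksum both encoded bitstreams; the byte count is taken from the feedback buffer. */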
static void check_result(struct amdgpu_vce_encode *enc)
{
	uint64_t sum;
	uint32_t s[2] = {180325, 15946};
	uint32_t *ptr, size;
	int i, j, r;

	for (i = 0; i < 2; ++i) {
		r = amdgpu_bo_cpu_map(enc->fb[i].handle, (void **)&enc->fb[i].ptr);
		CU_ASSERT_EQUAL(r, 0);
		ptr = (uint32_t *)enc->fb[i].ptr;
		size = ptr[4] - ptr[9];
		r = amdgpu_bo_cpu_unmap(enc->fb[i].handle);
		CU_ASSERT_EQUAL(r, 0);
		r = amdgpu_bo_cpu_map(enc->bs[i].handle, (void **)&enc->bs[i].ptr);
		CU_ASSERT_EQUAL(r, 0);
		for (j = 0, sum = 0; j < size; ++j)
			sum += enc->bs[i].ptr[j];
		CU_ASSERT_EQUAL(sum, s[i]);
		r = amdgpu_bo_cpu_unmap(enc->bs[i].handle);
		CU_ASSERT_EQUAL(r, 0);
	}
}

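/*
 * Test: encode an IDR/P frame pair in the default configuration, with two
 * pipes, and (when no VCE instance is harvested) across two instances.
 */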
static void amdgpu_cs_vce_encode(void)
{
	uint32_t vbuf_size, bs_size = 0x154000, cpb_size;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int i, r;

	vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
	cpb_size = vbuf_size * 10;
	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	alloc_resource(&enc.fb[1], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[1].handle;
	alloc_resource(&enc.bs[0], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[0].handle;
	alloc_resource(&enc.bs[1], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[1].handle;
	alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.vbuf.handle;
	alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.cpb.handle;
	resources[num_resources++] = ib_handle;

	r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.vbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	amdgpu_cs_vce_config();

	if (family_id >= AMDGPU_FAMILY_VI) {
		vce_taskinfo[3] = 3;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);

		/* two pipes */
		vce_encode[16] = 0;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);

		/* two instances */
		if (vce_harvest_config == 0) {
			enc.two_instance = true;
			vce_taskinfo[2] = 0x83;
			vce_taskinfo[4] = 1;
			amdgpu_cs_vce_encode_idr(&enc);
			vce_taskinfo[2] = 0xffffffff;
			vce_taskinfo[4] = 2;
			amdgpu_cs_vce_encode_p(&enc);
			check_result(&enc);
		}
	} else {
		vce_taskinfo[3] = 3;
		vce_encode[16] = 0;
		amdgpu_cs_vce_encode_idr(&enc);
		amdgpu_cs_vce_encode_p(&enc);
		check_result(&enc);
	}

	free_resource(&enc.fb[0]);
	free_resource(&enc.fb[1]);
	free_resource(&enc.bs[0]);
	free_resource(&enc.bs[1]);
	free_resource(&enc.vbuf);
	free_resource(&enc.cpb);
}

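/*
 * Build and submit an encode IB that also requests a motion vector dump
 * against the separate reference picture in mvrefbuf.
 */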
static void amdgpu_cs_vce_mv(struct amdgpu_vce_encode *enc)
{
	uint64_t luma_offset, chroma_offset;
	uint64_t mv_ref_luma_offset;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
	int len = 0, i, r;

	luma_offset = enc->vbuf.addr;
	chroma_offset = luma_offset + luma_size;
	mv_ref_luma_offset = enc->mvrefbuf.addr;

	memcpy((ib_cpu + len), vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_bs_buffer, sizeof(vce_bs_buffer));
	ib_cpu[len + 2] = enc->bs[0].addr >> 32;
	ib_cpu[len + 3] = enc->bs[0].addr;
	len += sizeof(vce_bs_buffer) / 4;
	memcpy((ib_cpu + len), vce_context_buffer, sizeof(vce_context_buffer));
	ib_cpu[len + 2] = enc->cpb.addr >> 32;
	ib_cpu[len + 3] = enc->cpb.addr;
	len += sizeof(vce_context_buffer) / 4;
	memcpy((ib_cpu + len), vce_aux_buffer, sizeof(vce_aux_buffer));
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 2 + i] = luma_size * 1.5 * (i + 2);
	for (i = 0; i < 8; ++i)
		ib_cpu[len + 10 + i] = luma_size * 1.5;
	len += sizeof(vce_aux_buffer) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc->fb[0].addr >> 32;
	ib_cpu[len + 3] = enc->fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_mv_buffer, sizeof(vce_mv_buffer));
	ib_cpu[len + 2] = mv_ref_luma_offset >> 32;
	ib_cpu[len + 3] = mv_ref_luma_offset;
	ib_cpu[len + 4] = ALIGN(enc->width, align);
	ib_cpu[len + 5] = ALIGN(enc->width, align);
	ib_cpu[len + 6] = luma_size;
	ib_cpu[len + 7] = enc->mvb.addr >> 32;
	ib_cpu[len + 8] = enc->mvb.addr;
	len += sizeof(vce_mv_buffer) / 4;
	memcpy((ib_cpu + len), vce_encode, sizeof(vce_encode));
	ib_cpu[len + 2] = 0;
	ib_cpu[len + 3] = 0;
	ib_cpu[len + 4] = 0x154000;
	ib_cpu[len + 9] = luma_offset >> 32;
	ib_cpu[len + 10] = luma_offset;
	ib_cpu[len + 11] = chroma_offset >> 32;
	ib_cpu[len + 12] = chroma_offset;
	ib_cpu[len + 13] = ALIGN(enc->height, 16);
	ib_cpu[len + 14] = ALIGN(enc->width, align);
	ib_cpu[len + 15] = ALIGN(enc->width, align);
	/* encDisableMBOffloading-encDisableTwoPipeMode-encInputPicArrayMode-encInputPicAddrMode */
	ib_cpu[len + 16] = 0x01010000;
	ib_cpu[len + 18] = 0; /* encPicType */
	ib_cpu[len + 19] = 0; /* encIdrFlag */
	ib_cpu[len + 20] = 0; /* encIdrPicId */
	ib_cpu[len + 21] = 0; /* encMGSKeyPic */
	ib_cpu[len + 22] = 0; /* encReferenceFlag */
	ib_cpu[len + 23] = 0; /* encTemporalLayerIndex */
	ib_cpu[len + 55] = 0; /* pictureStructure */
	ib_cpu[len + 56] = 0; /* encPicType -ref[0] */
	ib_cpu[len + 61] = 0; /* pictureStructure */
	ib_cpu[len + 62] = 0; /* encPicType -ref[1] */
	ib_cpu[len + 67] = 0; /* pictureStructure */
	ib_cpu[len + 68] = 0; /* encPicType -ref1 */
	ib_cpu[len + 81] = 1; /* frameNumber */
	ib_cpu[len + 82] = 2; /* pictureOrderCount */
	ib_cpu[len + 83] = 0xffffffff; /* numIPicRemainInRCGOP */
	ib_cpu[len + 84] = 0xffffffff; /* numPPicRemainInRCGOP */
	ib_cpu[len + 85] = 0xffffffff; /* numBPicRemainInRCGOP */
	ib_cpu[len + 86] = 0xffffffff; /* numIRPicRemainInRCGOP */
	ib_cpu[len + 87] = 0; /* remainedIntraRefreshPictures */
	len += sizeof(vce_encode) / 4;

	enc->ib_len = len;
	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);
}

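/* Checksum the dumped motion vector buffer against the expected value. */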
static void check_mv_result(struct amdgpu_vce_encode *enc)
{
	uint64_t sum;
	uint32_t s = 140790;
	int j, r;

	r = amdgpu_bo_cpu_map(enc->fb[0].handle, (void **)&enc->fb[0].ptr);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_cpu_unmap(enc->fb[0].handle);
	CU_ASSERT_EQUAL(r, 0);
	r = amdgpu_bo_cpu_map(enc->mvb.handle, (void **)&enc->mvb.ptr);
	CU_ASSERT_EQUAL(r, 0);
	for (j = 0, sum = 0; j < enc->mvbuf_size; ++j)
		sum += enc->mvb.ptr[j];
	CU_ASSERT_EQUAL(sum, s);
	r = amdgpu_bo_cpu_unmap(enc->mvb.handle);
	CU_ASSERT_EQUAL(r, 0);
}

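/*
 * Test: MV dump - encode one frame against a vertically mirrored copy of it
 * and verify the motion vectors written by the encoder.
 */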
static void amdgpu_cs_vce_encode_mv(void)
{
	uint32_t vbuf_size, bs_size = 0x154000, cpb_size;
	unsigned align = (family_id >= AMDGPU_FAMILY_AI) ? 256 : 16;
	int i, r;

	vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
	enc.mvbuf_size = ALIGN(enc.width, 16) * ALIGN(enc.height, 16) / 8;
	cpb_size = vbuf_size * 10;
	num_resources = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	alloc_resource(&enc.bs[0], bs_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.bs[0].handle;
	alloc_resource(&enc.mvb, enc.mvbuf_size, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.mvb.handle;
	alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.vbuf.handle;
	alloc_resource(&enc.mvrefbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.mvrefbuf.handle;
	alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
	resources[num_resources++] = enc.cpb.handle;
	resources[num_resources++] = ib_handle;

	r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.vbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
		enc.vbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_cpu_map(enc.mvrefbuf.handle, (void **)&enc.mvrefbuf.ptr);
	CU_ASSERT_EQUAL(r, 0);

	memset(enc.mvrefbuf.ptr, 0, vbuf_size);
	for (i = 0; i < enc.height; ++i) {
		memcpy(enc.mvrefbuf.ptr, (frame + (enc.height - i - 1) * enc.width), enc.width);
		enc.mvrefbuf.ptr += ALIGN(enc.width, align);
	}
	for (i = 0; i < enc.height / 2; ++i) {
		memcpy(enc.mvrefbuf.ptr,
		((frame + enc.height * enc.width) + (enc.height / 2 - i - 1) * enc.width), enc.width);
		enc.mvrefbuf.ptr += ALIGN(enc.width, align);
	}

	r = amdgpu_bo_cpu_unmap(enc.mvrefbuf.handle);
	CU_ASSERT_EQUAL(r, 0);

	amdgpu_cs_vce_config();

	vce_taskinfo[3] = 3;
	amdgpu_cs_vce_mv(&enc);
	check_mv_result(&enc);

	free_resource(&enc.fb[0]);
	free_resource(&enc.bs[0]);
	free_resource(&enc.vbuf);
	free_resource(&enc.cpb);
	free_resource(&enc.mvrefbuf);
	free_resource(&enc.mvb);
}

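/* Test: destroy the VCE session created by amdgpu_cs_vce_create(). */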
static void amdgpu_cs_vce_destroy(void)
{
	int len, r;

	num_resources  = 0;
	alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
	resources[num_resources++] = enc.fb[0].handle;
	resources[num_resources++] = ib_handle;

	len = 0;
	memcpy(ib_cpu, vce_session, sizeof(vce_session));
	len += sizeof(vce_session) / 4;
	memcpy((ib_cpu + len), vce_taskinfo, sizeof(vce_taskinfo));
	ib_cpu[len + 3] = 1;
	len += sizeof(vce_taskinfo) / 4;
	memcpy((ib_cpu + len), vce_feedback, sizeof(vce_feedback));
	ib_cpu[len + 2] = enc.fb[0].addr >> 32;
	ib_cpu[len + 3] = enc.fb[0].addr;
	len += sizeof(vce_feedback) / 4;
	memcpy((ib_cpu + len), vce_destroy, sizeof(vce_destroy));
	len += sizeof(vce_destroy) / 4;

	r = submit(len, AMDGPU_HW_IP_VCE);
	CU_ASSERT_EQUAL(r, 0);

	free_resource(&enc.fb[0]);
}
765