1 /*
2 * Copyright 2016 The Chromium OS Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file.
5 */
6 #ifdef DRV_AMDGPU
7 #include <amdgpu.h>
8 #include <amdgpu_drm.h>
9 #include <assert.h>
10 #include <drm_fourcc.h>
11 #include <errno.h>
12 #include <fcntl.h>
13 #include <inttypes.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <string.h>
17 #include <sys/mman.h>
18 #include <unistd.h>
19 #include <xf86drm.h>
20 #include <xf86drmMode.h>
21
22 #include "dri.h"
23 #include "drv_helpers.h"
24 #include "drv_priv.h"
25 #include "util.h"
26
27 // clang-format off
28 #define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so)
29 // clang-format on
30
31 #define TILE_TYPE_LINEAR 0
32 /* We decide a modifier and then use DRI to manage allocation */
33 #define TILE_TYPE_DRI_MODIFIER 1
34 /* DRI backend decides tiling in this case. */
35 #define TILE_TYPE_DRI 2
36
37 /* Height alignment for encoder/decoder buffers */
38 #define CHROME_HEIGHT_ALIGN 16
39
40 struct amdgpu_priv {
41 int drm_version;
42
43 struct drm_amdgpu_info_device dev_info;
44 struct dri_driver *dri;
45
46 /* sdma */
47 uint32_t sdma_ctx;
48 uint32_t sdma_cmdbuf_bo;
49 uint64_t sdma_cmdbuf_addr;
50 uint64_t sdma_cmdbuf_size;
51 uint32_t *sdma_cmdbuf_map;
52 };
53
54 struct amdgpu_linear_vma_priv {
55 uint32_t handle;
56 uint32_t map_flags;
57 };
58
59 const static uint32_t render_target_formats[] = {
60 DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565,
61 DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR2101010,
62 DRM_FORMAT_ARGB2101010, DRM_FORMAT_XBGR2101010, DRM_FORMAT_XRGB2101010,
63 DRM_FORMAT_ABGR16161616F,
64 };
65
66 const static uint32_t texture_source_formats[] = {
67 DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_NV21, DRM_FORMAT_NV12,
68 DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420, DRM_FORMAT_P010
69 };
70
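/*
 * Fetch static device information (GPU family, VA alignment, etc.) from the
 * kernel via the AMDGPU_INFO ioctl. The result is cached in amdgpu_priv and
 * consulted below for tiling, alignment and SDMA decisions.
 */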
71 static int query_dev_info(int fd, struct drm_amdgpu_info_device *dev_info)
72 {
73 struct drm_amdgpu_info info_args = { 0 };
74
75 info_args.return_pointer = (uintptr_t)dev_info;
76 info_args.return_size = sizeof(*dev_info);
77 info_args.query = AMDGPU_INFO_DEV_INFO;
78
79 return drmCommandWrite(fd, DRM_AMDGPU_INFO, &info_args, sizeof(info_args));
80 }
81
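/*
 * Set up a small SDMA context: a GPU context plus a GTT command buffer
 * (4 KiB, rounded up to the VA alignment) that is both GPU-mapped and
 * CPU-mapped. sdma_copy() reuses these to stage copies between buffers.
 * Failure here is non-fatal; mapping falls back to direct CPU access.
 */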
82 static int sdma_init(struct amdgpu_priv *priv, int fd)
83 {
84 union drm_amdgpu_ctx ctx_args = { { 0 } };
85 union drm_amdgpu_gem_create gem_create = { { 0 } };
86 struct drm_amdgpu_gem_va va_args = { 0 };
87 union drm_amdgpu_gem_mmap gem_map = { { 0 } };
88 struct drm_gem_close gem_close = { 0 };
89 int ret;
90
91 /* Ensure we can make a submission without BO lists. */
92 if (priv->drm_version < 27)
93 return 0;
94
95 /* Anything outside this range needs adjustments to the SDMA copy commands */
96 if (priv->dev_info.family < AMDGPU_FAMILY_CI || priv->dev_info.family > AMDGPU_FAMILY_NV)
97 return 0;
98
99 ctx_args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
100
101 ret = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
102 if (ret < 0)
103 return ret;
104
105 priv->sdma_ctx = ctx_args.out.alloc.ctx_id;
106
107 priv->sdma_cmdbuf_size = ALIGN(4096, priv->dev_info.virtual_address_alignment);
108 gem_create.in.bo_size = priv->sdma_cmdbuf_size;
109 gem_create.in.alignment = 4096;
110 gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
111
112 ret = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &gem_create, sizeof(gem_create));
113 if (ret < 0)
114 goto fail_ctx;
115
116 priv->sdma_cmdbuf_bo = gem_create.out.handle;
117
118 priv->sdma_cmdbuf_addr =
119 ALIGN(priv->dev_info.virtual_address_offset, priv->dev_info.virtual_address_alignment);
120
121 /* Map the buffer into the GPU address space so we can use it from the GPU */
122 va_args.handle = priv->sdma_cmdbuf_bo;
123 va_args.operation = AMDGPU_VA_OP_MAP;
124 va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_EXECUTABLE;
125 va_args.va_address = priv->sdma_cmdbuf_addr;
126 va_args.offset_in_bo = 0;
127 va_args.map_size = priv->sdma_cmdbuf_size;
128
129 ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
130 if (ret)
131 goto fail_bo;
132
133 gem_map.in.handle = priv->sdma_cmdbuf_bo;
134 ret = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
135 if (ret)
136 goto fail_va;
137
138 priv->sdma_cmdbuf_map = mmap(0, priv->sdma_cmdbuf_size, PROT_READ | PROT_WRITE, MAP_SHARED,
139 fd, gem_map.out.addr_ptr);
140 if (priv->sdma_cmdbuf_map == MAP_FAILED) {
141 priv->sdma_cmdbuf_map = NULL;
142 ret = -ENOMEM;
143 goto fail_va;
144 }
145
146 return 0;
147 fail_va:
148 va_args.operation = AMDGPU_VA_OP_UNMAP;
149 va_args.flags = 0;
150 drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
151 fail_bo:
152 gem_close.handle = priv->sdma_cmdbuf_bo;
153 drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
154 fail_ctx:
155 memset(&ctx_args, 0, sizeof(ctx_args));
156 ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
157 ctx_args.in.ctx_id = priv->sdma_ctx;
158 drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
159 return ret;
160 }
161
162 static void sdma_finish(struct amdgpu_priv *priv, int fd)
163 {
164 union drm_amdgpu_ctx ctx_args = { { 0 } };
165 struct drm_amdgpu_gem_va va_args = { 0 };
166 struct drm_gem_close gem_close = { 0 };
167
168 if (!priv->sdma_cmdbuf_map)
169 return;
170
171 va_args.handle = priv->sdma_cmdbuf_bo;
172 va_args.operation = AMDGPU_VA_OP_UNMAP;
173 va_args.flags = 0;
174 va_args.va_address = priv->sdma_cmdbuf_addr;
175 va_args.offset_in_bo = 0;
176 va_args.map_size = priv->sdma_cmdbuf_size;
177 drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
178
179 gem_close.handle = priv->sdma_cmdbuf_bo;
180 drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
181
182 ctx_args.in.op = AMDGPU_CTX_OP_FREE_CTX;
183 ctx_args.in.ctx_id = priv->sdma_ctx;
184 drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &ctx_args, sizeof(ctx_args));
185 }
186
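/*
 * Copy `size` bytes from src_handle to dst_handle on the SDMA engine: map both
 * BOs into the GPU address space behind the command buffer, emit one linear
 * copy packet per max_size_per_cmd chunk, submit the command stream, and block
 * until it completes before unmapping.
 */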
187 static int sdma_copy(struct amdgpu_priv *priv, int fd, uint32_t src_handle, uint32_t dst_handle,
188 uint64_t size)
189 {
190 const uint64_t max_size_per_cmd = 0x3fff00;
191 const uint32_t cmd_size = 7 * sizeof(uint32_t); /* 7 dwords, see loop below. */
192 const uint64_t max_commands = priv->sdma_cmdbuf_size / cmd_size;
193 uint64_t src_addr = priv->sdma_cmdbuf_addr + priv->sdma_cmdbuf_size;
194 uint64_t dst_addr = src_addr + size;
195 struct drm_amdgpu_gem_va va_args = { 0 };
196 unsigned cmd = 0;
197 uint64_t remaining_size = size;
198 uint64_t cur_src_addr = src_addr;
199 uint64_t cur_dst_addr = dst_addr;
200 struct drm_amdgpu_cs_chunk_ib ib = { 0 };
201 struct drm_amdgpu_cs_chunk chunks[2] = { { 0 } };
202 uint64_t chunk_ptrs[2];
203 union drm_amdgpu_cs cs = { { 0 } };
204 struct drm_amdgpu_bo_list_in bo_list = { 0 };
205 struct drm_amdgpu_bo_list_entry bo_list_entries[3] = { { 0 } };
206 union drm_amdgpu_wait_cs wait_cs = { { 0 } };
207 int ret = 0;
208
209 if (size > UINT64_MAX - max_size_per_cmd ||
210 DIV_ROUND_UP(size, max_size_per_cmd) > max_commands)
211 return -ENOMEM;
212
213 /* Map both buffers into the GPU address space so we can access them from the GPU. */
214 va_args.handle = src_handle;
215 va_args.operation = AMDGPU_VA_OP_MAP;
216 va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_DELAY_UPDATE;
217 va_args.va_address = src_addr;
218 va_args.map_size = size;
219
220 ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
221 if (ret)
222 return ret;
223
224 va_args.handle = dst_handle;
225 va_args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_DELAY_UPDATE;
226 va_args.va_address = dst_addr;
227
228 ret = drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
229 if (ret)
230 goto unmap_src;
231
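/*
 * Each SDMA linear-copy packet is 7 dwords: opcode, byte count (count - 1 on
 * GFX9 and newer), a reserved dword, then the 64-bit source and destination
 * GPU addresses split into low/high dwords.
 */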
232 while (remaining_size) {
233 uint64_t cur_size = remaining_size;
234 if (cur_size > max_size_per_cmd)
235 cur_size = max_size_per_cmd;
236
237 priv->sdma_cmdbuf_map[cmd++] = 0x01; /* linear copy */
238 priv->sdma_cmdbuf_map[cmd++] =
239 priv->dev_info.family >= AMDGPU_FAMILY_AI ? (cur_size - 1) : cur_size;
240 priv->sdma_cmdbuf_map[cmd++] = 0;
241 priv->sdma_cmdbuf_map[cmd++] = cur_src_addr;
242 priv->sdma_cmdbuf_map[cmd++] = cur_src_addr >> 32;
243 priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr;
244 priv->sdma_cmdbuf_map[cmd++] = cur_dst_addr >> 32;
245
246 remaining_size -= cur_size;
247 cur_src_addr += cur_size;
248 cur_dst_addr += cur_size;
249 }
250
251 ib.va_start = priv->sdma_cmdbuf_addr;
252 ib.ib_bytes = cmd * 4;
253 ib.ip_type = AMDGPU_HW_IP_DMA;
254
255 chunks[1].chunk_id = AMDGPU_CHUNK_ID_IB;
256 chunks[1].length_dw = sizeof(ib) / 4;
257 chunks[1].chunk_data = (uintptr_t)&ib;
258
259 bo_list_entries[0].bo_handle = priv->sdma_cmdbuf_bo;
260 bo_list_entries[0].bo_priority = 8; /* Middle of range, like RADV. */
261 bo_list_entries[1].bo_handle = src_handle;
262 bo_list_entries[1].bo_priority = 8;
263 bo_list_entries[2].bo_handle = dst_handle;
264 bo_list_entries[2].bo_priority = 8;
265
266 bo_list.bo_number = 3;
267 bo_list.bo_info_size = sizeof(bo_list_entries[0]);
268 bo_list.bo_info_ptr = (uintptr_t)bo_list_entries;
269
270 chunks[0].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
271 chunks[0].length_dw = sizeof(bo_list) / 4;
272 chunks[0].chunk_data = (uintptr_t)&bo_list;
273
274 chunk_ptrs[0] = (uintptr_t)&chunks[0];
275 chunk_ptrs[1] = (uintptr_t)&chunks[1];
276
277 cs.in.ctx_id = priv->sdma_ctx;
278 cs.in.num_chunks = 2;
279 cs.in.chunks = (uintptr_t)chunk_ptrs;
280
281 ret = drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
282 if (ret) {
283 drv_loge("SDMA copy command buffer submission failed %d\n", ret);
284 goto unmap_dst;
285 }
286
287 wait_cs.in.handle = cs.out.handle;
288 wait_cs.in.ip_type = AMDGPU_HW_IP_DMA;
289 wait_cs.in.ctx_id = priv->sdma_ctx;
290 wait_cs.in.timeout = INT64_MAX;
291
292 ret = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait_cs, sizeof(wait_cs));
293 if (ret) {
294 drv_loge("Could not wait for CS to finish\n");
295 } else if (wait_cs.out.status) {
296 drv_loge("Infinite wait timed out, likely GPU hang.\n");
297 ret = -ENODEV;
298 }
299
300 unmap_dst:
301 va_args.handle = dst_handle;
302 va_args.operation = AMDGPU_VA_OP_UNMAP;
303 va_args.flags = AMDGPU_VM_DELAY_UPDATE;
304 va_args.va_address = dst_addr;
305 drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
306
307 unmap_src:
308 va_args.handle = src_handle;
309 va_args.operation = AMDGPU_VA_OP_UNMAP;
310 va_args.flags = AMDGPU_VM_DELAY_UPDATE;
311 va_args.va_address = src_addr;
312 drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va_args, sizeof(va_args));
313
314 return ret;
315 }
316
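/*
 * Decide whether a format modifier can be used for scanout on this GPU:
 * linear always can, only AMD modifiers are considered otherwise, and the
 * swizzle mode and DCC layout are checked against DCN/DCE display limits.
 */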
317 static bool is_modifier_scanout_capable(struct amdgpu_priv *priv, uint32_t format,
318 uint64_t modifier)
319 {
320 unsigned bytes_per_pixel = drv_stride_from_format(format, 1, 0);
321
322 if (modifier == DRM_FORMAT_MOD_LINEAR)
323 return true;
324
325 if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_AMD)
326 return false;
327
328 unsigned swizzle = AMD_FMT_MOD_GET(TILE, modifier);
329 if (priv->dev_info.family >= AMDGPU_FAMILY_RV) { /* DCN based GPUs */
330 /* D swizzle only supported for 64 bpp */
331 if ((swizzle & 3) == 2 && bytes_per_pixel != 8)
332 return false;
333
334 /* S swizzle not supported for 64 bpp */
335 if ((swizzle & 3) == 1 && bytes_per_pixel == 8)
336 return false;
337 } else { /* DCE based GPUs with GFX9 based modifier swizzling. */
338 assert(priv->dev_info.family == AMDGPU_FAMILY_AI);
339 /* Only D swizzles are allowed for display */
340 if ((swizzle & 3) != 2)
341 return false;
342 }
343
344 if (AMD_FMT_MOD_GET(DCC, modifier) &&
345 (AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier) || !AMD_FMT_MOD_GET(DCC_RETILE, modifier)))
346 return false;
347 return true;
348 }
349
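/* Hold (or drop) a long-lived handle on the radeonsi DRI library. */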
350 static void amdgpu_preload(bool load)
351 {
352 static void *handle;
353
354 if (load && !handle)
355 handle = dri_dlopen(DRI_PATH);
356 else if (!load && handle) {
357 dri_dlclose(handle);
358 handle = NULL;
359 }
360 }
361
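/*
 * Per-driver initialization: query the DRM minor version and device info,
 * load the radeonsi DRI backend, optionally bring up SDMA, and register the
 * linear and DRI-tiled format/use-flag combinations.
 */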
362 static int amdgpu_init(struct driver *drv)
363 {
364 struct amdgpu_priv *priv;
365 drmVersionPtr drm_version;
366 struct format_metadata metadata;
367 uint64_t use_flags = BO_USE_RENDER_MASK;
368
369 priv = calloc(1, sizeof(struct amdgpu_priv));
370 if (!priv)
371 return -ENOMEM;
372
373 drm_version = drmGetVersion(drv_get_fd(drv));
374 if (!drm_version) {
375 free(priv);
376 return -ENODEV;
377 }
378
379 priv->drm_version = drm_version->version_minor;
380 drmFreeVersion(drm_version);
381
382 drv->priv = priv;
383
384 if (query_dev_info(drv_get_fd(drv), &priv->dev_info)) {
385 free(priv);
386 drv->priv = NULL;
387 return -ENODEV;
388 }
389
390 priv->dri = dri_init(drv, DRI_PATH, "radeonsi");
391 if (!priv->dri) {
392 free(priv);
393 drv->priv = NULL;
394 return -ENODEV;
395 }
396
397 /* Continue on failure, as we can still successfully map things without SDMA. */
398 if (sdma_init(priv, drv_get_fd(drv)))
399 drv_loge("SDMA init failed\n");
400
401 metadata.tiling = TILE_TYPE_LINEAR;
402 metadata.priority = 1;
403 metadata.modifier = DRM_FORMAT_MOD_LINEAR;
404
405 drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats),
406 &metadata, use_flags);
407
408 drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats),
409 &metadata, BO_USE_TEXTURE_MASK);
410
411 /* NV12 format for camera, display, decoding and encoding. */
412 drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
413 BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT |
414 BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
415 BO_USE_PROTECTED);
416
417 drv_modify_combination(drv, DRM_FORMAT_P010, &metadata,
418 BO_USE_SCANOUT | BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER |
419 BO_USE_PROTECTED);
420
421 /* Android CTS tests require this. */
422 drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);
423
424 /* Linear formats supported by display. */
425 drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
426 drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT);
427 drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT);
428 drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT);
429 drv_modify_combination(drv, DRM_FORMAT_RGB565, &metadata, BO_USE_SCANOUT);
430
431 drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SCANOUT);
432 drv_modify_combination(drv, DRM_FORMAT_ARGB2101010, &metadata, BO_USE_SCANOUT);
433 drv_modify_combination(drv, DRM_FORMAT_XBGR2101010, &metadata, BO_USE_SCANOUT);
434 drv_modify_combination(drv, DRM_FORMAT_XRGB2101010, &metadata, BO_USE_SCANOUT);
435
436 drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT);
437
438 /*
439 * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots
440 * from camera and input/output from hardware decoder/encoder.
441 */
442 drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
443 BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
444 BO_USE_HW_VIDEO_ENCODER | BO_USE_GPU_DATA_BUFFER |
445 BO_USE_SENSOR_DIRECT_DATA);
446
447 /*
448 * The following formats will be allocated by the DRI backend and may be potentially tiled.
449 * Since format modifier support hasn't been implemented fully yet, it's not
450 * possible to enumerate the different types of buffers (like i915 can).
451 */
452 use_flags &= ~BO_USE_RENDERSCRIPT;
453 use_flags &= ~BO_USE_SW_WRITE_OFTEN;
454 use_flags &= ~BO_USE_SW_READ_OFTEN;
455 #if __ANDROID__
456 use_flags &= ~BO_USE_SW_WRITE_RARELY;
457 use_flags &= ~BO_USE_SW_READ_RARELY;
458 #endif
459 use_flags &= ~BO_USE_LINEAR;
460
461 metadata.priority = 2;
462
463 for (unsigned f = 0; f < ARRAY_SIZE(render_target_formats); ++f) {
464 uint32_t format = render_target_formats[f];
465 int mod_cnt;
466 if (dri_query_modifiers(priv->dri, format, 0, NULL, &mod_cnt) && mod_cnt) {
467 uint64_t *modifiers = calloc(mod_cnt, sizeof(uint64_t));
468 dri_query_modifiers(priv->dri, format, mod_cnt, modifiers, &mod_cnt);
469 metadata.tiling = TILE_TYPE_DRI_MODIFIER;
470 for (int i = 0; i < mod_cnt; ++i) {
471 bool scanout =
472 is_modifier_scanout_capable(drv->priv, format, modifiers[i]);
473
474 /* LINEAR will be handled using the LINEAR metadata. */
475 if (modifiers[i] == DRM_FORMAT_MOD_LINEAR)
476 continue;
477
478 /* The virtgpu minigbm can't handle auxiliary planes in the host. */
479 if (dri_num_planes_from_modifier(priv->dri, format, modifiers[i]) !=
480 drv_num_planes_from_format(format))
481 continue;
482
483 metadata.modifier = modifiers[i];
484 drv_add_combination(drv, format, &metadata,
485 use_flags | (scanout ? BO_USE_SCANOUT : 0));
486 }
487 free(modifiers);
488 } else {
489 bool scanout = false;
490 switch (format) {
491 case DRM_FORMAT_ARGB8888:
492 case DRM_FORMAT_XRGB8888:
493 case DRM_FORMAT_ABGR8888:
494 case DRM_FORMAT_XBGR8888:
495 case DRM_FORMAT_ABGR2101010:
496 case DRM_FORMAT_ARGB2101010:
497 case DRM_FORMAT_XBGR2101010:
498 case DRM_FORMAT_XRGB2101010:
499 scanout = true;
500 break;
501 default:
502 break;
503 }
504 metadata.tiling = TILE_TYPE_DRI;
505 drv_add_combination(drv, format, &metadata,
506 use_flags | (scanout ? BO_USE_SCANOUT : 0));
507 }
508 }
509 return 0;
510 }
511
512 static void amdgpu_close(struct driver *drv)
513 {
514 struct amdgpu_priv *priv = drv->priv;
515
516 sdma_finish(priv, drv_get_fd(drv));
517 dri_close(priv->dri);
518 free(priv);
519
520 drv->priv = NULL;
521 }
522
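/*
 * Allocate a linear GTT buffer directly through the kernel (no DRI), applying
 * the stride and height alignment rules required by the GPU, the video
 * codecs and Android YV12 before calling DRM_AMDGPU_GEM_CREATE.
 */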
523 static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
524 uint64_t use_flags)
525 {
526 int ret;
527 bool need_align = false;
528 uint32_t stride_align = 1;
529 uint32_t stride;
530 union drm_amdgpu_gem_create gem_create = { { 0 } };
531 struct amdgpu_priv *priv = bo->drv->priv;
532
533 stride = drv_stride_from_format(format, width, 0);
534
535 /* some clients (e.g., virtio-wl) set BO_USE_LINEAR to mean
536 * BO_USE_SCANOUT or BO_USE_TEXTURE
537 */
538 need_align = use_flags & (BO_USE_HW_MASK | BO_USE_LINEAR);
539
540 #if defined(ANDROID) && ANDROID_API_LEVEL < 30
541 /* work around
542 * android.hardware.camera2.cts.ImageWriterTest#testYuvImageWriterReaderOperation
543 * failure before R
544 */
545 need_align = true;
546 #endif
547
548 if (need_align) {
549 /* GFX9+ requires the stride to be aligned to 256 bytes */
550 stride_align = 256;
551 stride = ALIGN(stride, stride_align);
552
553 /* Android YV12 requires the UV stride to be half of the Y
554 * stride. Before GFX10, we can double the alignment for the
555 * Y stride, which makes sure the UV stride is still aligned
556 * to 256 bytes after halving.
557 *
558 * GFX10+ additionally requires the stride to be as small as
559 * possible. It is impossible to support the format in some
560 * cases. Instead, we force DRM_FORMAT_YVU420 and knowingly
561 * vioate Android YV12 stride requirement. This is done
562 * because
563 *
564 * - we would like to know what breaks, and
565 * - when used as a classic resource by virglrenderer, the
566 * requirement hopefully does not matter
567 */
568 bool double_align = format == DRM_FORMAT_YVU420_ANDROID;
569 if (double_align && priv->dev_info.family >= AMDGPU_FAMILY_NV &&
570 (use_flags & BO_USE_GPU_HW) && ((stride / stride_align) & 1)) {
571 drv_loge("allocating %dx%d YV12 bo (usage 0x%" PRIx64 ") with bad strides",
572 width, height, use_flags);
573 format = DRM_FORMAT_YVU420;
574 double_align = false;
575 }
576 if (double_align)
577 stride = ALIGN(stride, stride_align * 2);
578 }
579
580 /*
581 * Currently, the allocator used by Chrome aligns the height for encoder/
582 * decoder buffers, while the allocator used by Android (gralloc/minigbm)
583 * doesn't apply any alignment.
584 *
585 * See b/153130069
586 */
587 if (use_flags & (BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER))
588 height = ALIGN(height, CHROME_HEIGHT_ALIGN);
589
590 drv_bo_from_format(bo, stride, stride_align, height, format);
591
592 gem_create.in.bo_size =
593 ALIGN(bo->meta.total_size, priv->dev_info.virtual_address_alignment);
594 gem_create.in.alignment = 256;
595 gem_create.in.domain_flags = 0;
596
597 if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK))
598 gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
599
600 gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
601
602 /* Scanout in GTT requires USWC, otherwise try to use cacheable memory
603 * for buffers that are read often, because uncacheable reads can be
604 * very slow. USWC should be faster on the GPU though. */
605 if ((use_flags & BO_USE_SCANOUT) || !(use_flags & BO_USE_SW_READ_OFTEN))
606 gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
607
608 /* For protected content, the buffer needs to be allocated from TMZ. */
609 if (use_flags & BO_USE_PROTECTED)
610 gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_ENCRYPTED;
611
612 /* Allocate the buffer with the preferred heap. */
613 ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create,
614 sizeof(gem_create));
615 if (ret < 0)
616 return ret;
617
618 bo->handle.u32 = gem_create.out.handle;
619
620 bo->meta.format_modifier = DRM_FORMAT_MOD_LINEAR;
621
622 return 0;
623 }
624
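/*
 * Dispatch allocation based on the combination's tiling type: DRI-managed
 * tiling, DRI allocation with an explicit modifier, or the plain linear path.
 */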
625 static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
626 uint64_t use_flags)
627 {
628 struct combination *combo;
629 struct amdgpu_priv *priv = bo->drv->priv;
630
631 combo = drv_get_combination(bo->drv, format, use_flags);
632 if (!combo)
633 return -EINVAL;
634
635 if (combo->metadata.tiling == TILE_TYPE_DRI) {
636 // See b/122049612
637 if (use_flags & (BO_USE_SCANOUT) && priv->dev_info.family == AMDGPU_FAMILY_CZ) {
638 uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0);
639 width = ALIGN(width, 256 / bytes_per_pixel);
640 }
641
642 return dri_bo_create(priv->dri, bo, width, height, format, use_flags);
643 } else if (combo->metadata.tiling == TILE_TYPE_DRI_MODIFIER) {
644 return dri_bo_create_with_modifiers(priv->dri, bo, width, height, format, use_flags,
645 &combo->metadata.modifier, 1);
646 }
647
648 return amdgpu_create_bo_linear(bo, width, height, format, use_flags);
649 }
650
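/*
 * If the caller only allows DRM_FORMAT_MOD_LINEAR, fall back to the kernel
 * linear path (assuming scanout usage); otherwise let the DRI backend pick
 * from the supplied modifier list.
 */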
651 static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
652 uint32_t format, const uint64_t *modifiers,
653 uint32_t count)
654 {
655 struct amdgpu_priv *priv = bo->drv->priv;
656 bool only_use_linear = true;
657
658 for (uint32_t i = 0; i < count; ++i)
659 if (modifiers[i] != DRM_FORMAT_MOD_LINEAR)
660 only_use_linear = false;
661
662 if (only_use_linear)
663 return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT);
664
665 return dri_bo_create_with_modifiers(priv->dri, bo, width, height, format, 0, modifiers,
666 count);
667 }
668
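/*
 * Route imports with non-linear modifiers (or non-linear combination metadata
 * when the modifier is DRM_FORMAT_MOD_INVALID) through the DRI backend;
 * everything else uses the generic PRIME import path.
 */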
669 static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data)
670 {
671 struct amdgpu_priv *priv = bo->drv->priv;
672 bool dri_tiling = data->format_modifier != DRM_FORMAT_MOD_LINEAR;
673 if (data->format_modifier == DRM_FORMAT_MOD_INVALID) {
674 struct combination *combo;
675 combo = drv_get_combination(bo->drv, data->format, data->use_flags);
676 if (!combo)
677 return -EINVAL;
678
679 dri_tiling = combo->metadata.tiling != TILE_TYPE_LINEAR;
680 }
681
682 bo->meta.num_planes =
683 dri_num_planes_from_modifier(priv->dri, data->format, data->format_modifier);
684
685 if (dri_tiling)
686 return dri_bo_import(priv->dri, bo, data);
687 else
688 return drv_prime_bo_import(bo, data);
689 }
690
691 static int amdgpu_release_bo(struct bo *bo)
692 {
693 struct amdgpu_priv *priv = bo->drv->priv;
694
695 if (bo->priv)
696 return dri_bo_release(priv->dri, bo);
697
698 return 0;
699 }
700
701 static int amdgpu_destroy_bo(struct bo *bo)
702 {
703 struct amdgpu_priv *priv = bo->drv->priv;
704
705 if (bo->priv)
706 return dri_bo_destroy(priv->dri, bo);
707 else
708 return drv_gem_bo_destroy(bo);
709 }
710
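/*
 * Map a buffer for CPU access. DRI-allocated (tiled) buffers are mapped
 * through the DRI backend. For linear buffers placed in VRAM or USWC memory,
 * a cacheable GTT staging buffer is allocated and filled with an SDMA copy so
 * CPU reads stay fast; amdgpu_unmap_bo() copies any writes back.
 */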
711 static void *amdgpu_map_bo(struct bo *bo, struct vma *vma, uint32_t map_flags)
712 {
713 void *addr = MAP_FAILED;
714 int ret;
715 union drm_amdgpu_gem_mmap gem_map = { { 0 } };
716 struct drm_amdgpu_gem_create_in bo_info = { 0 };
717 struct drm_amdgpu_gem_op gem_op = { 0 };
718 uint32_t handle = bo->handle.u32;
719 struct amdgpu_linear_vma_priv *priv = NULL;
720 struct amdgpu_priv *drv_priv = bo->drv->priv;
721
722 if (bo->priv)
723 return dri_bo_map(drv_priv->dri, bo, vma, 0, map_flags);
724
725 gem_op.handle = handle;
726 gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
727 gem_op.value = (uintptr_t)&bo_info;
728
729 ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_OP, &gem_op, sizeof(gem_op));
730 if (ret)
731 return MAP_FAILED;
732
733 vma->length = bo_info.bo_size;
734
735 if (((bo_info.domains & AMDGPU_GEM_DOMAIN_VRAM) ||
736 (bo_info.domain_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)) &&
737 drv_priv->sdma_cmdbuf_map) {
738 union drm_amdgpu_gem_create gem_create = { { 0 } };
739
740 priv = calloc(1, sizeof(struct amdgpu_linear_vma_priv));
741 if (!priv)
742 return MAP_FAILED;
743
744 gem_create.in.bo_size = bo_info.bo_size;
745 gem_create.in.alignment = 4096;
746 gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT;
747
748 ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_CREATE, &gem_create,
749 sizeof(gem_create));
750 if (ret < 0) {
751 drv_loge("GEM create failed\n");
752 free(priv);
753 return MAP_FAILED;
754 }
755
756 priv->map_flags = map_flags;
757 handle = priv->handle = gem_create.out.handle;
758
759 ret = sdma_copy(bo->drv->priv, bo->drv->fd, bo->handle.u32, priv->handle,
760 bo_info.bo_size);
761 if (ret) {
762 drv_loge("SDMA copy for read failed\n");
763 goto fail;
764 }
765 }
766
767 gem_map.in.handle = handle;
768 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map);
769 if (ret) {
770 drv_loge("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n");
771 goto fail;
772 }
773
774 addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
775 gem_map.out.addr_ptr);
776 if (addr == MAP_FAILED)
777 goto fail;
778
779 vma->priv = priv;
780 return addr;
781
782 fail:
783 if (priv) {
784 struct drm_gem_close gem_close = { 0 };
785 gem_close.handle = priv->handle;
786 drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
787 free(priv);
788 }
789 return MAP_FAILED;
790 }
791
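/*
 * Undo amdgpu_map_bo(): unmap the CPU mapping and, if a staging buffer was
 * used for a writable mapping, SDMA-copy its contents back into the real BO
 * before closing it.
 */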
792 static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma)
793 {
794 struct amdgpu_priv *priv = bo->drv->priv;
795
796 if (bo->priv) {
797 return dri_bo_unmap(priv->dri, bo, vma);
798 } else {
799 int r = munmap(vma->addr, vma->length);
800 if (r)
801 return r;
802
803 if (vma->priv) {
804 struct amdgpu_linear_vma_priv *priv = vma->priv;
805 struct drm_gem_close gem_close = { 0 };
806
807 if (BO_MAP_WRITE & priv->map_flags) {
808 r = sdma_copy(bo->drv->priv, bo->drv->fd, priv->handle,
809 bo->handle.u32, vma->length);
810 if (r)
811 return r;
812 }
813
814 gem_close.handle = priv->handle;
815 r = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
816 }
817
818 return 0;
819 }
820 }
821
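/*
 * Before CPU access, wait for any pending GPU work on the buffer so the
 * mapping observes up-to-date contents.
 */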
822 static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
823 {
824 int ret;
825 union drm_amdgpu_gem_wait_idle wait_idle = { { 0 } };
826
827 if (bo->priv)
828 return 0;
829
830 wait_idle.in.handle = bo->handle.u32;
831 wait_idle.in.timeout = AMDGPU_TIMEOUT_INFINITE;
832
833 ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &wait_idle,
834 sizeof(wait_idle));
835
836 if (ret < 0) {
837 drv_loge("DRM_AMDGPU_GEM_WAIT_IDLE failed with %d\n", ret);
838 return ret;
839 }
840
841 if (ret == 0 && wait_idle.out.status)
842 drv_loge("DRM_AMDGPU_GEM_WAIT_IDLE BO is busy\n");
843
844 return 0;
845 }
846
847 static size_t amdgpu_num_planes_from_modifier(struct driver *drv, uint32_t format,
848 uint64_t modifier)
849 {
850 struct amdgpu_priv *priv = drv->priv;
851 return dri_num_planes_from_modifier(priv->dri, format, modifier);
852 }
853
854 const struct backend backend_amdgpu = {
855 .name = "amdgpu",
856 .preload = amdgpu_preload,
857 .init = amdgpu_init,
858 .close = amdgpu_close,
859 .bo_create = amdgpu_create_bo,
860 .bo_create_with_modifiers = amdgpu_create_bo_with_modifiers,
861 .bo_release = amdgpu_release_bo,
862 .bo_destroy = amdgpu_destroy_bo,
863 .bo_import = amdgpu_import_bo,
864 .bo_map = amdgpu_map_bo,
865 .bo_unmap = amdgpu_unmap_bo,
866 .bo_invalidate = amdgpu_bo_invalidate,
867 .resolve_format_and_use_flags = drv_resolve_format_and_use_flags_helper,
868 .num_planes_from_modifier = amdgpu_num_planes_from_modifier,
869 };
870
871 #endif
872