/*
 * Copyright 2010 Jerome Glisse <[email protected]>
 * Copyright 2015-2021 Advanced Micro Devices, Inc.
 * Copyright 2023 Valve Corporation
 * All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 */

#include "radv_sdma.h"
#include "util/macros.h"
#include "util/u_memory.h"
#include "radv_buffer.h"
#include "radv_cs.h"
#include "radv_formats.h"

#include "ac_formats.h"

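/* Describes how a larger copy is split into chunks that fit into a temporary buffer
 * of RADV_SDMA_TRANSFER_TEMP_BYTES. Extents and pitches are in blocks of the image format.
 */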
struct radv_sdma_chunked_copy_info {
   unsigned extent_horizontal_blocks;
   unsigned extent_vertical_blocks;
   unsigned aligned_row_pitch;
   unsigned num_rows_per_copy;
};

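/* Copy alignments (in blocks) used to decide whether the tiled-to-tiled (T2T) sub-window
 * packet can be used directly, indexed by log2(bpp). See radv_sdma_use_t2t_scanline_copy().
 */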
static const VkExtent3D radv_sdma_t2t_alignment_2d_and_planar[] = {
   {16, 16, 1}, /* 1 bpp */
   {16, 8, 1},  /* 2 bpp */
   {8, 8, 1},   /* 4 bpp */
   {8, 4, 1},   /* 8 bpp */
   {4, 4, 1},   /* 16 bpp */
};

static const VkExtent3D radv_sdma_t2t_alignment_3d[] = {
   {8, 4, 8}, /* 1 bpp */
   {4, 4, 8}, /* 2 bpp */
   {4, 4, 4}, /* 4 bpp */
   {4, 2, 4}, /* 8 bpp */
   {2, 2, 4}, /* 16 bpp */
};

ALWAYS_INLINE static unsigned
radv_sdma_pitch_alignment(const struct radv_device *device, const unsigned bpp)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   if (pdev->info.sdma_ip_version >= SDMA_5_0)
      return MAX2(1, 4 / bpp);

   return 4;
}

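/* Sanity-check pitches (in blocks) against the limits of the SDMA sub-window copy packets.
 * The slice pitch is only checked when the copy actually uses the Z dimension.
 */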
ALWAYS_INLINE static void
radv_sdma_check_pitches(const unsigned pitch, const unsigned slice_pitch, const unsigned bpp, const bool uses_depth)
{
   ASSERTED const unsigned pitch_alignment = MAX2(1, 4 / bpp);
   assert(pitch);
   assert(pitch <= (1 << 14));
   assert(util_is_aligned(pitch, pitch_alignment));

   if (uses_depth) {
      ASSERTED const unsigned slice_pitch_alignment = 4;
      assert(slice_pitch);
      assert(slice_pitch <= (1 << 28));
      assert(util_is_aligned(slice_pitch, slice_pitch_alignment));
   }
}

ALWAYS_INLINE static enum gfx9_resource_type
radv_sdma_surface_resource_type(const struct radv_device *const device, const struct radeon_surf *const surf)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   if (pdev->info.sdma_ip_version >= SDMA_5_0) {
      /* Use the 2D resource type for rotated or Z swizzles. */
      if ((surf->u.gfx9.resource_type == RADEON_RESOURCE_1D || surf->u.gfx9.resource_type == RADEON_RESOURCE_3D) &&
          (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER || surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH))
         return RADEON_RESOURCE_2D;
   }

   return surf->u.gfx9.resource_type;
}

ALWAYS_INLINE static uint32_t
radv_sdma_surface_type_from_aspect_mask(const VkImageAspectFlags aspectMask)
{
   if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
      return 1;
   else if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
      return 2;

   return 0;
}

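/* Helpers to convert texel units into block units, which is what the copy packets operate on
 * for block-compressed formats. Values are rounded up so partially covered blocks are included.
 */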
ALWAYS_INLINE static VkExtent3D
radv_sdma_pixel_extent_to_blocks(const VkExtent3D extent, const unsigned blk_w, const unsigned blk_h)
{
   const VkExtent3D r = {
      .width = DIV_ROUND_UP(extent.width, blk_w),
      .height = DIV_ROUND_UP(extent.height, blk_h),
      .depth = extent.depth,
   };

   return r;
}

ALWAYS_INLINE static VkOffset3D
radv_sdma_pixel_offset_to_blocks(const VkOffset3D offset, const unsigned blk_w, const unsigned blk_h)
{
   const VkOffset3D r = {
      .x = DIV_ROUND_UP(offset.x, blk_w),
      .y = DIV_ROUND_UP(offset.y, blk_h),
      .z = offset.z,
   };

   return r;
}

ALWAYS_INLINE static unsigned
radv_sdma_pixels_to_blocks(const unsigned linear_pitch, const unsigned blk_w)
{
   return DIV_ROUND_UP(linear_pitch, blk_w);
}

ALWAYS_INLINE static unsigned
radv_sdma_pixel_area_to_blocks(const unsigned linear_slice_pitch, const unsigned blk_w, const unsigned blk_h)
{
   return DIV_ROUND_UP(DIV_ROUND_UP(linear_slice_pitch, blk_w), blk_h);
}

static struct radv_sdma_chunked_copy_info
radv_sdma_get_chunked_copy_info(const struct radv_device *const device, const struct radv_sdma_surf *const img,
                                const VkExtent3D extent)
{
   const unsigned extent_horizontal_blocks = DIV_ROUND_UP(extent.width, img->blk_w);
   const unsigned extent_vertical_blocks = DIV_ROUND_UP(extent.height, img->blk_h);
   const unsigned aligned_row_pitch = ALIGN(extent_horizontal_blocks, 4);
   const unsigned aligned_row_bytes = aligned_row_pitch * img->bpp;

   /* Assume that we can always copy at least one full row at a time. */
   const unsigned max_num_rows_per_copy = MIN2(RADV_SDMA_TRANSFER_TEMP_BYTES / aligned_row_bytes, extent.height);
   assert(max_num_rows_per_copy);

   /* Ensure that the number of rows copied at a time is a power of two. */
   const unsigned num_rows_per_copy = MAX2(1, util_next_power_of_two(max_num_rows_per_copy + 1) / 2);

   const struct radv_sdma_chunked_copy_info r = {
      .extent_horizontal_blocks = extent_horizontal_blocks,
      .extent_vertical_blocks = extent_vertical_blocks,
      .aligned_row_pitch = aligned_row_pitch,
      .num_rows_per_copy = num_rows_per_copy,
   };

   return r;
}

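/* Describe the buffer side of a buffer<->image copy. The pitch and slice pitch come from
 * the VkBufferImageCopy2 region (in texels); the block size and bpp are taken from the
 * image plane so that both sides of the copy use the same units.
 */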
struct radv_sdma_surf
radv_sdma_get_buf_surf(const struct radv_buffer *const buffer, const struct radv_image *const image,
                       const VkBufferImageCopy2 *const region, const VkImageAspectFlags aspect_mask)
{
   assert(util_bitcount(aspect_mask) == 1);

   const unsigned pitch = (region->bufferRowLength ? region->bufferRowLength : region->imageExtent.width);
   const unsigned slice_pitch =
      (region->bufferImageHeight ? region->bufferImageHeight : region->imageExtent.height) * pitch;

   const unsigned plane_idx = radv_plane_from_aspect(region->imageSubresource.aspectMask);
   const struct radeon_surf *surf = &image->planes[plane_idx].surface;
   const struct radv_sdma_surf info = {
      .va = radv_buffer_get_va(buffer->bo) + buffer->offset + region->bufferOffset,
      .pitch = pitch,
      .slice_pitch = slice_pitch,
      .bpp = surf->bpe,
      .blk_w = surf->blk_w,
      .blk_h = surf->blk_h,
      .is_linear = true,
   };

   return info;
}

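/* Pack the metadata configuration DWORD for SDMA engines that can access DCC/HTILE
 * compressed surfaces. Returns 0 when the engine doesn't support compression or the
 * image has no DCC/HTILE metadata.
 */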
static uint32_t
radv_sdma_get_metadata_config(const struct radv_device *const device, const struct radv_image *const image,
                              const struct radeon_surf *const surf, const VkImageSubresourceLayers subresource,
                              const VkImageAspectFlags aspect_mask)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   if (!pdev->info.sdma_supports_compression ||
       !(radv_dcc_enabled(image, subresource.mipLevel) || radv_image_has_htile(image))) {
      return 0;
   }

   const VkFormat format = vk_format_get_aspect_format(image->vk.format, aspect_mask);
   const struct util_format_description *desc = vk_format_description(format);

   const uint32_t data_format = ac_get_cb_format(pdev->info.gfx_level, vk_format_to_pipe_format(format));
   const uint32_t alpha_is_on_msb = ac_alpha_is_on_msb(&pdev->info, vk_format_to_pipe_format(format));
   const uint32_t number_type = radv_translate_buffer_numformat(desc, vk_format_get_first_non_void_channel(format));
   const uint32_t surface_type = radv_sdma_surface_type_from_aspect_mask(aspect_mask);
   const uint32_t max_comp_block_size = surf->u.gfx9.color.dcc.max_compressed_block_size;
   const uint32_t max_uncomp_block_size = radv_get_dcc_max_uncompressed_block_size(device, image);
   const uint32_t pipe_aligned = surf->u.gfx9.color.dcc.pipe_aligned;

   return data_format | alpha_is_on_msb << 8 | number_type << 9 | surface_type << 12 | max_comp_block_size << 24 |
          max_uncomp_block_size << 26 | pipe_aligned << 31;
}

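/* Pack the tiled-image information DWORD: element size, swizzle mode, dimension
 * (before SDMA v7), and either mip info (SDMA v5+) or the epitch (SDMA v4).
 */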
static uint32_t
radv_sdma_get_tiled_info_dword(const struct radv_device *const device, const struct radv_image *const image,
                               const struct radeon_surf *const surf, const VkImageSubresourceLayers subresource)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const uint32_t element_size = util_logbase2(surf->bpe);
   const uint32_t swizzle_mode = surf->has_stencil ? surf->u.gfx9.zs.stencil_swizzle_mode : surf->u.gfx9.swizzle_mode;
   uint32_t info = element_size | swizzle_mode << 3;
   const enum sdma_version ver = pdev->info.sdma_ip_version;

   if (ver < SDMA_7_0) {
      const enum gfx9_resource_type dimension = radv_sdma_surface_resource_type(device, surf);
      info |= dimension << 9;
   }

   if (ver >= SDMA_5_0) {
      const uint32_t mip_max = MAX2(image->vk.mip_levels, 1);
      const uint32_t mip_id = subresource.mipLevel;

      return info | (mip_max - 1) << 16 | mip_id << 20;
   } else if (ver >= SDMA_4_0) {
      return info | surf->u.gfx9.epitch << 16;
   } else {
      unreachable("unsupported SDMA version");
   }
}

static uint32_t
radv_sdma_get_tiled_header_dword(const struct radv_device *const device, const struct radv_image *const image,
                                 const VkImageSubresourceLayers subresource)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const enum sdma_version ver = pdev->info.sdma_ip_version;

   if (ver >= SDMA_5_0) {
      return 0;
   } else if (ver >= SDMA_4_0) {
      const uint32_t mip_max = MAX2(image->vk.mip_levels, 1);
      const uint32_t mip_id = subresource.mipLevel;
      return (mip_max - 1) << 20 | mip_id << 24;
   } else {
      unreachable("unsupported SDMA version");
   }
}

struct radv_sdma_surf
radv_sdma_get_surf(const struct radv_device *const device, const struct radv_image *const image,
                   const VkImageSubresourceLayers subresource, const VkOffset3D offset,
                   const VkImageAspectFlags aspect_mask)
{
   assert(util_bitcount(aspect_mask) == 1);

   const struct radv_physical_device *pdev = radv_device_physical(device);
   const unsigned plane_idx = radv_plane_from_aspect(aspect_mask);
   const unsigned binding_idx = image->disjoint ? plane_idx : 0;
   const struct radeon_surf *const surf = &image->planes[plane_idx].surface;
   const uint64_t va = radv_image_get_va(image, binding_idx);
   struct radv_sdma_surf info = {
      .extent =
         {
            .width = vk_format_get_plane_width(image->vk.format, plane_idx, image->vk.extent.width),
            .height = vk_format_get_plane_height(image->vk.format, plane_idx, image->vk.extent.height),
            .depth = image->vk.image_type == VK_IMAGE_TYPE_3D ? image->vk.extent.depth : image->vk.array_layers,
         },
      .offset =
         {
            .x = offset.x,
            .y = offset.y,
            .z = image->vk.image_type == VK_IMAGE_TYPE_3D ? offset.z : subresource.baseArrayLayer,
         },
      .bpp = surf->bpe,
      .blk_w = surf->blk_w,
      .blk_h = surf->blk_h,
      .mip_levels = image->vk.mip_levels,
      .micro_tile_mode = surf->micro_tile_mode,
      .is_linear = surf->is_linear,
      .is_3d = surf->u.gfx9.resource_type == RADEON_RESOURCE_3D,
   };

   if (surf->is_linear) {
      info.va = va + surf->u.gfx9.surf_offset + surf->u.gfx9.offset[subresource.mipLevel];
      info.pitch = surf->u.gfx9.pitch[subresource.mipLevel];
      info.slice_pitch = surf->blk_w * surf->blk_h * surf->u.gfx9.surf_slice_size / surf->bpe;
   } else {
      /* 1D resources should be linear. */
      assert(surf->u.gfx9.resource_type != RADEON_RESOURCE_1D);

      info.va = (va + surf->u.gfx9.surf_offset) | surf->tile_swizzle << 8;
      info.info_dword = radv_sdma_get_tiled_info_dword(device, image, surf, subresource);
      info.header_dword = radv_sdma_get_tiled_header_dword(device, image, subresource);

      if (pdev->info.sdma_supports_compression &&
          (radv_dcc_enabled(image, subresource.mipLevel) || radv_image_has_htile(image))) {
         info.meta_va = va + surf->meta_offset;
         info.meta_config = radv_sdma_get_metadata_config(device, image, surf, subresource, aspect_mask);
      }
   }

   return info;
}

static void
radv_sdma_emit_nop(const struct radv_device *device, struct radeon_cmdbuf *cs)
{
   /* SDMA NOP acts as a fence command and causes the SDMA engine to wait for pending copy operations. */
   radeon_check_space(device->ws, cs, 1);
   radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
}

void
radv_sdma_copy_buffer(const struct radv_device *device, struct radeon_cmdbuf *cs, uint64_t src_va, uint64_t dst_va,
                      uint64_t size)
{
   if (size == 0)
      return;

   const struct radv_physical_device *pdev = radv_device_physical(device);
   const enum sdma_version ver = pdev->info.sdma_ip_version;
   const unsigned max_size_per_packet = ver >= SDMA_5_2 ? SDMA_V5_2_COPY_MAX_BYTES : SDMA_V2_0_COPY_MAX_BYTES;

   unsigned align = ~0u;
   unsigned ncopy = DIV_ROUND_UP(size, max_size_per_packet);

   assert(ver >= SDMA_2_0);

   /* SDMA FW automatically enables a faster dword copy mode when
    * source, destination and size are all dword-aligned.
    *
    * When source and destination are dword-aligned, round down the size to
    * take advantage of faster dword copy, and copy the remaining few bytes
    * with the last copy packet.
    */
   if ((src_va & 0x3) == 0 && (dst_va & 0x3) == 0 && size > 4 && (size & 0x3) != 0) {
      align = ~0x3u;
      ncopy++;
   }

   radeon_check_space(device->ws, cs, ncopy * 7);

   for (unsigned i = 0; i < ncopy; i++) {
      unsigned csize = size >= 4 ? MIN2(size & align, max_size_per_packet) : size;
      radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
      radeon_emit(cs, ver >= SDMA_4_0 ? csize - 1 : csize);
      radeon_emit(cs, 0); /* src/dst endian swap */
      radeon_emit(cs, src_va);
      radeon_emit(cs, src_va >> 32);
      radeon_emit(cs, dst_va);
      radeon_emit(cs, dst_va >> 32);
      dst_va += csize;
      src_va += csize;
      size -= csize;
   }
}

void
radv_sdma_fill_buffer(const struct radv_device *device, struct radeon_cmdbuf *cs, const uint64_t va,
                      const uint64_t size, const uint32_t value)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   const uint32_t fill_size = 2; /* This means that the count is in dwords. */
   const uint32_t constant_fill_header = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0) | (fill_size & 0x3) << 30;

   /* This packet is the same since SDMA v2.4, haven't bothered to check older versions. */
   const enum sdma_version ver = pdev->info.sdma_ip_version;
   assert(ver >= SDMA_2_4);

   /* Maximum allowed fill size depends on the GPU.
    * Emit as many packets as necessary to fill all the bytes we need.
    */
   const uint64_t max_fill_bytes = BITFIELD64_MASK(ver >= SDMA_6_0 ? 30 : 22) & ~0x3;
   const unsigned num_packets = DIV_ROUND_UP(size, max_fill_bytes);
   ASSERTED unsigned cdw_max = radeon_check_space(device->ws, cs, num_packets * 5);

   for (unsigned i = 0; i < num_packets; ++i) {
      const uint64_t offset = i * max_fill_bytes;
      const uint64_t fill_bytes = MIN2(size - offset, max_fill_bytes);
      const uint64_t fill_va = va + offset;

      radeon_emit(cs, constant_fill_header);
      radeon_emit(cs, fill_va);
      radeon_emit(cs, fill_va >> 32);
      radeon_emit(cs, value);
      radeon_emit(cs, fill_bytes - 1); /* Must be programmed in bytes, even if the fill is done in dwords. */
   }

   assert(cs->cdw <= cdw_max);
}

static void
radv_sdma_emit_copy_linear_sub_window(const struct radv_device *device, struct radeon_cmdbuf *cs,
                                      const struct radv_sdma_surf *const src, const struct radv_sdma_surf *const dst,
                                      const VkExtent3D pix_extent)
{
   /* This packet is the same since SDMA v2.4, haven't bothered to check older versions.
    * The main difference is the bitfield sizes:
    *
    * v2.4 - src/dst_pitch: 14 bits, rect_z: 11 bits
    * v4.0 - src/dst_pitch: 19 bits, rect_z: 11 bits
    * v5.0 - src/dst_pitch: 19 bits, rect_z: 13 bits
    *
    * We currently use the smallest limits (from SDMA v2.4).
    */

   const VkOffset3D src_off = radv_sdma_pixel_offset_to_blocks(src->offset, src->blk_w, src->blk_h);
   const VkOffset3D dst_off = radv_sdma_pixel_offset_to_blocks(dst->offset, dst->blk_w, dst->blk_h);
   const VkExtent3D ext = radv_sdma_pixel_extent_to_blocks(pix_extent, src->blk_w, src->blk_h);
   const unsigned src_pitch = radv_sdma_pixels_to_blocks(src->pitch, src->blk_w);
   const unsigned dst_pitch = radv_sdma_pixels_to_blocks(dst->pitch, dst->blk_w);
   const unsigned src_slice_pitch = radv_sdma_pixel_area_to_blocks(src->slice_pitch, src->blk_w, src->blk_h);
   const unsigned dst_slice_pitch = radv_sdma_pixel_area_to_blocks(dst->slice_pitch, dst->blk_w, dst->blk_h);

   assert(src->bpp == dst->bpp);
   assert(util_is_power_of_two_nonzero(src->bpp));
   const bool uses_depth = src_off.z != 0 || dst_off.z != 0 || ext.depth != 1;
   radv_sdma_check_pitches(src_pitch, src_slice_pitch, src->bpp, uses_depth);
   radv_sdma_check_pitches(dst_pitch, dst_slice_pitch, dst->bpp, uses_depth);

   ASSERTED unsigned cdw_end = radeon_check_space(device->ws, cs, 13);

   radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
                      util_logbase2(src->bpp) << 29);
   radeon_emit(cs, src->va);
   radeon_emit(cs, src->va >> 32);
   radeon_emit(cs, src_off.x | src_off.y << 16);
   radeon_emit(cs, src_off.z | (src_pitch - 1) << 13);
   radeon_emit(cs, src_slice_pitch - 1);
   radeon_emit(cs, dst->va);
   radeon_emit(cs, dst->va >> 32);
   radeon_emit(cs, dst_off.x | dst_off.y << 16);
   radeon_emit(cs, dst_off.z | (dst_pitch - 1) << 13);
   radeon_emit(cs, dst_slice_pitch - 1);
   radeon_emit(cs, (ext.width - 1) | (ext.height - 1) << 16);
   radeon_emit(cs, (ext.depth - 1));

   assert(cs->cdw == cdw_end);
}

static void
radv_sdma_emit_copy_tiled_sub_window(const struct radv_device *device, struct radeon_cmdbuf *cs,
                                     const struct radv_sdma_surf *const tiled,
                                     const struct radv_sdma_surf *const linear, const VkExtent3D pix_extent,
                                     const bool detile)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   if (!pdev->info.sdma_supports_compression) {
      assert(!tiled->meta_va);
   }

   const VkOffset3D linear_off = radv_sdma_pixel_offset_to_blocks(linear->offset, linear->blk_w, linear->blk_h);
   const VkOffset3D tiled_off = radv_sdma_pixel_offset_to_blocks(tiled->offset, tiled->blk_w, tiled->blk_h);
   const VkExtent3D tiled_ext = radv_sdma_pixel_extent_to_blocks(tiled->extent, tiled->blk_w, tiled->blk_h);
   const VkExtent3D ext = radv_sdma_pixel_extent_to_blocks(pix_extent, tiled->blk_w, tiled->blk_h);
   const unsigned linear_pitch = radv_sdma_pixels_to_blocks(linear->pitch, tiled->blk_w);
   const unsigned linear_slice_pitch = radv_sdma_pixel_area_to_blocks(linear->slice_pitch, tiled->blk_w, tiled->blk_h);
   const bool dcc = !!tiled->meta_va;
   const bool uses_depth = linear_off.z != 0 || tiled_off.z != 0 || ext.depth != 1;

   assert(util_is_power_of_two_nonzero(tiled->bpp));
   radv_sdma_check_pitches(linear_pitch, linear_slice_pitch, tiled->bpp, uses_depth);

   ASSERTED unsigned cdw_end = radeon_check_space(device->ws, cs, 14 + (dcc ? 3 : 0));

   radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) | dcc << 19 |
                      detile << 31 | tiled->header_dword);
   radeon_emit(cs, tiled->va);
   radeon_emit(cs, tiled->va >> 32);
   radeon_emit(cs, tiled_off.x | tiled_off.y << 16);
   radeon_emit(cs, tiled_off.z | (tiled_ext.width - 1) << 16);
   radeon_emit(cs, (tiled_ext.height - 1) | (tiled_ext.depth - 1) << 16);
   radeon_emit(cs, tiled->info_dword);
   radeon_emit(cs, linear->va);
   radeon_emit(cs, linear->va >> 32);
   radeon_emit(cs, linear_off.x | linear_off.y << 16);
   radeon_emit(cs, linear_off.z | (linear_pitch - 1) << 16);
   radeon_emit(cs, linear_slice_pitch - 1);
   radeon_emit(cs, (ext.width - 1) | (ext.height - 1) << 16);
   radeon_emit(cs, (ext.depth - 1));

   if (tiled->meta_va) {
      const unsigned write_compress_enable = !detile;
      radeon_emit(cs, tiled->meta_va);
      radeon_emit(cs, tiled->meta_va >> 32);
      radeon_emit(cs, tiled->meta_config | write_compress_enable << 28);
   }

   assert(cs->cdw == cdw_end);
}

static void
radv_sdma_emit_copy_t2t_sub_window(const struct radv_device *device, struct radeon_cmdbuf *cs,
                                   const struct radv_sdma_surf *const src, const struct radv_sdma_surf *const dst,
                                   const VkExtent3D px_extent)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   /* We currently only support the SDMA v4+ versions of this packet. */
   assert(pdev->info.sdma_ip_version >= SDMA_4_0);

   /* On GFX10+ this supports DCC, but cannot copy a compressed surface to another compressed surface. */
   assert(!src->meta_va || !dst->meta_va);

   if (pdev->info.sdma_ip_version >= SDMA_4_0 && pdev->info.sdma_ip_version < SDMA_5_0) {
      /* SDMA v4 doesn't support mip_id selection in the T2T copy packet. */
      assert(src->header_dword >> 24 == 0);
      assert(dst->header_dword >> 24 == 0);
      /* SDMA v4 doesn't support any image metadata. */
      assert(!src->meta_va);
      assert(!dst->meta_va);
   }

   /* Despite the name, this can indicate DCC or HTILE metadata. */
   const uint32_t dcc = src->meta_va || dst->meta_va;
   /* 0 = compress (src is uncompressed), 1 = decompress (src is compressed). */
   const uint32_t dcc_dir = src->meta_va && !dst->meta_va;

   const VkOffset3D src_off = radv_sdma_pixel_offset_to_blocks(src->offset, src->blk_w, src->blk_h);
   const VkOffset3D dst_off = radv_sdma_pixel_offset_to_blocks(dst->offset, dst->blk_w, dst->blk_h);
   const VkExtent3D src_ext = radv_sdma_pixel_extent_to_blocks(src->extent, src->blk_w, src->blk_h);
   const VkExtent3D dst_ext = radv_sdma_pixel_extent_to_blocks(dst->extent, dst->blk_w, dst->blk_h);
   const VkExtent3D ext = radv_sdma_pixel_extent_to_blocks(px_extent, src->blk_w, src->blk_h);

   assert(util_is_power_of_two_nonzero(src->bpp));
   assert(util_is_power_of_two_nonzero(dst->bpp));

   ASSERTED unsigned cdw_end = radeon_check_space(device->ws, cs, 15 + (dcc ? 3 : 0));

   radeon_emit(cs, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0) | dcc << 19 | dcc_dir << 31 |
                      src->header_dword);
   radeon_emit(cs, src->va);
   radeon_emit(cs, src->va >> 32);
   radeon_emit(cs, src_off.x | src_off.y << 16);
   radeon_emit(cs, src_off.z | (src_ext.width - 1) << 16);
   radeon_emit(cs, (src_ext.height - 1) | (src_ext.depth - 1) << 16);
   radeon_emit(cs, src->info_dword);
   radeon_emit(cs, dst->va);
   radeon_emit(cs, dst->va >> 32);
   radeon_emit(cs, dst_off.x | dst_off.y << 16);
   radeon_emit(cs, dst_off.z | (dst_ext.width - 1) << 16);
   radeon_emit(cs, (dst_ext.height - 1) | (dst_ext.depth - 1) << 16);
   radeon_emit(cs, dst->info_dword);
   radeon_emit(cs, (ext.width - 1) | (ext.height - 1) << 16);
   radeon_emit(cs, (ext.depth - 1));

   if (dst->meta_va) {
      const uint32_t write_compress_enable = 1;
      radeon_emit(cs, dst->meta_va);
      radeon_emit(cs, dst->meta_va >> 32);
      radeon_emit(cs, dst->meta_config | write_compress_enable << 28);
   } else if (src->meta_va) {
      radeon_emit(cs, src->meta_va);
      radeon_emit(cs, src->meta_va >> 32);
      radeon_emit(cs, src->meta_config);
   }

   assert(cs->cdw == cdw_end);
}

void
radv_sdma_copy_buffer_image(const struct radv_device *device, struct radeon_cmdbuf *cs,
                            const struct radv_sdma_surf *buf, const struct radv_sdma_surf *img,
                            const VkExtent3D extent, bool to_image)
{
   if (img->is_linear) {
      if (to_image)
         radv_sdma_emit_copy_linear_sub_window(device, cs, buf, img, extent);
      else
         radv_sdma_emit_copy_linear_sub_window(device, cs, img, buf, extent);
   } else {
      radv_sdma_emit_copy_tiled_sub_window(device, cs, img, buf, extent, !to_image);
   }
}

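/* Returns true when the buffer's pitches don't meet the engine's alignment requirements,
 * in which case the copy must go through radv_sdma_copy_buffer_image_unaligned().
 */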
bool
radv_sdma_use_unaligned_buffer_image_copy(const struct radv_device *device, const struct radv_sdma_surf *buf,
                                          const struct radv_sdma_surf *img, const VkExtent3D ext)
{
   const unsigned pitch_blocks = radv_sdma_pixels_to_blocks(buf->pitch, img->blk_w);
   if (!util_is_aligned(pitch_blocks, radv_sdma_pitch_alignment(device, img->bpp)))
      return true;

   const bool uses_depth = img->offset.z != 0 || ext.depth != 1;
   if (!img->is_linear && uses_depth) {
      const unsigned slice_pitch_blocks = radv_sdma_pixel_area_to_blocks(buf->slice_pitch, img->blk_w, img->blk_h);
      if (!util_is_aligned(slice_pitch_blocks, 4))
         return true;
   }

   return false;
}

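/* Copy between a buffer and an image with pitches that the copy packets can't handle directly.
 * Rows are staged through a temporary buffer with an aligned pitch: sub-window copies move the
 * rows between the image and the temporary buffer, and plain linear copies move each row between
 * the temporary buffer and the user buffer, with NOP fences between dependent copies.
 */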
void
radv_sdma_copy_buffer_image_unaligned(const struct radv_device *device, struct radeon_cmdbuf *cs,
                                      const struct radv_sdma_surf *buf, const struct radv_sdma_surf *img_in,
                                      const VkExtent3D base_extent, struct radeon_winsys_bo *temp_bo, bool to_image)
{
   const struct radv_sdma_chunked_copy_info info = radv_sdma_get_chunked_copy_info(device, img_in, base_extent);
   struct radv_sdma_surf img = *img_in;
   struct radv_sdma_surf tmp = {
      .va = temp_bo->va,
      .bpp = img.bpp,
      .blk_w = img.blk_w,
      .blk_h = img.blk_h,
      .pitch = info.aligned_row_pitch * img.blk_w,
      .slice_pitch = info.aligned_row_pitch * img.blk_w * info.extent_vertical_blocks * img.blk_h,
   };

   VkExtent3D extent = base_extent;
   const unsigned buf_pitch_blocks = DIV_ROUND_UP(buf->pitch, img.blk_w);
   const unsigned buf_slice_pitch_blocks = DIV_ROUND_UP(DIV_ROUND_UP(buf->slice_pitch, img.blk_w), img.blk_h);
   assert(buf_pitch_blocks);
   assert(buf_slice_pitch_blocks);
   extent.depth = 1;

   for (unsigned slice = 0; slice < base_extent.depth; ++slice) {
      for (unsigned row = 0; row < info.extent_vertical_blocks; row += info.num_rows_per_copy) {
         const unsigned rows = MIN2(info.extent_vertical_blocks - row, info.num_rows_per_copy);

         img.offset.y = img_in->offset.y + row * img.blk_h;
         img.offset.z = img_in->offset.z + slice;
         extent.height = rows * img.blk_h;
         tmp.slice_pitch = tmp.pitch * rows * img.blk_h;

         if (!to_image) {
            /* Copy the rows from the source image to the temporary buffer. */
            if (img.is_linear)
               radv_sdma_emit_copy_linear_sub_window(device, cs, &img, &tmp, extent);
            else
               radv_sdma_emit_copy_tiled_sub_window(device, cs, &img, &tmp, extent, true);

            /* Wait for the copy to finish. */
            radv_sdma_emit_nop(device, cs);
         }

         /* buffer to image: copy each row from source buffer to temporary buffer.
          * image to buffer: copy each row from temporary buffer to destination buffer.
          */
         for (unsigned r = 0; r < rows; ++r) {
            const uint64_t buf_va =
               buf->va + slice * buf_slice_pitch_blocks * img.bpp + (row + r) * buf_pitch_blocks * img.bpp;
            const uint64_t tmp_va = tmp.va + r * info.aligned_row_pitch * img.bpp;
            radv_sdma_copy_buffer(device, cs, to_image ? buf_va : tmp_va, to_image ? tmp_va : buf_va,
                                  info.extent_horizontal_blocks * img.bpp);
         }

         /* Wait for the copy to finish. */
         radv_sdma_emit_nop(device, cs);

         if (to_image) {
            /* Copy the rows from the temporary buffer to the destination image. */
            if (img.is_linear)
               radv_sdma_emit_copy_linear_sub_window(device, cs, &tmp, &img, extent);
            else
               radv_sdma_emit_copy_tiled_sub_window(device, cs, &img, &tmp, extent, false);

            /* Wait for the copy to finish. */
            radv_sdma_emit_nop(device, cs);
         }
      }
   }
}

void
radv_sdma_copy_image(const struct radv_device *device, struct radeon_cmdbuf *cs, const struct radv_sdma_surf *src,
                     const struct radv_sdma_surf *dst, const VkExtent3D extent)
{
   if (src->is_linear) {
      if (dst->is_linear) {
         radv_sdma_emit_copy_linear_sub_window(device, cs, src, dst, extent);
      } else {
         radv_sdma_emit_copy_tiled_sub_window(device, cs, dst, src, extent, false);
      }
   } else {
      if (dst->is_linear) {
         radv_sdma_emit_copy_tiled_sub_window(device, cs, src, dst, extent, true);
      } else {
         radv_sdma_emit_copy_t2t_sub_window(device, cs, src, dst, extent);
      }
   }
}

bool
radv_sdma_use_t2t_scanline_copy(const struct radv_device *device, const struct radv_sdma_surf *src,
                                const struct radv_sdma_surf *dst, const VkExtent3D extent)
{
   /* These need a linear-to-linear / linear-to-tiled copy. */
   if (src->is_linear || dst->is_linear)
      return false;

   /* SDMA can't do format conversion. */
   assert(src->bpp == dst->bpp);

   const struct radv_physical_device *pdev = radv_device_physical(device);
   const enum sdma_version ver = pdev->info.sdma_ip_version;
   if (ver < SDMA_5_0) {
      /* SDMA v4.x and older doesn't support proper mip level selection. */
      if (src->mip_levels > 1 || dst->mip_levels > 1)
         return true;
   }

   /* The two images can have a different block size,
    * but must have the same swizzle mode.
    */
   if (src->micro_tile_mode != dst->micro_tile_mode)
      return true;

   /* The T2T subwindow copy packet only has fields for one metadata configuration.
    * It can either compress or decompress, or copy uncompressed images, but it
    * can't copy from a compressed image to another.
    */
   if (src->meta_va && dst->meta_va)
      return true;

   const bool needs_3d_alignment = src->is_3d && (src->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
                                                  src->micro_tile_mode == RADEON_MICRO_MODE_STANDARD);
   const unsigned log2bpp = util_logbase2(src->bpp);
   const VkExtent3D *const alignment =
      needs_3d_alignment ? &radv_sdma_t2t_alignment_3d[log2bpp] : &radv_sdma_t2t_alignment_2d_and_planar[log2bpp];

   const VkExtent3D copy_extent_blk = radv_sdma_pixel_extent_to_blocks(extent, src->blk_w, src->blk_h);
   const VkOffset3D src_offset_blk = radv_sdma_pixel_offset_to_blocks(src->offset, src->blk_w, src->blk_h);
   const VkOffset3D dst_offset_blk = radv_sdma_pixel_offset_to_blocks(dst->offset, dst->blk_w, dst->blk_h);

   if (!util_is_aligned(copy_extent_blk.width, alignment->width) ||
       !util_is_aligned(copy_extent_blk.height, alignment->height) ||
       !util_is_aligned(copy_extent_blk.depth, alignment->depth))
      return true;

   if (!util_is_aligned(src_offset_blk.x, alignment->width) || !util_is_aligned(src_offset_blk.y, alignment->height) ||
       !util_is_aligned(src_offset_blk.z, alignment->depth))
      return true;

   if (!util_is_aligned(dst_offset_blk.x, alignment->width) || !util_is_aligned(dst_offset_blk.y, alignment->height) ||
       !util_is_aligned(dst_offset_blk.z, alignment->depth))
      return true;

   return false;
}

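/* Tiled-to-tiled copy that can't use the T2T packet directly: detile batches of rows
 * from the source into a temporary linear buffer, then tile them into the destination,
 * with NOP fences between the dependent copies.
 */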
void
radv_sdma_copy_image_t2t_scanline(const struct radv_device *device, struct radeon_cmdbuf *cs,
                                  const struct radv_sdma_surf *src, const struct radv_sdma_surf *dst,
                                  const VkExtent3D extent, struct radeon_winsys_bo *temp_bo)
{
   const struct radv_sdma_chunked_copy_info info = radv_sdma_get_chunked_copy_info(device, src, extent);
   struct radv_sdma_surf t2l_src = *src;
   struct radv_sdma_surf t2l_dst = {
      .va = temp_bo->va,
      .bpp = src->bpp,
      .blk_w = src->blk_w,
      .blk_h = src->blk_h,
      .pitch = info.aligned_row_pitch * src->blk_w,
   };
   struct radv_sdma_surf l2t_dst = *dst;
   struct radv_sdma_surf l2t_src = {
      .va = temp_bo->va,
      .bpp = dst->bpp,
      .blk_w = dst->blk_w,
      .blk_h = dst->blk_h,
      .pitch = info.aligned_row_pitch * dst->blk_w,
   };

   for (unsigned slice = 0; slice < extent.depth; ++slice) {
      for (unsigned row = 0; row < info.extent_vertical_blocks; row += info.num_rows_per_copy) {
         const unsigned rows = MIN2(info.extent_vertical_blocks - row, info.num_rows_per_copy);

         const VkExtent3D t2l_extent = {
            .width = info.extent_horizontal_blocks * src->blk_w,
            .height = rows * src->blk_h,
            .depth = 1,
         };

         t2l_src.offset.y = src->offset.y + row * src->blk_h;
         t2l_src.offset.z = src->offset.z + slice;
         t2l_dst.slice_pitch = t2l_dst.pitch * t2l_extent.height;

         radv_sdma_emit_copy_tiled_sub_window(device, cs, &t2l_src, &t2l_dst, t2l_extent, true);
         radv_sdma_emit_nop(device, cs);

         const VkExtent3D l2t_extent = {
            .width = info.extent_horizontal_blocks * dst->blk_w,
            .height = rows * dst->blk_h,
            .depth = 1,
         };

         l2t_dst.offset.y = dst->offset.y + row * dst->blk_h;
         l2t_dst.offset.z = dst->offset.z + slice;
         l2t_src.slice_pitch = l2t_src.pitch * l2t_extent.height;

         radv_sdma_emit_copy_tiled_sub_window(device, cs, &l2t_dst, &l2t_src, l2t_extent, false);
         radv_sdma_emit_nop(device, cs);
      }
   }
}