xref: /aosp_15_r20/external/mesa3d/src/asahi/vulkan/hk_image_view.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright 2024 Valve Corporation
 * Copyright 2024 Alyssa Rosenzweig
 * Copyright 2022-2023 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "hk_image_view.h"
#include "util/format/u_format.h"
#include "vulkan/vulkan_core.h"

#include "agx_helpers.h"
#include "agx_nir_passes.h"
#include "agx_pack.h"
#include "hk_device.h"
#include "hk_entrypoints.h"
#include "hk_image.h"
#include "hk_physical_device.h"

#include "layout.h"
#include "vk_format.h"

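/* A view may be consumed by several different hardware paths, each wanting a
 * slightly different descriptor. These usages select which variant we pack:
 * plain sampling, storage image access, input attachments, the
 * background/end-of-tile programs (optionally layered), and the EMRT path,
 * which (as packed below) accesses render targets as 2D array images.
 */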
enum hk_desc_usage {
   HK_DESC_USAGE_SAMPLED,
   HK_DESC_USAGE_STORAGE,
   HK_DESC_USAGE_INPUT,
   HK_DESC_USAGE_BG_EOT,
   HK_DESC_USAGE_LAYERED_BG_EOT,
   HK_DESC_USAGE_EMRT,
};

static bool
hk_image_view_type_is_array(VkImageViewType view_type)
{
   switch (view_type) {
   case VK_IMAGE_VIEW_TYPE_1D:
   case VK_IMAGE_VIEW_TYPE_2D:
   case VK_IMAGE_VIEW_TYPE_3D:
   case VK_IMAGE_VIEW_TYPE_CUBE:
      return false;

   case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
   case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
   case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
      return true;

   default:
      unreachable("Invalid image view type");
   }
}

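/* Map a Vulkan image view type onto the hardware texture dimension. Outside
 * of plain sampling, cube views are accessed as 2D arrays of their faces, and
 * the background/EOT paths ignore the application-provided view type
 * entirely.
 */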
static enum agx_texture_dimension
translate_image_view_type(VkImageViewType view_type, bool msaa, bool layered,
                          enum hk_desc_usage usage)
{
   if (usage == HK_DESC_USAGE_EMRT || usage == HK_DESC_USAGE_INPUT ||
       (usage == HK_DESC_USAGE_LAYERED_BG_EOT && layered)) {
      return msaa ? AGX_TEXTURE_DIMENSION_2D_ARRAY_MULTISAMPLED
                  : AGX_TEXTURE_DIMENSION_2D_ARRAY;
   }

   /* For background/EOT, we ignore the application-provided view type */
   if (usage == HK_DESC_USAGE_BG_EOT || usage == HK_DESC_USAGE_LAYERED_BG_EOT) {
      return msaa ? AGX_TEXTURE_DIMENSION_2D_MULTISAMPLED
                  : AGX_TEXTURE_DIMENSION_2D;
   }

   bool cubes_to_2d = usage != HK_DESC_USAGE_SAMPLED;

   switch (view_type) {
   case VK_IMAGE_VIEW_TYPE_1D:
   case VK_IMAGE_VIEW_TYPE_2D:
      return msaa ? AGX_TEXTURE_DIMENSION_2D_MULTISAMPLED
                  : AGX_TEXTURE_DIMENSION_2D;

   case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
   case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
      return msaa ? AGX_TEXTURE_DIMENSION_2D_ARRAY_MULTISAMPLED
                  : AGX_TEXTURE_DIMENSION_2D_ARRAY;

   case VK_IMAGE_VIEW_TYPE_3D:
      assert(!msaa);
      return AGX_TEXTURE_DIMENSION_3D;

   case VK_IMAGE_VIEW_TYPE_CUBE:
      assert(!msaa);
      return cubes_to_2d ? AGX_TEXTURE_DIMENSION_2D_ARRAY
                         : AGX_TEXTURE_DIMENSION_CUBE;

   case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
      assert(!msaa);
      return cubes_to_2d ? AGX_TEXTURE_DIMENSION_2D_ARRAY
                         : AGX_TEXTURE_DIMENSION_CUBE_ARRAY;

   default:
      unreachable("Invalid image view type");
   }
}

static enum pipe_swizzle
vk_swizzle_to_pipe(VkComponentSwizzle swizzle)
{
   switch (swizzle) {
   case VK_COMPONENT_SWIZZLE_R:
      return PIPE_SWIZZLE_X;
   case VK_COMPONENT_SWIZZLE_G:
      return PIPE_SWIZZLE_Y;
   case VK_COMPONENT_SWIZZLE_B:
      return PIPE_SWIZZLE_Z;
   case VK_COMPONENT_SWIZZLE_A:
      return PIPE_SWIZZLE_W;
   case VK_COMPONENT_SWIZZLE_ONE:
      return PIPE_SWIZZLE_1;
   case VK_COMPONENT_SWIZZLE_ZERO:
      return PIPE_SWIZZLE_0;
   default:
      unreachable("Invalid component swizzle");
   }
}

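/* For stencil-only views of packed depth/stencil formats, switch to the
 * variant format that selects just the stencil bits.
 */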
static enum pipe_format
get_stencil_format(enum pipe_format format)
{
   switch (format) {
   case PIPE_FORMAT_S8_UINT:
      return PIPE_FORMAT_S8_UINT;
   case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      return PIPE_FORMAT_X24S8_UINT;
   case PIPE_FORMAT_S8_UINT_Z24_UNORM:
      return PIPE_FORMAT_S8X24_UINT;
   case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
      return PIPE_FORMAT_X32_S8X24_UINT;
   default:
      unreachable("Unsupported depth/stencil format");
   }
}

struct hk_3d {
   unsigned x, y, z;
};

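/* Ratio of image texels to view texels on each axis. This is {1, 1, 1} unless
 * an uncompressed view is made of a compressed image, in which case each view
 * texel covers one compressed block. Illustrative example (formats chosen
 * hypothetically): a BC1 image viewed as RG32_UINT gives {4, 4, 1}, so a
 * 57x57 level is addressed as DIV_ROUND_UP(57, 4) = 15 view texels per axis.
 */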
static struct hk_3d
view_denominator(struct hk_image_view *view)
{
   enum pipe_format view_format = vk_format_to_pipe_format(view->vk.format);
   enum pipe_format img_format =
      vk_format_to_pipe_format(view->vk.image->format);

   if (util_format_is_compressed(view_format)) {
      /*
       * We can do an uncompressed view of a compressed image but not the other
       * way around.
       */
      assert(util_format_is_compressed(img_format));
      assert(util_format_get_blockwidth(img_format) ==
             util_format_get_blockwidth(view_format));
      assert(util_format_get_blockheight(img_format) ==
             util_format_get_blockheight(view_format));
      assert(util_format_get_blockdepth(img_format) ==
             util_format_get_blockdepth(view_format));

      return (struct hk_3d){1, 1, 1};
   }

   if (!util_format_is_compressed(img_format)) {
      /* Both formats uncompressed */
      return (struct hk_3d){1, 1, 1};
   }

   /* Else, img is compressed but view is not */
   return (struct hk_3d){
      util_format_get_blockwidth(img_format),
      util_format_get_blockheight(img_format),
      util_format_get_blockdepth(img_format),
   };
}

static enum pipe_format
format_for_plane(struct hk_image_view *view, unsigned view_plane)
{
   const struct vk_format_ycbcr_info *ycbcr_info =
      vk_format_get_ycbcr_info(view->vk.format);

   assert(ycbcr_info || view_plane == 0);
   VkFormat plane_format =
      ycbcr_info ? ycbcr_info->planes[view_plane].format : view->vk.format;

   enum pipe_format p_format = vk_format_to_pipe_format(plane_format);
   if (view->vk.aspects == VK_IMAGE_ASPECT_STENCIL_BIT)
      p_format = get_stencil_format(p_format);

   return p_format;
}

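/* Pack the hardware texture descriptor for one plane of an image view. */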
static void
pack_texture(struct hk_image_view *view, unsigned view_plane,
             enum hk_desc_usage usage, struct agx_texture_packed *out)
{
   struct hk_image *image = container_of(view->vk.image, struct hk_image, vk);
   const uint8_t image_plane = view->planes[view_plane].image_plane;
   struct ail_layout *layout = &image->planes[image_plane].layout;
   uint64_t base_addr = hk_image_base_address(image, image_plane);

   bool cubes_to_2d = usage != HK_DESC_USAGE_SAMPLED;

   unsigned level = view->vk.base_mip_level;
   unsigned layer = view->vk.base_array_layer;

   enum pipe_format p_format = format_for_plane(view, view_plane);
   const struct util_format_description *desc =
      util_format_description(p_format);

   struct hk_3d denom = view_denominator(view);

   uint8_t format_swizzle[4] = {
      desc->swizzle[0],
      desc->swizzle[1],
      desc->swizzle[2],
      desc->swizzle[3],
   };

   /* Different APIs have different depth/stencil swizzle rules. Vulkan expects
    * R001 behaviour, but Mesa's format table does not match that, so override
    * the format swizzle here.
    */
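   /* With the override below, sampling a depth or stencil aspect returns
    * (value, 0, 0, 1) before the user swizzle is applied.
    */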
   if (util_format_is_depth_or_stencil(p_format)) {
      format_swizzle[0] = PIPE_SWIZZLE_X;
      format_swizzle[1] = PIPE_SWIZZLE_0;
      format_swizzle[2] = PIPE_SWIZZLE_0;
      format_swizzle[3] = PIPE_SWIZZLE_1;
   }

   /* We only have a single swizzle for the user swizzle and the format
    * fixup, so compose them now.
    */
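   /* Following util_format_compose_swizzles semantics, out[i] =
    * format_swizzle[view_swizzle[i]], with 0/1 selectors passed through.
    * E.g. a depth view's X001 format swizzle composed with a hypothetical
    * user swizzle of RRRA yields (X, X, X, 1).
    */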
   uint8_t out_swizzle[4];
   uint8_t view_swizzle[4] = {
      vk_swizzle_to_pipe(view->vk.swizzle.r),
      vk_swizzle_to_pipe(view->vk.swizzle.g),
      vk_swizzle_to_pipe(view->vk.swizzle.b),
      vk_swizzle_to_pipe(view->vk.swizzle.a),
   };

   unsigned layers = view->vk.layer_count;
   if (view->vk.view_type == VK_IMAGE_VIEW_TYPE_3D) {
      layers = DIV_ROUND_UP(layout->depth_px, denom.z);
   } else if (!cubes_to_2d &&
              (view->vk.view_type == VK_IMAGE_VIEW_TYPE_CUBE ||
               view->vk.view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)) {

      layers /= 6;
   }

   util_format_compose_swizzles(format_swizzle, view_swizzle, out_swizzle);

   agx_pack(out, TEXTURE, cfg) {
      cfg.dimension = translate_image_view_type(
         view->vk.view_type, view->vk.image->samples > 1, layers > 1, usage);
      cfg.layout = agx_translate_layout(layout->tiling);
      cfg.channels = ail_pixel_format[p_format].channels;
      cfg.type = ail_pixel_format[p_format].type;
      cfg.srgb = util_format_is_srgb(p_format);

      cfg.swizzle_r = agx_channel_from_pipe(out_swizzle[0]);
      cfg.swizzle_g = agx_channel_from_pipe(out_swizzle[1]);
      cfg.swizzle_b = agx_channel_from_pipe(out_swizzle[2]);
      cfg.swizzle_a = agx_channel_from_pipe(out_swizzle[3]);

      if (denom.x > 1) {
         assert(view->vk.level_count == 1);
         assert(view->vk.layer_count == 1);

         cfg.address = base_addr + ail_get_layer_level_B(layout, layer, level);
         cfg.width = DIV_ROUND_UP(u_minify(layout->width_px, level), denom.x);
         cfg.height = DIV_ROUND_UP(u_minify(layout->height_px, level), denom.y);
         cfg.first_level = 0;
         cfg.last_level = 1;
      } else {
         cfg.address = base_addr + ail_get_layer_offset_B(layout, layer);
         cfg.width = layout->width_px;
         cfg.height = layout->height_px;
         cfg.first_level = level;
         cfg.last_level = level + view->vk.level_count - 1;
      }

      cfg.unk_mipmapped = layout->levels > 1;
      cfg.srgb_2_channel = cfg.srgb && util_format_colormask(desc) == 0x3;

      if (ail_is_compressed(layout)) {
         cfg.compressed_1 = true;
         cfg.extended = true;

         cfg.acceleration_buffer = base_addr + layout->metadata_offset_B +
                                   (layer * layout->compression_layer_stride_B);
      }

      if (layout->tiling == AIL_TILING_LINEAR &&
          (hk_image_view_type_is_array(view->vk.view_type))) {

         cfg.depth_linear = layers;
         cfg.layer_stride_linear = layout->layer_stride_B - 0x80;
         cfg.extended = true;
      } else {
         assert((layout->tiling != AIL_TILING_LINEAR) || (layers == 1));
         cfg.depth = layers;
      }

      if (view->vk.image->samples > 1) {
         cfg.samples = agx_translate_sample_count(view->vk.image->samples);
      }

      if (layout->tiling == AIL_TILING_LINEAR) {
         cfg.stride = ail_get_linear_stride_B(layout, 0) - 16;
      } else {
         assert(layout->tiling == AIL_TILING_TWIDDLED ||
                layout->tiling == AIL_TILING_TWIDDLED_COMPRESSED);

         cfg.page_aligned_layers = layout->page_aligned_layers;
      }
   }
}

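/* Pack the hardware PBE (pixel backend, i.e. image store) descriptor for one
 * plane of an image view.
 */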
static void
pack_pbe(struct hk_device *dev, struct hk_image_view *view, unsigned view_plane,
         enum hk_desc_usage usage, struct agx_pbe_packed *out)
{
   struct hk_image *image = container_of(view->vk.image, struct hk_image, vk);
   const uint8_t image_plane = view->planes[view_plane].image_plane;
   struct ail_layout *layout = &image->planes[image_plane].layout;
   uint64_t base_addr = hk_image_base_address(image, image_plane);

   unsigned level = view->vk.base_mip_level;
   unsigned layer = view->vk.base_array_layer;

   enum pipe_format p_format = format_for_plane(view, view_plane);
   const struct util_format_description *desc =
      util_format_description(p_format);

   bool eot =
      usage == HK_DESC_USAGE_BG_EOT || usage == HK_DESC_USAGE_LAYERED_BG_EOT;

   /* The tilebuffer is already in sRGB space if needed. Do not convert for
    * end-of-tile descriptors.
    */
   if (eot)
      p_format = util_format_linear(p_format);

   bool msaa = view->vk.image->samples > 1;
   struct hk_3d denom = view_denominator(view);

   unsigned layers = view->vk.view_type == VK_IMAGE_VIEW_TYPE_3D
                        ? image->vk.extent.depth
                        : view->vk.layer_count;

   agx_pack(out, PBE, cfg) {
      cfg.dimension =
         translate_image_view_type(view->vk.view_type, msaa, layers > 1, usage);
      cfg.layout = agx_translate_layout(layout->tiling);
      cfg.channels = ail_pixel_format[p_format].channels;
      cfg.type = ail_pixel_format[p_format].type;
      cfg.srgb = util_format_is_srgb(p_format);

      assert(desc->nr_channels >= 1 && desc->nr_channels <= 4);

      for (unsigned i = 0; i < desc->nr_channels; ++i) {
         if (desc->swizzle[i] == 0)
            cfg.swizzle_r = i;
         else if (desc->swizzle[i] == 1)
            cfg.swizzle_g = i;
         else if (desc->swizzle[i] == 2)
            cfg.swizzle_b = i;
         else if (desc->swizzle[i] == 3)
            cfg.swizzle_a = i;
      }

      cfg.buffer = base_addr + ail_get_layer_offset_B(layout, layer);
      cfg.unk_mipmapped = layout->levels > 1;

      if (msaa && !eot) {
         /* Multisampled images are bound like buffer textures, with
          * addressing arithmetic to determine the texel to write.
          *
          * Note that the end-of-tile program uses real multisample images
          * with image_write_block instructions.
          */
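         /* Sketch of the arithmetic with hypothetical numbers: a single-layer
          * 64x64 4-sample RGBA8 image occupies 64 * 64 * 4 * 4 = 65536 bytes
          * (padding aside), i.e. size_px = 65536 / 4 = 16384 texels. Assuming
          * AGX_TEXTURE_BUFFER_WIDTH is 1024, that binds as a 1024x16 linear
          * "buffer" that shaders index with flat addressing.
          */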
         unsigned blocksize_B = util_format_get_blocksize(p_format);
         unsigned size_px =
            (layout->size_B - layout->layer_stride_B * layer) / blocksize_B;

         cfg.dimension = AGX_TEXTURE_DIMENSION_2D;
         cfg.layout = AGX_LAYOUT_LINEAR;
         cfg.width = AGX_TEXTURE_BUFFER_WIDTH;
         cfg.height = DIV_ROUND_UP(size_px, cfg.width);
         cfg.stride = (cfg.width * blocksize_B) - 4;
         cfg.layers = 1;
         cfg.levels = 1;

         cfg.buffer += layout->level_offsets_B[level];
         cfg.level = 0;
      } else {
         if (denom.x > 1) {
            assert(denom.z == 1 && "todo how to handle?");
            assert(view->vk.level_count == 1);
            assert(view->vk.layer_count == 1);

            cfg.buffer =
               base_addr + ail_get_layer_level_B(layout, layer, level);
            cfg.width =
               DIV_ROUND_UP(u_minify(layout->width_px, level), denom.x);
            cfg.height =
               DIV_ROUND_UP(u_minify(layout->height_px, level), denom.y);
            cfg.level = 0;
         } else {
            cfg.buffer = base_addr + ail_get_layer_offset_B(layout, layer);
            cfg.width = layout->width_px;
            cfg.height = layout->height_px;
            cfg.level = level;
         }

         if (layout->tiling == AIL_TILING_LINEAR &&
             (hk_image_view_type_is_array(view->vk.view_type))) {

            cfg.depth_linear = layers;
            cfg.layer_stride_linear = (layout->layer_stride_B - 0x80);
            cfg.extended = true;
         } else {
            assert((layout->tiling != AIL_TILING_LINEAR) || (layers == 1));
            cfg.layers = layers;
         }

         cfg.levels = image->vk.mip_levels;

         if (layout->tiling == AIL_TILING_LINEAR) {
            cfg.stride = ail_get_linear_stride_B(layout, level) - 4;
            assert(cfg.levels == 1);
         } else {
            cfg.page_aligned_layers = layout->page_aligned_layers;
         }

         if (image->vk.samples > 1)
            cfg.samples = agx_translate_sample_count(image->vk.samples);
      }

      if (ail_is_compressed(layout) && usage != HK_DESC_USAGE_EMRT) {
         cfg.compressed_1 = true;
         cfg.extended = true;

         cfg.acceleration_buffer = base_addr + layout->metadata_offset_B +
                                   (layer * layout->compression_layer_stride_B);
      }

      /* When the descriptor isn't extended architecturally, we use
       * the last 8 bytes as a sideband to accelerate image atomics.
       */
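      /* Since the non-extended descriptor is architecturally shorter, the
       * texturing hardware never reads these bytes; the _sw fields are
       * presumably consumed by the software image-atomic lowering, which
       * needs tile dimensions, level offsets, and sample counts to compute
       * texel addresses itself.
       */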
      if (!cfg.extended && layout->writeable_image) {
         if (msaa) {
            assert(denom.x == 1 && "no MSAA of block-compressed");

            cfg.aligned_width_msaa_sw =
               align(u_minify(layout->width_px, level),
                     layout->tilesize_el[level].width_el);
         } else {
            cfg.level_offset_sw = ail_get_level_offset_B(layout, cfg.level);
         }

         cfg.sample_count_log2_sw = util_logbase2(image->vk.samples);

         if (layout->tiling == AIL_TILING_TWIDDLED) {
            struct ail_tile tile_size = layout->tilesize_el[level];
            cfg.tile_width_sw = tile_size.width_el;
            cfg.tile_height_sw = tile_size.height_el;

            cfg.layer_stride_sw = layout->layer_stride_B;
         }
      }
   };
}

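/* Upload a descriptor to the device-global image table, deduplicating against
 * descriptors this view already uploaded so that identical texture and PBE
 * words share a single table slot and index.
 */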
static VkResult
add_descriptor(struct hk_device *dev, struct hk_image_view *view,
               struct agx_texture_packed *desc,
               struct agx_texture_packed *cached, uint32_t *index)
{
   /* First, look for a descriptor we already uploaded */
   for (unsigned i = 0; i < view->descriptor_count; ++i) {
      if (memcmp(&cached[i], desc, sizeof *desc) == 0) {
         *index = view->descriptor_index[i];
         return VK_SUCCESS;
      }
   }

   /* Else, add a new descriptor */
   VkResult result =
      hk_descriptor_table_add(dev, &dev->images, desc, sizeof *desc, index);
   if (result != VK_SUCCESS)
      return result;

   uint32_t local_index = view->descriptor_count++;
   assert(local_index < HK_MAX_IMAGE_DESCS);

   cached[local_index] = *desc;
   view->descriptor_index[local_index] = *index;
   return VK_SUCCESS;
}

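/* Initialize an image view: resolve which image plane backs each view plane,
 * then pack and upload every texture/PBE descriptor variant that the view's
 * usage flags may require.
 */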
static VkResult
hk_image_view_init(struct hk_device *dev, struct hk_image_view *view,
                   bool driver_internal,
                   const VkImageViewCreateInfo *pCreateInfo)
{
   VK_FROM_HANDLE(hk_image, image, pCreateInfo->image);
   VkResult result;

   memset(view, 0, sizeof(*view));

   vk_image_view_init(&dev->vk, &view->vk, driver_internal, pCreateInfo);

   /* First, figure out which image planes we need. For depth/stencil, we only
    * have one aspect viewed at a time.
    */
   if (image->vk.aspects &
       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {

      view->plane_count = 1;
      view->planes[0].image_plane =
         hk_image_aspects_to_plane(image, view->vk.aspects);
   } else {
      /* For other formats, retrieve the plane count from the aspect mask
       * and then walk through the aspect mask to map each image plane
       * to its corresponding view plane.
       */
      assert(util_bitcount(view->vk.aspects) ==
             vk_format_get_plane_count(view->vk.format));
      view->plane_count = 0;
      u_foreach_bit(aspect_bit, view->vk.aspects) {
         uint8_t image_plane =
            hk_image_aspects_to_plane(image, 1u << aspect_bit);
         view->planes[view->plane_count++].image_plane = image_plane;
      }
   }

   struct agx_texture_packed cached[HK_MAX_IMAGE_DESCS];

   /* Finally, fill in each view plane separately */
   for (unsigned view_plane = 0; view_plane < view->plane_count; view_plane++) {
      const struct {
         VkImageUsageFlagBits flag;
         enum hk_desc_usage usage;
         uint32_t *tex;
         uint32_t *pbe;
      } descriptors[] = {
         {VK_IMAGE_USAGE_SAMPLED_BIT, HK_DESC_USAGE_SAMPLED,
          &view->planes[view_plane].sampled_desc_index},

         {VK_IMAGE_USAGE_STORAGE_BIT, HK_DESC_USAGE_STORAGE,
          &view->planes[view_plane].ro_storage_desc_index,
          &view->planes[view_plane].storage_desc_index},

         {VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, HK_DESC_USAGE_INPUT,
          &view->planes[view_plane].ia_desc_index},

         {VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, HK_DESC_USAGE_BG_EOT,
          &view->planes[view_plane].background_desc_index,
          &view->planes[view_plane].eot_pbe_desc_index},

         {VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, HK_DESC_USAGE_LAYERED_BG_EOT,
          &view->planes[view_plane].layered_background_desc_index,
          &view->planes[view_plane].layered_eot_pbe_desc_index},
      };

      for (unsigned i = 0; i < ARRAY_SIZE(descriptors); ++i) {
         if (!(view->vk.usage & descriptors[i].flag))
            continue;

         for (unsigned is_pbe = 0; is_pbe < 2; ++is_pbe) {
            struct agx_texture_packed desc;
            uint32_t *out = is_pbe ? descriptors[i].pbe : descriptors[i].tex;

            if (!out)
               continue;

            if (is_pbe) {
               static_assert(sizeof(struct agx_pbe_packed) ==
                             sizeof(struct agx_texture_packed));

               pack_pbe(dev, view, view_plane, descriptors[i].usage,
                        (struct agx_pbe_packed *)&desc);
            } else {
               pack_texture(view, view_plane, descriptors[i].usage, &desc);
            }

            result = add_descriptor(dev, view, &desc, cached, out);
            if (result != VK_SUCCESS)
               return result;
         }
      }

      if (view->vk.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
         pack_texture(view, view_plane, HK_DESC_USAGE_EMRT,
                      &view->planes[view_plane].emrt_texture);

         pack_pbe(dev, view, view_plane, HK_DESC_USAGE_EMRT,
                  &view->planes[view_plane].emrt_pbe);
      }
   }

   return VK_SUCCESS;
}

VKAPI_ATTR void VKAPI_CALL
hk_DestroyImageView(VkDevice _device, VkImageView imageView,
                    const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(hk_device, dev, _device);
   VK_FROM_HANDLE(hk_image_view, view, imageView);

   if (!view)
      return;

   for (uint8_t d = 0; d < view->descriptor_count; ++d) {
      hk_descriptor_table_remove(dev, &dev->images, view->descriptor_index[d]);
   }

   vk_image_view_finish(&view->vk);
   vk_free2(&dev->vk.alloc, pAllocator, view);
}

VKAPI_ATTR VkResult VKAPI_CALL
hk_CreateImageView(VkDevice _device, const VkImageViewCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator, VkImageView *pView)
{
   VK_FROM_HANDLE(hk_device, dev, _device);
   struct hk_image_view *view;
   VkResult result;

   view = vk_alloc2(&dev->vk.alloc, pAllocator, sizeof(*view), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!view)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = hk_image_view_init(
      dev, view, pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_INTERNAL_MESA,
      pCreateInfo);
   if (result != VK_SUCCESS) {
      hk_DestroyImageView(_device, hk_image_view_to_handle(view), pAllocator);
      return result;
   }

   *pView = hk_image_view_to_handle(view);

   return VK_SUCCESS;
}