1 /*
2 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
3 * SPDX-License-Identifier: MIT
4 */
5 #ifndef NVK_IMAGE_H
6 #define NVK_IMAGE_H 1
7
8 #include "nvk_private.h"
9 #include "nvk_device_memory.h"
10
11 #include "vk_image.h"
12
13 #include "nil.h"
14
15 /* Because small images can end up with an array_stride_B that is less than
16 * the sparse block size (in bytes), we have to set SINGLE_MIPTAIL_BIT when
17 * advertising sparse properties to the client. This means that we get one
18 * single memory range for the miptail of the image. For large images with
19 * mipTailStartLod > 0, we have to deal with the array stride ourselves.
20 *
21 * We do this by returning NVK_MIP_TAIL_START_OFFSET as the image's
22 * imageMipTailOffset. We can then detect anything with that address as
23 * being part of the miptail and re-map it accordingly. The Vulkan spec
24 * explicitly allows for this.
25 *
26 * From the Vulkan 1.3.279 spec:
27 *
28 * "When VK_SPARSE_MEMORY_BIND_METADATA_BIT is present, the resourceOffset
29 * must have been derived explicitly from the imageMipTailOffset in the
30 * sparse resource properties returned for the metadata aspect. By
31 * manipulating the value returned for imageMipTailOffset, the
32 * resourceOffset does not have to correlate directly to a device virtual
33 * address offset, and may instead be whatever value makes it easiest for
34 * the implementation to derive the correct device virtual address."
35 */
36 #define NVK_MIP_TAIL_START_OFFSET 0x6d74000000000000UL
37
38 struct nvk_device_memory;
39 struct nvk_physical_device;
40 struct nvk_queue;
41 struct nvkmd_mem;
42 struct nvkmd_va;
43
/** Returns the format features supported for an image of the given format
 * with the given tiling and (for VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)
 * DRM format modifier.
 */
VkFormatFeatureFlags2
nvk_get_image_format_features(struct nvk_physical_device *pdevice,
                              VkFormat format, VkImageTiling tiling,
                              uint64_t drm_format_mod);

/** Fills out a VkDrmFormatModifierPropertiesListEXT (or the 2-variant)
 * chained at @ext with the modifiers supported for @vk_format.
 */
void
nvk_get_drm_format_modifier_properties_list(struct nvk_physical_device *pdev,
                                            VkFormat vk_format,
                                            VkBaseOutStructure *ext);

/** Returns the maximum image dimension (in texels) supported by the
 * hardware for the given image type.
 */
uint32_t
nvk_image_max_dimension(const struct nv_device_info *info,
                        VkImageType image_type);
57
/** A single plane of an nvk_image
 *
 * Single-planar images have exactly one of these; multi-planar (e.g. YCbCr)
 * and depth/stencil images have one per plane.
 */
struct nvk_image_plane {
   /* Image layout description from NIL */
   struct nil_image nil;

   /* Base GPU virtual address of the plane's memory */
   uint64_t addr;

   /** Reserved VA for sparse images, NULL otherwise. */
   struct nvkmd_va *va;
};
65
/** NVK implementation of VkImage */
struct nvk_image {
   /* Common Vulkan runtime image state; must be first for handle casts */
   struct vk_image vk;

   /** True if the planes are bound separately
    *
    * This is set based on VK_IMAGE_CREATE_DISJOINT_BIT
    */
   bool disjoint;

   /* Number of valid entries in planes[] */
   uint8_t plane_count;
   struct nvk_image_plane planes[3];

   /* In order to support D32_SFLOAT_S8_UINT, a temp area is
    * needed. The stencil plane can't be copied using the DMA
    * engine in a single pass since it would need 8 components support.
    * Instead we allocate a 16-bit temp, that gets copied into, then
    * copied again down to the 8-bit result.
    */
   struct nvk_image_plane stencil_copy_temp;

   /* The hardware doesn't support rendering to linear images except
    * under certain conditions, so to support DRM_FORMAT_MOD_LINEAR
    * rendering in the general case, we need to keep a tiled copy, which would
    * be used to fake support if the conditions aren't satisfied.
    */
   struct nvk_image_plane linear_tiled_shadow;
   /* Backing memory for linear_tiled_shadow, owned by the image */
   struct nvkmd_mem *linear_tiled_shadow_mem;
};
94
95 VK_DEFINE_NONDISP_HANDLE_CASTS(nvk_image, vk.base, VkImage, VK_OBJECT_TYPE_IMAGE)
96
97 static inline uint64_t
nvk_image_plane_base_address(const struct nvk_image_plane * plane)98 nvk_image_plane_base_address(const struct nvk_image_plane *plane)
99 {
100 return plane->addr;
101 }
102
103 static inline uint64_t
nvk_image_base_address(const struct nvk_image * image,uint8_t plane)104 nvk_image_base_address(const struct nvk_image *image, uint8_t plane)
105 {
106 return nvk_image_plane_base_address(&image->planes[plane]);
107 }
108
109 static inline uint8_t
nvk_image_aspects_to_plane(ASSERTED const struct nvk_image * image,VkImageAspectFlags aspectMask)110 nvk_image_aspects_to_plane(ASSERTED const struct nvk_image *image,
111 VkImageAspectFlags aspectMask)
112 {
113 /* Memory planes are only allowed for memory operations */
114 assert(!(aspectMask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
115 VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
116 VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
117 VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)));
118
119 /* Verify that the aspects are actually in the image */
120 assert(!(aspectMask & ~image->vk.aspects));
121
122 /* Must only be one aspect unless it's depth/stencil */
123 assert(aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT |
124 VK_IMAGE_ASPECT_STENCIL_BIT) ||
125 util_bitcount(aspectMask) == 1);
126
127 switch(aspectMask) {
128 case VK_IMAGE_ASPECT_PLANE_1_BIT: return 1;
129 case VK_IMAGE_ASPECT_PLANE_2_BIT: return 2;
130 default: return 0;
131 }
132 }
133
134 static inline uint8_t
nvk_image_memory_aspects_to_plane(ASSERTED const struct nvk_image * image,VkImageAspectFlags aspectMask)135 nvk_image_memory_aspects_to_plane(ASSERTED const struct nvk_image *image,
136 VkImageAspectFlags aspectMask)
137 {
138 if (aspectMask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
139 VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
140 VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
141 VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
142 /* We don't support DRM format modifiers on anything but single-plane
143 * color at the moment.
144 */
145 assert(aspectMask == VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT);
146 return 0;
147 } else {
148 return nvk_image_aspects_to_plane(image, aspectMask);
149 }
150 }
151
/** Executes the sparse image memory binds in @bind_info on @queue
 * (vkQueueBindSparse, VkSparseImageMemoryBindInfo).
 */
VkResult nvk_queue_image_bind(struct nvk_queue *queue,
                              const VkSparseImageMemoryBindInfo *bind_info);

/** Executes the opaque sparse binds in @bind_info on @queue, including
 * miptail binds identified via NVK_MIP_TAIL_START_OFFSET
 * (vkQueueBindSparse, VkSparseImageOpaqueMemoryBindInfo).
 */
VkResult nvk_queue_image_opaque_bind(struct nvk_queue *queue,
                                     const VkSparseImageOpaqueMemoryBindInfo *bind_info);
157
158 #endif
159