/*
 * Copyright 2021 Google LLC
 * SPDX-License-Identifier: MIT
 */

#ifndef VKR_CS_H
#define VKR_CS_H

#include "vkr_common.h"

/* This is to avoid integer overflows and to catch bogus allocations (e.g.,
 * the guest driver encodes an uninitialized value).  In practice, the largest
 * allocations we've seen are from vkGetPipelineCacheData and are dozens of
 * MBs.
 */
#define VKR_CS_DECODER_TEMP_POOL_MAX_SIZE (1u * 1024 * 1024 * 1024)
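
/* A hedged sketch of how the cap is meant to be applied: the pool-growing
 * path in vkr_cs.c is assumed to reject any request that would push the
 * pool's total size past the cap, along these lines:
 *
 *    if (size > VKR_CS_DECODER_TEMP_POOL_MAX_SIZE - pool->total_size)
 *       return false;  // bogus or overflowing allocation
 */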

struct iovec;

struct vkr_cs_encoder {
   bool *fatal_error;

   struct {
      const struct vkr_resource_attachment *attachment;
      const struct iovec *iov;
      int iov_count;
      size_t offset;
      size_t size;

      int cached_index;
      size_t cached_offset;
   } stream;

   size_t remaining_size;
   int next_iov;
   uint8_t *cur;
   const uint8_t *end;
};

struct vkr_cs_decoder_saved_state {
   const uint8_t *cur;
   const uint8_t *end;

   uint32_t pool_buffer_count;
   uint8_t *pool_reset_to;
};

/*
 * We usually need many small allocations during decoding.  Those allocations
 * are suballocated from the temp pool.
 *
 * After a command is decoded, vkr_cs_decoder_reset_temp_pool is called to
 * reset pool->cur.  After an entire command stream is decoded,
 * vkr_cs_decoder_gc_temp_pool is called to garbage collect pool->buffers.
 */
struct vkr_cs_decoder_temp_pool {
   uint8_t **buffers;
   uint32_t buffer_count;
   uint32_t buffer_max;
   size_t total_size;

   uint8_t *reset_to;

   uint8_t *cur;
   const uint8_t *end;
};
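
/* A minimal lifecycle sketch for the temp pool, per the comment above
 * (decode_one_command is a hypothetical caller, and
 * vkr_cs_decoder_gc_temp_pool is assumed to be declared elsewhere in vkr):
 *
 *    while (vkr_cs_decoder_has_command(dec)) {
 *       decode_one_command(dec);             // calls vkr_cs_decoder_alloc_temp
 *       vkr_cs_decoder_reset_temp_pool(dec); // rewinds pool->cur
 *    }
 *    vkr_cs_decoder_gc_temp_pool(dec);       // trims pool->buffers
 */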

struct vkr_cs_decoder {
   const struct hash_table *object_table;

   bool fatal_error;
   struct vkr_cs_decoder_temp_pool temp_pool;

   struct vkr_cs_decoder_saved_state saved_states[1];
   uint32_t saved_state_count;

   const uint8_t *cur;
   const uint8_t *end;
};

static inline void
vkr_cs_encoder_init(struct vkr_cs_encoder *enc, bool *fatal_error)
{
   memset(enc, 0, sizeof(*enc));
   enc->fatal_error = fatal_error;
}

static inline void
vkr_cs_encoder_set_fatal(const struct vkr_cs_encoder *enc)
{
   *enc->fatal_error = true;
}

void
vkr_cs_encoder_set_stream(struct vkr_cs_encoder *enc,
                          const struct vkr_resource_attachment *att,
                          size_t offset,
                          size_t size);

void
vkr_cs_encoder_seek_stream(struct vkr_cs_encoder *enc, size_t pos);

void
vkr_cs_encoder_write_internal(struct vkr_cs_encoder *enc,
                              size_t size,
                              const void *val,
                              size_t val_size);

static inline void
vkr_cs_encoder_write(struct vkr_cs_encoder *enc,
                     size_t size,
                     const void *val,
                     size_t val_size)
{
   assert(val_size <= size);

   if (unlikely(size > (size_t)(enc->end - enc->cur))) {
      vkr_cs_encoder_write_internal(enc, size, val, val_size);
      return;
   }

   /* we should not rely on the compiler to optimize away memcpy... */
   memcpy(enc->cur, val, val_size);
   enc->cur += size;
}
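
/* A hedged usage sketch: size is the number of bytes advanced in the stream
 * and may exceed val_size when the protocol pads values (the 32-bit example
 * is illustrative):
 *
 *    const uint32_t val = 42;
 *    vkr_cs_encoder_write(enc, sizeof(val), &val, sizeof(val));
 */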

void
vkr_cs_decoder_init(struct vkr_cs_decoder *dec, const struct hash_table *object_table);

void
vkr_cs_decoder_fini(struct vkr_cs_decoder *dec);

void
vkr_cs_decoder_reset(struct vkr_cs_decoder *dec);

static inline void
vkr_cs_decoder_set_fatal(const struct vkr_cs_decoder *dec)
{
   ((struct vkr_cs_decoder *)dec)->fatal_error = true;
}

static inline bool
vkr_cs_decoder_get_fatal(const struct vkr_cs_decoder *dec)
{
   return dec->fatal_error;
}

static inline void
vkr_cs_decoder_set_stream(struct vkr_cs_decoder *dec, const void *data, size_t size)
{
   dec->cur = data;
   dec->end = dec->cur + size;
}

static inline bool
vkr_cs_decoder_has_command(const struct vkr_cs_decoder *dec)
{
   return dec->cur < dec->end;
}

bool
vkr_cs_decoder_push_state(struct vkr_cs_decoder *dec);

void
vkr_cs_decoder_pop_state(struct vkr_cs_decoder *dec);
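
/* A hedged save/restore sketch: push the current position before switching
 * to another stream, then pop to resume where decoding left off (the nested
 * stream source is an assumption for illustration):
 *
 *    if (vkr_cs_decoder_push_state(dec)) {
 *       vkr_cs_decoder_set_stream(dec, nested_data, nested_size);
 *       // ... decode commands from the nested stream ...
 *       vkr_cs_decoder_pop_state(dec);
 *    }
 */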

static inline bool
vkr_cs_decoder_peek_internal(const struct vkr_cs_decoder *dec,
                             size_t size,
                             void *val,
                             size_t val_size)
{
   assert(val_size <= size);

   if (unlikely(size > (size_t)(dec->end - dec->cur))) {
      vkr_log("failed to peek %zu bytes", size);
      vkr_cs_decoder_set_fatal(dec);
      memset(val, 0, val_size);
      return false;
   }

   /* we should not rely on the compiler to optimize away memcpy... */
   memcpy(val, dec->cur, val_size);
   return true;
}

static inline void
vkr_cs_decoder_read(struct vkr_cs_decoder *dec, size_t size, void *val, size_t val_size)
{
   if (vkr_cs_decoder_peek_internal(dec, size, val, val_size))
      dec->cur += size;
}
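
/* A minimal read sketch: on overrun the peek path above sets the fatal flag
 * and zero-fills val, so a caller can batch several reads and check the flag
 * once afterwards:
 *
 *    uint32_t count;
 *    vkr_cs_decoder_read(dec, sizeof(count), &count, sizeof(count));
 *    if (vkr_cs_decoder_get_fatal(dec))
 *       return;
 */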

static inline void
vkr_cs_decoder_peek(const struct vkr_cs_decoder *dec,
                    size_t size,
                    void *val,
                    size_t val_size)
{
   vkr_cs_decoder_peek_internal(dec, size, val, val_size);
}

static inline struct vkr_object *
vkr_cs_decoder_lookup_object(const struct vkr_cs_decoder *dec,
                             vkr_object_id id,
                             VkObjectType type)
{
   struct vkr_object *obj;

   if (!id)
      return NULL;

   const struct hash_entry *entry =
      _mesa_hash_table_search((struct hash_table *)dec->object_table, &id);
   obj = likely(entry) ? entry->data : NULL;
   if (unlikely(!obj || obj->type != type)) {
      if (obj)
         vkr_log("object %" PRIu64 " has type %d, not %d", id, obj->type, type);
      else
         vkr_log("failed to look up object %" PRIu64, id);
      vkr_cs_decoder_set_fatal(dec);
   }

   return obj;
}
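
/* A minimal lookup sketch (the id is assumed to have been decoded from the
 * stream): NULL with the fatal flag set means a protocol error, while NULL
 * for id == 0 is a legitimate VK_NULL_HANDLE:
 *
 *    struct vkr_object *obj =
 *       vkr_cs_decoder_lookup_object(dec, id, VK_OBJECT_TYPE_DEVICE);
 *    if (vkr_cs_decoder_get_fatal(dec))
 *       return;
 */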

static inline void
vkr_cs_decoder_reset_temp_pool(struct vkr_cs_decoder *dec)
{
   struct vkr_cs_decoder_temp_pool *pool = &dec->temp_pool;
   pool->cur = pool->reset_to;
}

bool
vkr_cs_decoder_alloc_temp_internal(struct vkr_cs_decoder *dec, size_t size);

static inline void *
vkr_cs_decoder_alloc_temp(struct vkr_cs_decoder *dec, size_t size)
{
   struct vkr_cs_decoder_temp_pool *pool = &dec->temp_pool;

   if (unlikely(size > (size_t)(pool->end - pool->cur))) {
      if (!vkr_cs_decoder_alloc_temp_internal(dec, size)) {
         vkr_log("failed to suballocate %zu bytes from the temp pool", size);
         vkr_cs_decoder_set_fatal(dec);
         return NULL;
      }
   }

   /* align to 64-bit after we know size is at most
    * VKR_CS_DECODER_TEMP_POOL_MAX_SIZE and cannot overflow
    */
   size = align64(size, 8);
   assert(size <= (size_t)(pool->end - pool->cur));

   void *ptr = pool->cur;
   pool->cur += size;
   return ptr;
}
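
/* A hedged suballocation sketch (the element type and count are
 * illustrative; a request the pool cannot satisfy returns NULL and sets the
 * fatal flag):
 *
 *    VkRect2D *rects =
 *       vkr_cs_decoder_alloc_temp(dec, sizeof(*rects) * (size_t)count);
 *    if (!rects)
 *       return;
 */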

static inline bool
vkr_cs_handle_indirect_id(VkObjectType type)
{
   /* Dispatchable handles may or may not have enough bits to store
    * vkr_object_id.  Non-dispatchable handles always have enough bits to
    * store vkr_object_id.
    *
    * This should compile to a constant after inlining.
    */
   switch (type) {
   case VK_OBJECT_TYPE_INSTANCE:
   case VK_OBJECT_TYPE_PHYSICAL_DEVICE:
   case VK_OBJECT_TYPE_DEVICE:
   case VK_OBJECT_TYPE_QUEUE:
   case VK_OBJECT_TYPE_COMMAND_BUFFER:
      return sizeof(VkInstance) < sizeof(vkr_object_id);
   default:
      return false;
   }
}

static inline vkr_object_id
vkr_cs_handle_load_id(const void **handle, VkObjectType type)
{
   const vkr_object_id *p = vkr_cs_handle_indirect_id(type)
                               ? *(const vkr_object_id **)handle
                               : (const vkr_object_id *)handle;
   return *p;
}

static inline void
vkr_cs_handle_store_id(void **handle, vkr_object_id id, VkObjectType type)
{
   vkr_object_id *p = vkr_cs_handle_indirect_id(type) ? *(vkr_object_id **)handle
                                                      : (vkr_object_id *)handle;
   *p = id;
}
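
/* A hedged round-trip sketch: the decoder loads an object id out of a
 * handle-sized slot in the stream, and the encoder stores one back (slot and
 * obj are illustrative):
 *
 *    vkr_object_id id = vkr_cs_handle_load_id((const void **)&slot, type);
 *    ...
 *    vkr_cs_handle_store_id((void **)&slot, obj->id, type);
 */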

#endif /* VKR_CS_H */