/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdint.h>
#include <assert.h>
#include <string.h>

#include "util/format/u_format.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "pipe/p_state.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"

#include "virgl_context.h"
#include "virgl_encode.h"
#include "virtio-gpu/virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_video.h"

#define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)

#define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,

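/* Designated-initializer table mapping PIPE_FORMAT_* values to the
 * corresponding VIRGL_FORMAT_* wire values. Formats that are not listed
 * here stay zero (VIRGL_FORMAT_NONE) and are flagged by the sanity check
 * in pipe_to_virgl_format() below.
 */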
static const enum virgl_formats virgl_formats_conv_table[PIPE_FORMAT_COUNT] = {
   CONV_FORMAT(NONE)
   CONV_FORMAT(B8G8R8A8_UNORM)
   CONV_FORMAT(B8G8R8X8_UNORM)
   CONV_FORMAT(A8R8G8B8_UNORM)
   CONV_FORMAT(X8R8G8B8_UNORM)
   CONV_FORMAT(B5G5R5A1_UNORM)
   CONV_FORMAT(B4G4R4A4_UNORM)
   CONV_FORMAT(B5G6R5_UNORM)
   CONV_FORMAT(R10G10B10A2_UNORM)
   CONV_FORMAT(L8_UNORM)
   CONV_FORMAT(A8_UNORM)
   CONV_FORMAT(I8_UNORM)
   CONV_FORMAT(L8A8_UNORM)
   CONV_FORMAT(L16_UNORM)
   CONV_FORMAT(UYVY)
   CONV_FORMAT(YUYV)
   CONV_FORMAT(Z16_UNORM)
   CONV_FORMAT(Z32_UNORM)
   CONV_FORMAT(Z32_FLOAT)
   CONV_FORMAT(Z24_UNORM_S8_UINT)
   CONV_FORMAT(S8_UINT_Z24_UNORM)
   CONV_FORMAT(Z24X8_UNORM)
   CONV_FORMAT(X8Z24_UNORM)
   CONV_FORMAT(S8_UINT)
   CONV_FORMAT(R64_FLOAT)
   CONV_FORMAT(R64G64_FLOAT)
   CONV_FORMAT(R64G64B64_FLOAT)
   CONV_FORMAT(R64G64B64A64_FLOAT)
   CONV_FORMAT(R32_FLOAT)
   CONV_FORMAT(R32G32_FLOAT)
   CONV_FORMAT(R32G32B32_FLOAT)
   CONV_FORMAT(R32G32B32A32_FLOAT)
   CONV_FORMAT(R32_UNORM)
   CONV_FORMAT(R32G32_UNORM)
   CONV_FORMAT(R32G32B32_UNORM)
   CONV_FORMAT(R32G32B32A32_UNORM)
   CONV_FORMAT(R32_USCALED)
   CONV_FORMAT(R32G32_USCALED)
   CONV_FORMAT(R32G32B32_USCALED)
   CONV_FORMAT(R32G32B32A32_USCALED)
   CONV_FORMAT(R32_SNORM)
   CONV_FORMAT(R32G32_SNORM)
   CONV_FORMAT(R32G32B32_SNORM)
   CONV_FORMAT(R32G32B32A32_SNORM)
   CONV_FORMAT(R32_SSCALED)
   CONV_FORMAT(R32G32_SSCALED)
   CONV_FORMAT(R32G32B32_SSCALED)
   CONV_FORMAT(R32G32B32A32_SSCALED)
   CONV_FORMAT(R16_UNORM)
   CONV_FORMAT(R16G16_UNORM)
   CONV_FORMAT(R16G16B16_UNORM)
   CONV_FORMAT(R16G16B16A16_UNORM)
   CONV_FORMAT(R16_USCALED)
   CONV_FORMAT(R16G16_USCALED)
   CONV_FORMAT(R16G16B16_USCALED)
   CONV_FORMAT(R16G16B16A16_USCALED)
   CONV_FORMAT(R16_SNORM)
   CONV_FORMAT(R16G16_SNORM)
   CONV_FORMAT(R16G16B16_SNORM)
   CONV_FORMAT(R16G16B16A16_SNORM)
   CONV_FORMAT(R16_SSCALED)
   CONV_FORMAT(R16G16_SSCALED)
   CONV_FORMAT(R16G16B16_SSCALED)
   CONV_FORMAT(R16G16B16A16_SSCALED)
   CONV_FORMAT(R8_UNORM)
   CONV_FORMAT(R8G8_UNORM)
   CONV_FORMAT(R8G8B8_UNORM)
   CONV_FORMAT(R8G8B8A8_UNORM)
   CONV_FORMAT(X8B8G8R8_UNORM)
   CONV_FORMAT(R8_USCALED)
   CONV_FORMAT(R8G8_USCALED)
   CONV_FORMAT(R8G8B8_USCALED)
   CONV_FORMAT(R8G8B8A8_USCALED)
   CONV_FORMAT(R8_SNORM)
   CONV_FORMAT(R8G8_SNORM)
   CONV_FORMAT(R8G8B8_SNORM)
   CONV_FORMAT(R8G8B8A8_SNORM)
   CONV_FORMAT(R8_SSCALED)
   CONV_FORMAT(R8G8_SSCALED)
   CONV_FORMAT(R8G8B8_SSCALED)
   CONV_FORMAT(R8G8B8A8_SSCALED)
   CONV_FORMAT(R32_FIXED)
   CONV_FORMAT(R32G32_FIXED)
   CONV_FORMAT(R32G32B32_FIXED)
   CONV_FORMAT(R32G32B32A32_FIXED)
   CONV_FORMAT(R16_FLOAT)
   CONV_FORMAT(R16G16_FLOAT)
   CONV_FORMAT(R16G16B16_FLOAT)
   CONV_FORMAT(R16G16B16A16_FLOAT)
   CONV_FORMAT(L8_SRGB)
   CONV_FORMAT(L8A8_SRGB)
   CONV_FORMAT(R8G8B8_SRGB)
   CONV_FORMAT(A8B8G8R8_SRGB)
   CONV_FORMAT(X8B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8A8_SRGB)
   CONV_FORMAT(B8G8R8X8_SRGB)
   CONV_FORMAT(A8R8G8B8_SRGB)
   CONV_FORMAT(X8R8G8B8_SRGB)
   CONV_FORMAT(R8G8B8A8_SRGB)
   CONV_FORMAT(DXT1_RGB)
   CONV_FORMAT(DXT1_RGBA)
   CONV_FORMAT(DXT3_RGBA)
   CONV_FORMAT(DXT5_RGBA)
   CONV_FORMAT(DXT1_SRGB)
   CONV_FORMAT(DXT1_SRGBA)
   CONV_FORMAT(DXT3_SRGBA)
   CONV_FORMAT(DXT5_SRGBA)
   CONV_FORMAT(RGTC1_UNORM)
   CONV_FORMAT(RGTC1_SNORM)
   CONV_FORMAT(RGTC2_UNORM)
   CONV_FORMAT(RGTC2_SNORM)
   CONV_FORMAT(R8G8_B8G8_UNORM)
   CONV_FORMAT(G8R8_G8B8_UNORM)
   CONV_FORMAT(R8SG8SB8UX8U_NORM)
   CONV_FORMAT(R5SG5SB6U_NORM)
   CONV_FORMAT(A8B8G8R8_UNORM)
   CONV_FORMAT(B5G5R5X1_UNORM)
   CONV_FORMAT(R10G10B10A2_USCALED)
   CONV_FORMAT(R11G11B10_FLOAT)
   CONV_FORMAT(R9G9B9E5_FLOAT)
   CONV_FORMAT(Z32_FLOAT_S8X24_UINT)
   CONV_FORMAT(R1_UNORM)
   CONV_FORMAT(R10G10B10X2_USCALED)
   CONV_FORMAT(R10G10B10X2_SNORM)
   CONV_FORMAT(L4A4_UNORM)
   CONV_FORMAT(B10G10R10A2_UNORM)
   CONV_FORMAT(R10SG10SB10SA2U_NORM)
   CONV_FORMAT(R8G8Bx_SNORM)
   CONV_FORMAT(R8G8B8X8_UNORM)
   CONV_FORMAT(B4G4R4X4_UNORM)
   CONV_FORMAT(X24S8_UINT)
   CONV_FORMAT(S8X24_UINT)
   CONV_FORMAT(X32_S8X24_UINT)
   CONV_FORMAT(B2G3R3_UNORM)
   CONV_FORMAT(L16A16_UNORM)
   CONV_FORMAT(A16_UNORM)
   CONV_FORMAT(I16_UNORM)
   CONV_FORMAT(LATC1_UNORM)
   CONV_FORMAT(LATC1_SNORM)
   CONV_FORMAT(LATC2_UNORM)
   CONV_FORMAT(LATC2_SNORM)
   CONV_FORMAT(A8_SNORM)
   CONV_FORMAT(L8_SNORM)
   CONV_FORMAT(L8A8_SNORM)
   CONV_FORMAT(I8_SNORM)
   CONV_FORMAT(A16_SNORM)
   CONV_FORMAT(L16_SNORM)
   CONV_FORMAT(L16A16_SNORM)
   CONV_FORMAT(I16_SNORM)
   CONV_FORMAT(A16_FLOAT)
   CONV_FORMAT(L16_FLOAT)
   CONV_FORMAT(L16A16_FLOAT)
   CONV_FORMAT(I16_FLOAT)
   CONV_FORMAT(A32_FLOAT)
   CONV_FORMAT(L32_FLOAT)
   CONV_FORMAT(L32A32_FLOAT)
   CONV_FORMAT(I32_FLOAT)
   CONV_FORMAT(YV12)
   CONV_FORMAT(YV16)
   CONV_FORMAT(IYUV)
   CONV_FORMAT(NV12)
   CONV_FORMAT(NV21)
   CONV_FORMAT(A4R4_UNORM)
   CONV_FORMAT(R4A4_UNORM)
   CONV_FORMAT(R8A8_UNORM)
   CONV_FORMAT(A8R8_UNORM)
   CONV_FORMAT(R10G10B10A2_SSCALED)
   CONV_FORMAT(R10G10B10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_USCALED)
   CONV_FORMAT(B10G10R10A2_SSCALED)
   CONV_FORMAT(B10G10R10A2_SNORM)
   CONV_FORMAT(R8_UINT)
   CONV_FORMAT(R8G8_UINT)
   CONV_FORMAT(R8G8B8_UINT)
   CONV_FORMAT(R8G8B8A8_UINT)
   CONV_FORMAT(R8_SINT)
   CONV_FORMAT(R8G8_SINT)
   CONV_FORMAT(R8G8B8_SINT)
   CONV_FORMAT(R8G8B8A8_SINT)
   CONV_FORMAT(R16_UINT)
   CONV_FORMAT(R16G16_UINT)
   CONV_FORMAT(R16G16B16_UINT)
   CONV_FORMAT(R16G16B16A16_UINT)
   CONV_FORMAT(R16_SINT)
   CONV_FORMAT(R16G16_SINT)
   CONV_FORMAT(R16G16B16_SINT)
   CONV_FORMAT(R16G16B16A16_SINT)
   CONV_FORMAT(R32_UINT)
   CONV_FORMAT(R32G32_UINT)
   CONV_FORMAT(R32G32B32_UINT)
   CONV_FORMAT(R32G32B32A32_UINT)
   CONV_FORMAT(R32_SINT)
   CONV_FORMAT(R32G32_SINT)
   CONV_FORMAT(R32G32B32_SINT)
   CONV_FORMAT(R32G32B32A32_SINT)
   CONV_FORMAT(A8_UINT)
   CONV_FORMAT(I8_UINT)
   CONV_FORMAT(L8_UINT)
   CONV_FORMAT(L8A8_UINT)
   CONV_FORMAT(A8_SINT)
   CONV_FORMAT(I8_SINT)
   CONV_FORMAT(L8_SINT)
   CONV_FORMAT(L8A8_SINT)
   CONV_FORMAT(A16_UINT)
   CONV_FORMAT(I16_UINT)
   CONV_FORMAT(L16_UINT)
   CONV_FORMAT(L16A16_UINT)
   CONV_FORMAT(A16_SINT)
   CONV_FORMAT(I16_SINT)
   CONV_FORMAT(L16_SINT)
   CONV_FORMAT(L16A16_SINT)
   CONV_FORMAT(A32_UINT)
   CONV_FORMAT(I32_UINT)
   CONV_FORMAT(L32_UINT)
   CONV_FORMAT(L32A32_UINT)
   CONV_FORMAT(A32_SINT)
   CONV_FORMAT(I32_SINT)
   CONV_FORMAT(L32_SINT)
   CONV_FORMAT(L32A32_SINT)
   CONV_FORMAT(B10G10R10A2_UINT)
   CONV_FORMAT(ETC1_RGB8)
   CONV_FORMAT(R8G8_R8B8_UNORM)
   CONV_FORMAT(G8R8_B8R8_UNORM)
   CONV_FORMAT(R8G8B8X8_SNORM)
   CONV_FORMAT(R8G8B8X8_SRGB)
   CONV_FORMAT(R8G8B8X8_UINT)
   CONV_FORMAT(R8G8B8X8_SINT)
   CONV_FORMAT(B10G10R10X2_UNORM)
   CONV_FORMAT(R16G16B16X16_UNORM)
   CONV_FORMAT(R16G16B16X16_SNORM)
   CONV_FORMAT(R16G16B16X16_FLOAT)
   CONV_FORMAT(R16G16B16X16_UINT)
   CONV_FORMAT(R16G16B16X16_SINT)
   CONV_FORMAT(R32G32B32X32_FLOAT)
   CONV_FORMAT(R32G32B32X32_UINT)
   CONV_FORMAT(R32G32B32X32_SINT)
   CONV_FORMAT(R8A8_SNORM)
   CONV_FORMAT(R16A16_UNORM)
   CONV_FORMAT(R16A16_SNORM)
   CONV_FORMAT(R16A16_FLOAT)
   CONV_FORMAT(R32A32_FLOAT)
   CONV_FORMAT(R8A8_UINT)
   CONV_FORMAT(R8A8_SINT)
   CONV_FORMAT(R16A16_UINT)
   CONV_FORMAT(R16A16_SINT)
   CONV_FORMAT(R32A32_UINT)
   CONV_FORMAT(R32A32_SINT)
   CONV_FORMAT(R10G10B10A2_UINT)
   CONV_FORMAT(B5G6R5_SRGB)
   CONV_FORMAT(BPTC_RGBA_UNORM)
   CONV_FORMAT(BPTC_SRGBA)
   CONV_FORMAT(BPTC_RGB_FLOAT)
   CONV_FORMAT(BPTC_RGB_UFLOAT)
   CONV_FORMAT(G8R8_UNORM)
   CONV_FORMAT(G8R8_SNORM)
   CONV_FORMAT(G16R16_UNORM)
   CONV_FORMAT(G16R16_SNORM)
   CONV_FORMAT(A8B8G8R8_SNORM)
   CONV_FORMAT(X8B8G8R8_SNORM)
   CONV_FORMAT(ETC2_RGB8)
   CONV_FORMAT(ETC2_SRGB8)
   CONV_FORMAT(ETC2_RGB8A1)
   CONV_FORMAT(ETC2_SRGB8A1)
   CONV_FORMAT(ETC2_RGBA8)
   CONV_FORMAT(ETC2_SRGBA8)
   CONV_FORMAT(ETC2_R11_UNORM)
   CONV_FORMAT(ETC2_R11_SNORM)
   CONV_FORMAT(ETC2_RG11_UNORM)
   CONV_FORMAT(ETC2_RG11_SNORM)
   CONV_FORMAT(ASTC_4x4)
   CONV_FORMAT(ASTC_5x4)
   CONV_FORMAT(ASTC_5x5)
   CONV_FORMAT(ASTC_6x5)
   CONV_FORMAT(ASTC_6x6)
   CONV_FORMAT(ASTC_8x5)
   CONV_FORMAT(ASTC_8x6)
   CONV_FORMAT(ASTC_8x8)
   CONV_FORMAT(ASTC_10x5)
   CONV_FORMAT(ASTC_10x6)
   CONV_FORMAT(ASTC_10x8)
   CONV_FORMAT(ASTC_10x10)
   CONV_FORMAT(ASTC_12x10)
   CONV_FORMAT(ASTC_12x12)
   CONV_FORMAT(ASTC_4x4_SRGB)
   CONV_FORMAT(ASTC_5x4_SRGB)
   CONV_FORMAT(ASTC_5x5_SRGB)
   CONV_FORMAT(ASTC_6x5_SRGB)
   CONV_FORMAT(ASTC_6x6_SRGB)
   CONV_FORMAT(ASTC_8x5_SRGB)
   CONV_FORMAT(ASTC_8x6_SRGB)
   CONV_FORMAT(ASTC_8x8_SRGB)
   CONV_FORMAT(ASTC_10x5_SRGB)
   CONV_FORMAT(ASTC_10x6_SRGB)
   CONV_FORMAT(ASTC_10x8_SRGB)
   CONV_FORMAT(ASTC_10x10_SRGB)
   CONV_FORMAT(ASTC_12x10_SRGB)
   CONV_FORMAT(ASTC_12x12_SRGB)
   CONV_FORMAT(R10G10B10X2_UNORM)
   CONV_FORMAT(A4B4G4R4_UNORM)
   CONV_FORMAT(R8_SRGB)
   CONV_FORMAT(R8G8_SRGB)
   CONV_FORMAT(P010)
   CONV_FORMAT(P012)
   CONV_FORMAT(P016)
   CONV_FORMAT(B8G8R8_UNORM)
   CONV_FORMAT(R3G3B2_UNORM)
   CONV_FORMAT(R4G4B4A4_UNORM)
   CONV_FORMAT(R5G5B5A1_UNORM)
   CONV_FORMAT(R5G6B5_UNORM)
   CONV_FORMAT(Y8_400_UNORM)
   CONV_FORMAT(Y8_U8_V8_444_UNORM)
   CONV_FORMAT(Y8_U8_V8_422_UNORM)
   CONV_FORMAT(Y8_U8V8_422_UNORM)
   CONV_FORMAT(Y8_UNORM)
   CONV_FORMAT(YVYU)
   CONV_FORMAT(Z16_UNORM_S8_UINT)
   CONV_FORMAT(Z24_UNORM_S8_UINT_AS_R8G8B8A8)
   CONV_FORMAT(A1B5G5R5_UINT)
   CONV_FORMAT(A1B5G5R5_UNORM)
   CONV_FORMAT(A1R5G5B5_UINT)
   CONV_FORMAT(A1R5G5B5_UNORM)
   CONV_FORMAT(A2B10G10R10_UINT)
   CONV_FORMAT(A2B10G10R10_UNORM)
   CONV_FORMAT(A2R10G10B10_UINT)
   CONV_FORMAT(A2R10G10B10_UNORM)
   CONV_FORMAT(A4B4G4R4_UINT)
   CONV_FORMAT(A4R4G4B4_UINT)
   CONV_FORMAT(A4R4G4B4_UNORM)
   CONV_FORMAT(A8B8G8R8_SINT)
   CONV_FORMAT(A8B8G8R8_SSCALED)
   CONV_FORMAT(A8B8G8R8_UINT)
   CONV_FORMAT(A8B8G8R8_USCALED)
   CONV_FORMAT(A8R8G8B8_SINT)
   CONV_FORMAT(A8R8G8B8_SNORM)
   CONV_FORMAT(A8R8G8B8_UINT)
   CONV_FORMAT(ASTC_3x3x3)
   CONV_FORMAT(ASTC_3x3x3_SRGB)
   CONV_FORMAT(ASTC_4x3x3)
   CONV_FORMAT(ASTC_4x3x3_SRGB)
   CONV_FORMAT(ASTC_4x4x3)
   CONV_FORMAT(ASTC_4x4x3_SRGB)
   CONV_FORMAT(ASTC_4x4x4)
   CONV_FORMAT(ASTC_4x4x4_SRGB)
   CONV_FORMAT(ASTC_5x4x4)
   CONV_FORMAT(ASTC_5x4x4_SRGB)
   CONV_FORMAT(ASTC_5x5x4)
   CONV_FORMAT(ASTC_5x5x4_SRGB)
   CONV_FORMAT(ASTC_5x5x5)
   CONV_FORMAT(ASTC_5x5x5_SRGB)
   CONV_FORMAT(ASTC_6x5x5)
   CONV_FORMAT(ASTC_6x5x5_SRGB)
   CONV_FORMAT(ASTC_6x6x5)
   CONV_FORMAT(ASTC_6x6x5_SRGB)
   CONV_FORMAT(ASTC_6x6x6)
   CONV_FORMAT(ASTC_6x6x6_SRGB)
   CONV_FORMAT(ATC_RGB)
   CONV_FORMAT(ATC_RGBA_EXPLICIT)
   CONV_FORMAT(ATC_RGBA_INTERPOLATED)
   CONV_FORMAT(AYUV)
   CONV_FORMAT(B10G10R10A2_SINT)
   CONV_FORMAT(B10G10R10X2_SINT)
   CONV_FORMAT(B10G10R10X2_SNORM)
   CONV_FORMAT(B2G3R3_UINT)
   CONV_FORMAT(B4G4R4A4_UINT)
   CONV_FORMAT(B5G5R5A1_UINT)
   CONV_FORMAT(B5G6R5_UINT)
   CONV_FORMAT(B8G8R8A8_SINT)
   CONV_FORMAT(B8G8R8A8_SNORM)
   CONV_FORMAT(B8G8R8A8_SSCALED)
   CONV_FORMAT(B8G8R8A8_UINT)
   CONV_FORMAT(B8G8R8A8_USCALED)
   CONV_FORMAT(B8G8_R8G8_UNORM)
   CONV_FORMAT(B8G8R8_SINT)
   CONV_FORMAT(B8G8R8_SNORM)
   CONV_FORMAT(B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8_SSCALED)
   CONV_FORMAT(B8G8R8_UINT)
   CONV_FORMAT(B8G8R8_USCALED)
   CONV_FORMAT(B8G8R8X8_SINT)
   CONV_FORMAT(B8G8R8X8_SNORM)
   CONV_FORMAT(B8G8R8X8_UINT)
   CONV_FORMAT(B8R8_G8R8_UNORM)
   CONV_FORMAT(FXT1_RGB)
   CONV_FORMAT(FXT1_RGBA)
   CONV_FORMAT(G16R16_SINT)
   CONV_FORMAT(G8B8_G8R8_UNORM)
   CONV_FORMAT(G8_B8_R8_420_UNORM)
   CONV_FORMAT(G8_B8R8_420_UNORM)
   CONV_FORMAT(G8R8_SINT)
   CONV_FORMAT(P030)
   CONV_FORMAT(R10G10B10A2_SINT)
   CONV_FORMAT(R10G10B10X2_SINT)
   CONV_FORMAT(R3G3B2_UINT)
   CONV_FORMAT(R4G4B4A4_UINT)
   CONV_FORMAT(R4G4B4X4_UNORM)
   CONV_FORMAT(R5G5B5A1_UINT)
   CONV_FORMAT(R5G5B5X1_UNORM)
   CONV_FORMAT(R5G6B5_SRGB)
   CONV_FORMAT(R5G6B5_UINT)
   CONV_FORMAT(R64G64B64A64_SINT)
   CONV_FORMAT(R64G64B64A64_UINT)
   CONV_FORMAT(R64G64B64_SINT)
   CONV_FORMAT(R64G64B64_UINT)
   CONV_FORMAT(R64G64_SINT)
   CONV_FORMAT(R64G64_UINT)
   CONV_FORMAT(R64_SINT)
   CONV_FORMAT(R64_UINT)
   CONV_FORMAT(R8_B8_G8_420_UNORM)
   CONV_FORMAT(R8_B8G8_420_UNORM)
   CONV_FORMAT(R8B8_R8G8_UNORM)
   CONV_FORMAT(R8_G8_B8_420_UNORM)
   CONV_FORMAT(R8_G8B8_420_UNORM)
   CONV_FORMAT(R8_G8_B8_UNORM)
   CONV_FORMAT(VYUY)
   CONV_FORMAT(X1B5G5R5_UNORM)
   CONV_FORMAT(X1R5G5B5_UNORM)
   CONV_FORMAT(XYUV)
   CONV_FORMAT(X8B8G8R8_SINT)
   CONV_FORMAT(X8R8G8B8_SINT)
   CONV_FORMAT(X8R8G8B8_SNORM)
   CONV_FORMAT(Y16_U16_V16_420_UNORM)
   CONV_FORMAT(Y16_U16_V16_422_UNORM)
   CONV_FORMAT(Y16_U16V16_422_UNORM)
   CONV_FORMAT(Y16_U16_V16_444_UNORM)
   CONV_FORMAT(Y210)
   CONV_FORMAT(Y212)
   CONV_FORMAT(Y216)
   CONV_FORMAT(Y410)
   CONV_FORMAT(Y412)
   CONV_FORMAT(Y416)
};
#undef CONV_FORMAT

enum virgl_formats pipe_to_virgl_format(enum pipe_format format)
{
   enum virgl_formats vformat = virgl_formats_conv_table[format];
   if (format != PIPE_FORMAT_NONE && !vformat)
      debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format));
   return vformat;
}

enum pipe_format virgl_to_pipe_format(enum virgl_formats format)
{
   enum pipe_format pformat;

   for (pformat = PIPE_FORMAT_NONE; pformat < PIPE_FORMAT_COUNT; pformat++)
      if (virgl_formats_conv_table[pformat] == format)
         return pformat;

   debug_printf("VIRGL: virgl format %u not in the format table\n", format);
   return PIPE_FORMAT_NONE;
}

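/* Command headers are built with VIRGL_CMD0(): the opcode sits in the low
 * 8 bits, the object type in bits 8-15, and the payload length in dwords
 * in the upper 16 bits. Pulling the length back out of the header lets the
 * encoder flush the command buffer up front whenever the full command
 * would not fit.
 */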
static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
                                         uint32_t dword)
{
   int len = (dword >> 16);

   if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
      ctx->base.flush(&ctx->base, NULL, 0);

   virgl_encoder_write_dword(ctx->cbuf, dword);
   return 0;
}

static void virgl_encoder_emit_resource(struct virgl_screen *vs,
                                        struct virgl_cmd_buf *buf,
                                        struct virgl_resource *res)
{
   struct virgl_winsys *vws = vs->vws;
   if (res && res->hw_res)
      vws->emit_res(vws, buf, res->hw_res, true);
   else {
      virgl_encoder_write_dword(buf, 0);
   }
}

static void virgl_encoder_write_res(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(ctx->base.screen);
   virgl_encoder_emit_resource(vs, ctx->cbuf, res);
}

int virgl_encode_bind_object(struct virgl_context *ctx,
                             uint32_t handle, uint32_t object)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encode_delete_object(struct virgl_context *ctx,
                               uint32_t handle, uint32_t object)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encode_blend_state(struct virgl_context *ctx,
                             uint32_t handle,
                             const struct pipe_blend_state *blend_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp =
      VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
      VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
      VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);

   virgl_encoder_write_dword(ctx->cbuf, tmp);

   tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
      /* We use alpha src factor to pass the advanced blend equation value
       * to the host. By doing so, we don't have to change the protocol.
       */
      uint32_t alpha = (i == 0 && blend_state->advanced_blend_func)
                        ? blend_state->advanced_blend_func
                        : blend_state->rt[i].alpha_src_factor;
      tmp =
         VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(alpha) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }
   return 0;
}

int virgl_encode_dsa_state(struct virgl_context *ctx,
                           uint32_t handle,
                           const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth_enabled) |
         VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth_writemask) |
         VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth_func) |
         VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha_enabled) |
         VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
            VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
            VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
            VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
            VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
            VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
            VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha_ref_value));
   return 0;
}
int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
         VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
         VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
         VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
         VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
         VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
         VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
         VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
         VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
         VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
         VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
         VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
         VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
         VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
         VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
         VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
         VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
         VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
         VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
         VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
         VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
         VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
         VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
         VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
         VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
         VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
         VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
         VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
         VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
         VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
         VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}

static void virgl_emit_shader_header(struct virgl_context *ctx,
                                     uint32_t handle, uint32_t len,
                                     uint32_t type, uint32_t offlen,
                                     uint32_t num_tokens)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   virgl_encoder_write_dword(ctx->cbuf, offlen);
   virgl_encoder_write_dword(ctx->cbuf, num_tokens);
}

static void virgl_emit_shader_streamout(struct virgl_context *ctx,
                                        const struct pipe_stream_output_info *so_info)
{
   int num_outputs = 0;
   int i;
   uint32_t tmp;

   if (so_info)
      num_outputs = so_info->num_outputs;

   virgl_encoder_write_dword(ctx->cbuf, num_outputs);
   if (num_outputs) {
      for (i = 0; i < 4; i++)
         virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);

      for (i = 0; i < so_info->num_outputs; i++) {
         tmp =
            VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
         virgl_encoder_write_dword(ctx->cbuf, tmp);
         virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
      }
   }
}

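/* Shader creation streams the TGSI text representation to the host. The
 * shader is dumped to a string and, when it does not fit in a single
 * command, split across several VIRGL_OBJECT_SHADER commands: the first
 * chunk carries the total length in offlen, and continuation chunks carry
 * their byte offset into the string with VIRGL_OBJ_SHADER_OFFSET_CONT set.
 */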
int virgl_encode_shader_state(struct virgl_context *ctx,
                              uint32_t handle,
                              enum pipe_shader_type type,
                              const struct pipe_stream_output_info *so_info,
                              uint32_t cs_req_local_mem,
                              const struct tgsi_token *tokens)
{
   char *str, *sptr;
   uint32_t shader_len, len;
   bool bret;
   int num_tokens = tgsi_num_tokens(tokens);
   int str_total_size = 65536;
   int retry_size = 1;
   uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
   bool first_pass;
   str = CALLOC(1, str_total_size);
   if (!str)
      return -1;

   do {
      int old_size;

      bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
      if (bret == false) {
         if (virgl_debug & VIRGL_DEBUG_VERBOSE)
            debug_printf("Failed to translate shader in available space - trying again\n");
         old_size = str_total_size;
         str_total_size = 65536 * retry_size;
         retry_size *= 2;
         /* REALLOC returns NULL on failure but leaves the old block live,
          * so don't overwrite str until the new allocation succeeded.
          */
         char *new_str = REALLOC(str, old_size, str_total_size);
         if (!new_str) {
            FREE(str);
            return -1;
         }
         str = new_str;
      }
   } while (bret == false && retry_size < 1024);

   if (bret == false) {
      FREE(str); /* don't leak the dump buffer when we give up */
      return -1;
   }

   if (virgl_debug & VIRGL_DEBUG_TGSI)
      debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);

   /* virglrenderer before addbd9c5058dcc9d561b20ab747aed58c53499da mis-counts
    * the tokens needed for a BARRIER, so ask it to allocate some more space.
    */
   const char *barrier = str;
   while ((barrier = strstr(barrier + 1, "BARRIER")))
      num_tokens++;

   shader_len = strlen(str) + 1;

   left_bytes = shader_len;

   base_hdr_size = 5;
   strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
   first_pass = true;
   sptr = str;
   while (left_bytes) {
      uint32_t length, offlen;
      int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
      if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
         ctx->base.flush(&ctx->base, NULL, 0);

      thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;

      length = MIN2(thispass, left_bytes);
      len = ((length + 3) / 4) + hdr_len;

      if (first_pass)
         offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
      else
         offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;

      virgl_emit_shader_header(ctx, handle, len, virgl_shader_stage_convert(type), offlen, num_tokens);

      if (type == PIPE_SHADER_COMPUTE)
         virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
      else
         virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);

      virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);

      sptr += length;
      first_pass = false;
      left_bytes -= length;
   }

   FREE(str);
   return 0;
}


int virgl_encode_clear(struct virgl_context *ctx,
                       unsigned buffers,
                       const union pipe_color_union *color,
                       double depth, unsigned stencil)
{
   int i;
   uint64_t qword;

   STATIC_ASSERT(sizeof(qword) == sizeof(depth));
   memcpy(&qword, &depth, sizeof(qword));

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, buffers);
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
   virgl_encoder_write_qword(ctx->cbuf, qword);
   virgl_encoder_write_dword(ctx->cbuf, stencil);
   return 0;
}

int virgl_encode_clear_texture(struct virgl_context *ctx,
                               struct virgl_resource *res,
                               unsigned int level,
                               const struct pipe_box *box,
                               const void *data)
{
   const struct util_format_description *desc = util_format_description(res->b.format);
   unsigned block_bits = desc->block.bits;
   uint32_t arr[4] = {0};
   /* The spec describes <data> as a pointer to an array of between one
    * and four components of texel data that will be used as the source
    * for the constant fill value.
    * Here, we are just copying the memory into <arr>. We do not try to
    * re-create the data array. The host part will take care of interpreting
    * the memory and applying the correct format to the clear call.
    */
   memcpy(&arr, data, block_bits / 8);

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_TEXTURE, 0, VIRGL_CLEAR_TEXTURE_SIZE));
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, level);
   virgl_encoder_write_dword(ctx->cbuf, box->x);
   virgl_encoder_write_dword(ctx->cbuf, box->y);
   virgl_encoder_write_dword(ctx->cbuf, box->z);
   virgl_encoder_write_dword(ctx->cbuf, box->width);
   virgl_encoder_write_dword(ctx->cbuf, box->height);
   virgl_encoder_write_dword(ctx->cbuf, box->depth);
   for (unsigned i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, arr[i]);
   return 0;
}

int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
   virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
   virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
   for (i = 0; i < state->nr_cbufs; i++) {
      struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
      virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
   }

   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
      virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
      virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
      virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
   }
   return 0;
}

int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
                                      int start_slot,
                                      int num_viewports,
                                      const struct pipe_viewport_state *states)
{
   int i, v;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (v = 0; v < num_viewports; v++) {
      for (i = 0; i < 3; i++)
         virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
      for (i = 0; i < 3; i++)
         virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
   }
   return 0;
}

int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
                                         uint32_t handle,
                                         unsigned num_elements,
                                         const struct pipe_vertex_element *element)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   for (i = 0; i < num_elements; i++) {
      virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
      virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
      virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
      virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(element[i].src_format));
   }
   return 0;
}

int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
   for (i = 0; i < num_buffers; i++) {
      struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
      virgl_encoder_write_dword(ctx->cbuf, ctx->vertex_elements ? ctx->vertex_elements->strides[i] : 0);
      virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
      virgl_encoder_write_res(ctx, res);
   }
   return 0;
}

int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
                                   const struct virgl_indexbuf *ib)
{
   int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
   struct virgl_resource *res = NULL;
   if (ib)
      res = virgl_resource(ib->buffer);

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
   virgl_encoder_write_res(ctx, res);
   if (ib) {
      virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
      virgl_encoder_write_dword(ctx->cbuf, ib->offset);
   }
   return 0;
}

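/* The draw command comes in three sizes: the base layout, an extended one
 * that appends the patch vertex count and draw id (used for tessellation
 * or a non-zero drawid_offset), and the largest one that additionally
 * carries the indirect draw parameters.
 */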
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draw)
{
   uint32_t length = VIRGL_DRAW_VBO_SIZE;
   if (info->mode == MESA_PRIM_PATCHES || drawid_offset > 0)
      length = VIRGL_DRAW_VBO_SIZE_TESS;
   if (indirect && indirect->buffer)
      length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
   virgl_encoder_write_dword(ctx->cbuf, draw->start);
   virgl_encoder_write_dword(ctx->cbuf, draw->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   virgl_encoder_write_dword(ctx->cbuf, info->index_size ? draw->index_bias : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart ? info->restart_index : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->min_index : 0);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bounds_valid ? info->max_index : ~0);
   if (indirect && indirect->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, indirect->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
      virgl_encoder_write_dword(ctx->cbuf, ctx->patch_vertices); /* vertices per patch */
      virgl_encoder_write_dword(ctx->cbuf, drawid_offset); /* drawid */
   }
   if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
      virgl_encoder_write_res(ctx, virgl_resource(indirect->buffer));
      virgl_encoder_write_dword(ctx->cbuf, indirect->offset);
      virgl_encoder_write_dword(ctx->cbuf, indirect->stride); /* indirect stride */
      virgl_encoder_write_dword(ctx->cbuf, indirect->draw_count); /* indirect draw count */
      virgl_encoder_write_dword(ctx->cbuf, indirect->indirect_draw_count_offset); /* indirect draw count offset */
      if (indirect->indirect_draw_count)
         virgl_encoder_write_res(ctx, virgl_resource(indirect->indirect_draw_count));
      else
         virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
   }
   return 0;
}

static int virgl_encoder_create_surface_common(struct virgl_context *ctx,
                                               uint32_t handle,
                                               struct virgl_resource *res,
                                               const struct pipe_surface *templat)
{
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(templat->format));

   assert(templat->texture->target != PIPE_BUFFER);
   virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
   virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));

   return 0;
}

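/* Multisampled surfaces need the sample count on the host side, so they
 * are created as a separate MSAA surface object that appends nr_samples
 * to the common surface layout; this path requires the host to advertise
 * VIRGL_CAP_V2_IMPLICIT_MSAA.
 */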
int virgl_encoder_create_surface(struct virgl_context *ctx,
                                 uint32_t handle,
                                 struct virgl_resource *res,
                                 const struct pipe_surface *templat)
{
   if (templat->nr_samples > 0) {
      ASSERTED struct virgl_screen *rs = virgl_screen(ctx->base.screen);
      assert(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_IMPLICIT_MSAA);

      virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_MSAA_SURFACE, VIRGL_OBJ_MSAA_SURFACE_SIZE));
      virgl_encoder_create_surface_common(ctx, handle, res, templat);
      virgl_encoder_write_dword(ctx->cbuf, templat->nr_samples);
   } else {
      virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
      virgl_encoder_create_surface_common(ctx, handle, res, templat);
   }

   return 0;
}

int virgl_encoder_create_so_target(struct virgl_context *ctx,
                                   uint32_t handle,
                                   struct virgl_resource *res,
                                   unsigned buffer_offset,
                                   unsigned buffer_size)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
   virgl_encoder_write_dword(ctx->cbuf, buffer_size);
   return 0;
}

enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};

static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
                                            struct virgl_cmd_buf *buf,
                                            struct virgl_transfer *xfer,
                                            enum virgl_transfer3d_encode_stride encode_stride)
{
   struct pipe_transfer *transfer = &xfer->base;
   unsigned stride;
   uintptr_t layer_stride;

   if (encode_stride == virgl_transfer3d_explicit_stride) {
      stride = transfer->stride;
      layer_stride = transfer->layer_stride;
   } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
      stride = 0;
      layer_stride = 0;
   } else {
      assert(!"Invalid virgl_transfer3d_encode_stride value");
   }

   /* We cannot use virgl_encoder_emit_resource with transfer->resource here
    * because transfer->resource might have a different virgl_hw_res than what
    * this transfer targets, which is saved in xfer->hw_res.
    */
   vs->vws->emit_res(vs->vws, buf, xfer->hw_res, true);
   virgl_encoder_write_dword(buf, transfer->level);
   virgl_encoder_write_dword(buf, transfer->usage);
   virgl_encoder_write_dword(buf, stride);
   virgl_encoder_write_dword(buf, layer_stride);
   virgl_encoder_write_dword(buf, transfer->box.x);
   virgl_encoder_write_dword(buf, transfer->box.y);
   virgl_encoder_write_dword(buf, transfer->box.z);
   virgl_encoder_write_dword(buf, transfer->box.width);
   virgl_encoder_write_dword(buf, transfer->box.height);
   virgl_encoder_write_dword(buf, transfer->box.depth);
}

int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}

int virgl_encode_sampler_state(struct virgl_context *ctx,
                               uint32_t handle,
                               const struct pipe_sampler_state *state)
{
   uint32_t tmp;
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
         VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
         VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
         VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
         VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
         VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
         VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
         VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
         VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map) |
         VIRGL_OBJ_SAMPLE_STATE_S0_MAX_ANISOTROPY(state->max_anisotropy);

   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
   return 0;
}


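/* Sampler views encode their range differently depending on the resource
 * type: buffer views send first/last element indices derived from the byte
 * offset and size, while texture views pack layer and level ranges into
 * two dwords. For multi-planar resources the plane index is sent in place
 * of the layer range.
 */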
int virgl_encode_sampler_view(struct virgl_context *ctx,
                              uint32_t handle,
                              struct virgl_resource *res,
                              const struct pipe_sampler_view *state)
{
   unsigned elem_size = util_format_get_blocksize(state->format);
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   uint32_t tmp;
   uint32_t dword_fmt_target = pipe_to_virgl_format(state->format);
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
      dword_fmt_target |= (state->target << 24);
   virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
   if (res->b.target == PIPE_BUFFER) {
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
      virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
   } else {
      if (res->metadata.plane) {
         assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
         virgl_encoder_write_dword(ctx->cbuf, res->metadata.plane);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
      }
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
   }
   tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   return 0;
}

int virgl_encode_set_sampler_views(struct virgl_context *ctx,
                                   enum pipe_shader_type shader_type,
                                   uint32_t start_slot,
                                   uint32_t num_views,
                                   struct virgl_sampler_view **views)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
   virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader_type));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_views; i++) {
      uint32_t handle = views[i] ? views[i]->handle : 0;
      virgl_encoder_write_dword(ctx->cbuf, handle);
   }
   return 0;
}

int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
                                     enum pipe_shader_type shader_type,
                                     uint32_t start_slot,
                                     uint32_t num_handles,
                                     uint32_t *handles)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
   virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader_type));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_handles; i++)
      virgl_encoder_write_dword(ctx->cbuf, handles[i]);
   return 0;
}

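/* Constant data can reach the host in two ways: SET_CONSTANT_BUFFER inlines
 * the dwords straight into the command stream, while SET_UNIFORM_BUFFER
 * below binds an offset/length range of a guest resource instead.
 */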
int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
                                        enum pipe_shader_type shader,
                                        uint32_t index,
                                        uint32_t size,
                                        const void *data)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
   virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader));
   virgl_encoder_write_dword(ctx->cbuf, index);
   if (data)
      virgl_encoder_write_block(ctx->cbuf, data, size * 4);
   return 0;
}

int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
                                     enum pipe_shader_type shader,
                                     uint32_t index,
                                     uint32_t offset,
                                     uint32_t length,
                                     struct virgl_resource *res)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader));
   virgl_encoder_write_dword(ctx->cbuf, index);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, length);
   virgl_encoder_write_res(ctx, res);
   return 0;
}


int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0], (ref->ref_value[1])));
   return 0;
}

int virgl_encoder_set_blend_color(struct virgl_context *ctx,
                                  const struct pipe_blend_color *color)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
   return 0;
}

int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
                                    unsigned start_slot,
                                    int num_scissors,
                                    const struct pipe_scissor_state *ss)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_scissors; i++) {
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
   }
   return 0;
}

void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
                                       const struct pipe_poly_stipple *ps)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
   for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
      virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
   }
}

void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
                                   unsigned sample_mask)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, sample_mask);
}

void virgl_encoder_set_min_samples(struct virgl_context *ctx,
                                   unsigned min_samples)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, min_samples);
}

void virgl_encoder_set_clip_state(struct virgl_context *ctx,
                                  const struct pipe_clip_state *clip)
{
   int i, j;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
   for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
      for (j = 0; j < 4; j++) {
         virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
      }
   }
}

int virgl_encode_resource_copy_region(struct virgl_context *ctx,
                                      struct virgl_resource *dst_res,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct virgl_resource *src_res,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, dst_level);
   virgl_encoder_write_dword(ctx->cbuf, dstx);
   virgl_encoder_write_dword(ctx->cbuf, dsty);
   virgl_encoder_write_dword(ctx->cbuf, dstz);
   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, src_level);
   virgl_encoder_write_dword(ctx->cbuf, src_box->x);
   virgl_encoder_write_dword(ctx->cbuf, src_box->y);
   virgl_encoder_write_dword(ctx->cbuf, src_box->z);
   virgl_encoder_write_dword(ctx->cbuf, src_box->width);
   virgl_encoder_write_dword(ctx->cbuf, src_box->height);
   virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
   return 0;
}

int virgl_encode_blit(struct virgl_context *ctx,
                      struct virgl_resource *dst_res,
                      struct virgl_resource *src_res,
                      const struct pipe_blit_info *blit)
{
   uint32_t tmp;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
   tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
         VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
         VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
         VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
         VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));

   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->dst.format));
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);

   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->src.format));
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
   return 0;
}

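/* Queries are backed by a guest resource: the host writes the result at
 * the given offset into <res>. The query type and index share one dword,
 * with the type in the low 16 bits and the index in the upper 16.
 */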
virgl_encoder_create_query(struct virgl_context * ctx,uint32_t handle,uint query_type,uint query_index,struct virgl_resource * res,uint32_t offset)1353 int virgl_encoder_create_query(struct virgl_context *ctx,
1354 uint32_t handle,
1355 uint query_type,
1356 uint query_index,
1357 struct virgl_resource *res,
1358 uint32_t offset)
1359 {
1360 virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
1361 virgl_encoder_write_dword(ctx->cbuf, handle);
1362 virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
1363 virgl_encoder_write_dword(ctx->cbuf, offset);
1364 virgl_encoder_write_res(ctx, res);
1365 return 0;
1366 }
1367
int virgl_encoder_begin_query(struct virgl_context *ctx,
                              uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encoder_end_query(struct virgl_context *ctx,
                            uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                   uint32_t handle, bool wait)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   return 0;
}

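/*
 * Encode conditional-rendering state: the query handle, the condition
 * flag and the pipe_render_cond_flag wait/by-region mode.
 */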
int virgl_encoder_render_condition(struct virgl_context *ctx,
                                   uint32_t handle, bool condition,
                                   enum pipe_render_cond_flag mode)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, condition);
   virgl_encoder_write_dword(ctx->cbuf, mode);
   return 0;
}

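/*
 * Bind stream-output targets.  The payload is the append bitmask followed
 * by one target handle per slot; a zero handle leaves the slot unbound.
 */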
int virgl_encoder_set_so_targets(struct virgl_context *ctx,
                                 unsigned num_targets,
                                 struct pipe_stream_output_target **targets,
                                 unsigned append_bitmask)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
   virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
   for (i = 0; i < num_targets; i++) {
      struct virgl_so_target *tg = virgl_so_target(targets[i]);
      virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
   }
   return 0;
}

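/*
 * Sub-contexts let one virgl context multiplex several independent state
 * sets on the host; the guest selects the active one with SET_SUB_CTX.
 */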
int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

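/*
 * Encode a LINK_SHADER command.  handles[] is indexed by pipe_shader_type
 * and one handle is written per stage in the fixed order below; stages
 * without a shader simply encode a zero handle.
 */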
int virgl_encode_link_shader(struct virgl_context *ctx, uint32_t *handles)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LINK_SHADER, 0, VIRGL_LINK_SHADER_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_VERTEX]);
   virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_FRAGMENT]);
   virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_GEOMETRY]);
   virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_TESS_CTRL]);
   virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_TESS_EVAL]);
   virgl_encoder_write_dword(ctx->cbuf, handles[PIPE_SHADER_COMPUTE]);
   return 0;
}

int virgl_encode_bind_shader(struct virgl_context *ctx,
                             uint32_t handle,
                             enum pipe_shader_type type)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(type));
   return 0;
}

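/*
 * Encode the default tessellation levels: four outer then two inner
 * floats, each transmitted as its IEEE-754 bit pattern via fui().
 */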
int virgl_encode_set_tess_state(struct virgl_context *ctx,
                                const float outer[4],
                                const float inner[2])
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
   for (i = 0; i < 2; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
   return 0;
}

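/*
 * Bind a range of shader storage buffer slots.  Each bound slot encodes
 * offset, size and resource handle, and the written range is added to the
 * resource's valid range so later readbacks see it; an empty slot encodes
 * three zero dwords.  The atomic-buffer encoder below uses the same
 * per-slot layout, minus the shader-stage dword.
 */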
int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    const struct pipe_shader_buffer *buffers)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));

   virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < count; i++) {
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
         virgl_encoder_write_res(ctx, res);

         util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
                        buffers[i].buffer_offset + buffers[i].buffer_size);
         virgl_resource_dirty(res, 0);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
   return 0;
}

int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
                                       unsigned start_slot, unsigned count,
                                       const struct pipe_shader_buffer *buffers)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));

   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < count; i++) {
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
         virgl_encoder_write_res(ctx, res);

         util_range_add(&res->b, &res->valid_buffer_range, buffers[i].buffer_offset,
                        buffers[i].buffer_offset + buffers[i].buffer_size);
         virgl_resource_dirty(res, 0);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
   return 0;
}

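/*
 * Bind shader image slots.  Each bound slot encodes format, access flags,
 * the two dwords of the pipe_image_view union (the buffer offset/size for
 * buffer images; for texture images the aliased layer/level fields travel
 * in the same two dwords) and the resource handle; an empty slot encodes
 * five zero dwords.
 */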
int virgl_encode_set_shader_images(struct virgl_context *ctx,
                                   enum pipe_shader_type shader,
                                   unsigned start_slot, unsigned count,
                                   const struct pipe_image_view *images)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));

   virgl_encoder_write_dword(ctx->cbuf, virgl_shader_stage_convert(shader));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < count; i++) {
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(images[i].format));
         virgl_encoder_write_dword(ctx->cbuf, images[i].access);
         virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
         virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
         virgl_encoder_write_res(ctx, res);

         if (res->b.target == PIPE_BUFFER) {
            util_range_add(&res->b, &res->valid_buffer_range, images[i].u.buf.offset,
                           images[i].u.buf.offset + images[i].u.buf.size);
         }
         virgl_resource_dirty(res, images[i].u.tex.level);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
   return 0;
}

int virgl_encode_memory_barrier(struct virgl_context *ctx,
                                unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}

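/*
 * Encode a compute dispatch: three block-size dwords, three grid-size
 * dwords, then either the indirect buffer's resource handle or a zero
 * dword, followed by the indirect offset.
 */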
int virgl_encode_launch_grid(struct virgl_context *ctx,
                             const struct pipe_grid_info *grid_info)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
   if (grid_info->indirect) {
      struct virgl_resource *res = virgl_resource(grid_info->indirect);
      virgl_encoder_write_res(ctx, res);
   } else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
   return 0;
}

int virgl_encode_texture_barrier(struct virgl_context *ctx,
                                 unsigned flags)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, flags);
   return 0;
}

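/*
 * Forward a debug-flag string to the host renderer.  The string is
 * truncated to the maximum command payload (0xffff dwords) and sent
 * padded out to whole dwords.
 */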
int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
                                       const char *flagstring)
{
   unsigned long slen = strlen(flagstring) + 1;
   uint32_t sslen;
   uint32_t string_length;

   /* slen counts the NUL terminator, so an empty string gives 1 */
   if (slen <= 1)
      return 0;

   if (slen > 4 * 0xffff) {
      debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
      slen = 4 * 0xffff;
   }

   sslen = (uint32_t)(slen + 3) / 4;
   string_length = (uint32_t)MIN2(sslen * 4, slen);

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
   virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
   return 0;
}

int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, tweak);
   virgl_encoder_write_dword(ctx->cbuf, value);
   return 0;
}

int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
                                      uint32_t handle,
                                      struct virgl_resource *res, bool wait,
                                      uint32_t result_type,
                                      uint32_t offset,
                                      uint32_t index)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   virgl_encoder_write_dword(ctx->cbuf, result_type);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, index);
   return 0;
}

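/*
 * Encode an inline TRANSFER3D.  The host normally infers the stride from
 * the resource layout; for level-0 2D transfers out of guest-backed blob
 * memory an explicit stride is encoded instead.
 */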
void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
                           struct virgl_transfer *trans, uint32_t direction)
{
   uint32_t command;
   struct virgl_resource *vres = virgl_resource(trans->base.resource);
   enum virgl_transfer3d_encode_stride stride_type =
      virgl_transfer3d_host_inferred_stride;

   if (trans->base.box.depth == 1 && trans->base.level == 0 &&
       trans->base.resource->target == PIPE_TEXTURE_2D &&
       vres->blob_mem == VIRGL_BLOB_MEM_HOST3D_GUEST)
      stride_type = virgl_transfer3d_explicit_stride;

   command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
   virgl_encoder_write_dword(buf, command);
   virgl_encoder_transfer3d_common(vs, buf, trans, stride_type);
   virgl_encoder_write_dword(buf, trans->offset);
   virgl_encoder_write_dword(buf, direction);
}

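/*
 * Encode a COPY_TRANSFER3D, which moves data through a staging resource.
 * The final dword packs the "synchronized" flag and, on hosts advertising
 * VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS, a read-from-host bit.
 */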
void virgl_encode_copy_transfer(struct virgl_context *ctx,
                                struct virgl_transfer *trans)
{
   uint32_t command;
   struct virgl_screen *vs = virgl_screen(ctx->base.screen);
   /* The synchronized flag is always set; the second bit encodes the
    * transfer direction.
    */
   uint32_t direction_and_synchronized = VIRGL_COPY_TRANSFER3D_FLAGS_SYNCHRONIZED;

   if (vs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS) {
      if (trans->direction == VIRGL_TRANSFER_TO_HOST) {
         /* Nothing to add: 0 means transfer to host. */
      } else if (trans->direction == VIRGL_TRANSFER_FROM_HOST) {
         direction_and_synchronized |= VIRGL_COPY_TRANSFER3D_FLAGS_READ_FROM_HOST;
      } else {
         /* Unreachable: unknown transfer direction. */
         assert(0);
      }
   }
   assert(trans->copy_src_hw_res);
   command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);

   virgl_encoder_write_cmd_dword(ctx, command);
   /* Copy transfers need to explicitly specify the stride, since it may
    * differ from the image stride.
    */
   virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
   vs->vws->emit_res(vs->vws, ctx->cbuf, trans->copy_src_hw_res, true);
   virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
   virgl_encoder_write_dword(ctx->cbuf, direction_and_synchronized);
}

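/*
 * Pad a transfer command buffer out to VIRGL_MAX_TBUF_DWORDS with a single
 * END_TRANSFERS command whose length field covers the unused tail, so the
 * host skips over it.
 */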
void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
{
   uint32_t command, diff;
   diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
   if (diff) {
      command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
      virgl_encoder_write_dword(buf, command);
   }
}

void virgl_encode_get_memory_info(struct virgl_context *ctx, struct virgl_resource *res)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_MEMORY_INFO, 0, 1));
   virgl_encoder_write_res(ctx, res);
}

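/*
 * Encode an EMIT_STRING_MARKER carrying a length dword plus the message
 * padded out to whole dwords; overlong markers are truncated to the
 * maximum command payload.
 */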
void virgl_encode_emit_string_marker(struct virgl_context *ctx,
                                     const char *message, int len)
{
   /* len is guaranteed to be non-negative but be defensive */
   assert(len >= 0);
   if (len <= 0)
      return;

   if (len > 4 * 0xffff) {
      debug_printf("VIRGL: string marker too long, will be truncated\n");
      len = 4 * 0xffff;
   }

   uint32_t buf_len = (uint32_t)(len + 3) / 4 + 1;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_EMIT_STRING_MARKER, 0, buf_len));
   virgl_encoder_write_dword(ctx->cbuf, len);
   virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)message, len);
}

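/*
 * Create a host-side video codec object.  Hosts with feature-check
 * version >= 14 also receive the max_references field, hence the
 * variable command length.
 */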
void virgl_encode_create_video_codec(struct virgl_context *ctx,
                                     struct virgl_video_codec *cdc)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   uint32_t len = rs->caps.caps.v2.host_feature_check_version >= 14 ? 8 : 7;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_VIDEO_CODEC, 0, len));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, cdc->base.profile);
   virgl_encoder_write_dword(ctx->cbuf, cdc->base.entrypoint);
   virgl_encoder_write_dword(ctx->cbuf, cdc->base.chroma_format);
   virgl_encoder_write_dword(ctx->cbuf, cdc->base.level);
   virgl_encoder_write_dword(ctx->cbuf, cdc->base.width);
   virgl_encoder_write_dword(ctx->cbuf, cdc->base.height);
   if (rs->caps.caps.v2.host_feature_check_version >= 14)
      virgl_encoder_write_dword(ctx->cbuf, cdc->base.max_references);
}

void virgl_encode_destroy_video_codec(struct virgl_context *ctx,
                                      struct virgl_video_codec *cdc)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_VIDEO_CODEC, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
}

void virgl_encode_create_video_buffer(struct virgl_context *ctx,
                                      struct virgl_video_buffer *vbuf)
{
   unsigned i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_VIDEO_BUFFER, 0,
                                                 4 + vbuf->num_planes));
   virgl_encoder_write_dword(ctx->cbuf, vbuf->handle);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(vbuf->buf->buffer_format));
   virgl_encoder_write_dword(ctx->cbuf, vbuf->buf->width);
   virgl_encoder_write_dword(ctx->cbuf, vbuf->buf->height);
   for (i = 0; i < vbuf->num_planes; i++)
      virgl_encoder_write_res(ctx, virgl_resource(vbuf->plane_views[i]->texture));
}

void virgl_encode_destroy_video_buffer(struct virgl_context *ctx,
                                       struct virgl_video_buffer *buf)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_VIDEO_BUFFER, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
}

void virgl_encode_begin_frame(struct virgl_context *ctx,
                              struct virgl_video_codec *cdc,
                              struct virgl_video_buffer *buf)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_FRAME, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
}

void virgl_encode_decode_bitstream(struct virgl_context *ctx,
                                   struct virgl_video_codec *cdc,
                                   struct virgl_video_buffer *buf,
                                   void *desc, uint32_t desc_size)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DECODE_BITSTREAM, 0, 5));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
   virgl_encoder_write_res(ctx, virgl_resource(cdc->desc_buffers[cdc->cur_buffer]));
   virgl_encoder_write_res(ctx, virgl_resource(cdc->bs_buffers[cdc->cur_buffer]));
   virgl_encoder_write_dword(ctx->cbuf, cdc->bs_size);
}

void virgl_encode_encode_bitstream(struct virgl_context *ctx,
                                   struct virgl_video_codec *cdc,
                                   struct virgl_video_buffer *buf,
                                   struct virgl_resource *tgt)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_ENCODE_BITSTREAM, 0, 5));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
   virgl_encoder_write_res(ctx, tgt);
   virgl_encoder_write_res(ctx, virgl_resource(cdc->desc_buffers[cdc->cur_buffer]));
   virgl_encoder_write_res(ctx, virgl_resource(cdc->feed_buffers[cdc->cur_buffer]));
}

void virgl_encode_end_frame(struct virgl_context *ctx,
                            struct virgl_video_codec *cdc,
                            struct virgl_video_buffer *buf)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_FRAME, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, cdc->handle);
   virgl_encoder_write_dword(ctx->cbuf, buf->handle);
}

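/*
 * Encode a CLEAR_SURFACE command: the first payload dword packs the
 * render-condition flag and the buffer mask, followed by the surface
 * handle, four clear-color dwords and the destination rectangle.
 */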
int virgl_encode_clear_surface(struct virgl_context *ctx,
                               struct pipe_surface *surf,
                               unsigned buffers,
                               const union pipe_color_union *color,
                               unsigned dstx, unsigned dsty,
                               unsigned width, unsigned height,
                               bool render_condition_enabled)
{
   int i;
   uint32_t tmp;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_SURFACE, 0, VIRGL_CLEAR_SURFACE_SIZE));

   tmp = VIRGL_CLEAR_SURFACE_S0_RENDER_CONDITION(render_condition_enabled) |
         VIRGL_CLEAR_SURFACE_S0_BUFFERS(buffers);

   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, virgl_surface(surf)->handle);

   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);

   virgl_encoder_write_dword(ctx->cbuf, dstx);
   virgl_encoder_write_dword(ctx->cbuf, dsty);
   virgl_encoder_write_dword(ctx->cbuf, width);
   virgl_encoder_write_dword(ctx->cbuf, height);

   return 0;
}