xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/virgl/virgl_context.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <string.h>
#ifndef _WIN32
#include <libsync.h>
#endif

#include "pipe/p_shader_tokens.h"

#include "compiler/nir/nir.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "nir/nir_to_tgsi.h"
#include "util/format/u_format.h"
#include "indices/u_primconvert.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"
#include "util/u_helpers.h"
#include "util/slab.h"
#include "util/u_upload_mgr.h"
#include "util/u_blitter.h"

#include "virgl_encode.h"
#include "virgl_context.h"
#include "virtio-gpu/virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"
#include "virgl_video.h"

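/* Object handles are allocated guest-side from a single global counter and
 * are shared across all VIRGL_OBJECT_* types; the host only ever sees the
 * resulting non-zero 32-bit IDs.
 */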
static uint32_t next_handle;
uint32_t virgl_object_assign_handle(void)
{
   return p_atomic_inc_return(&next_handle);
}

bool
virgl_can_rebind_resource(struct virgl_context *vctx,
                          struct pipe_resource *res)
{
   /* We cannot rebind resources that are referenced by host objects, which
    * are
    *
    *  - VIRGL_OBJECT_SURFACE
    *  - VIRGL_OBJECT_SAMPLER_VIEW
    *  - VIRGL_OBJECT_STREAMOUT_TARGET
    *
    * Because surfaces cannot be created from buffers, we require the resource
    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
    */
   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
                                      PIPE_BIND_STREAM_OUTPUT);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
}

void
virgl_rebind_resource(struct virgl_context *vctx,
                      struct pipe_resource *res)
{
   /* Queries use internally created buffers and do not go through transfers.
    * Index buffers are not bindable.  They are not tracked.
    */
   ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
                                           PIPE_BIND_CONSTANT_BUFFER |
                                           PIPE_BIND_SHADER_BUFFER |
                                           PIPE_BIND_SHADER_IMAGE);
   const unsigned bind_history = virgl_resource(res)->bind_history;
   unsigned i;

   assert(virgl_can_rebind_resource(vctx, res) &&
          (bind_history & tracked_bind) == bind_history);

   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
      for (i = 0; i < vctx->num_vertex_buffers; i++) {
         if (vctx->vertex_buffer[i].buffer.resource == res) {
            vctx->vertex_array_dirty = true;
            break;
         }
      }
   }

   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
      while (remaining_mask) {
         int i = u_bit_scan(&remaining_mask);
         if (vctx->atomic_buffers[i].buffer == res) {
            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
         }
      }
   }

   /* check per-stage shader bindings */
   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
                       PIPE_BIND_SHADER_BUFFER |
                       PIPE_BIND_SHADER_IMAGE)) {
      enum pipe_shader_type shader_type;
      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
         const struct virgl_shader_binding_state *binding =
            &vctx->shader_bindings[shader_type];

         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
            uint32_t remaining_mask = binding->ubo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ubos[i].buffer == res) {
                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
                                                   ubo->buffer_offset,
                                                   ubo->buffer_size,
                                                   virgl_resource(res));
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
            uint32_t remaining_mask = binding->ssbo_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->ssbos[i].buffer == res) {
                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
                                                  ssbo);
               }
            }
         }

         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
            uint32_t remaining_mask = binding->image_enabled_mask;
            while (remaining_mask) {
               int i = u_bit_scan(&remaining_mask);
               if (binding->images[i].resource == res) {
                  const struct pipe_image_view *image = &binding->images[i];
                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
                                                 image);
               }
            }
         }
      }
   }
}

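/* The virgl_attach_res_* helpers below re-add the hardware resource backing
 * each current binding to the command buffer's resource list via
 * vws->emit_res (the final 'false' argument appears to request tracking
 * only, without writing the handle into the command stream), so the host
 * keeps the resources alive for the commands that reference them.
 */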
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct pipe_surface *surf;
   struct virgl_resource *res;
   unsigned i;

   surf = vctx->framebuffer.zsbuf;
   if (surf) {
      res = virgl_resource(surf->texture);
      if (res) {
         vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
         virgl_resource_dirty(res, surf->u.tex.level);
      }
   }
   for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
      surf = vctx->framebuffer.cbufs[i];
      if (surf) {
         res = virgl_resource(surf->texture);
         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
            virgl_resource_dirty(res, surf->u.tex.level);
         }
      }
   }
}

static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   for (int i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i) {
      if (binding->views[i] && binding->views[i]->texture) {
         struct virgl_resource *res = virgl_resource(binding->views[i]->texture);
         vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
      }
   }
}

static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_vertex_buffers; i++) {
      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
   }
}

static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
                                          struct virgl_indexbuf *ib)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;

   res = virgl_resource(ib->buffer);
   if (res)
      vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
}

static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res;
   unsigned i;

   for (i = 0; i < vctx->num_so_targets; i++) {
      res = virgl_resource(vctx->so_targets[i].base.buffer);
      if (res)
         vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
   }
}

static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ubo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ubos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
   }
}

static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->ssbo_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->ssbos[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
   }
}

static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                           enum pipe_shader_type shader_type)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   const struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];
   uint32_t remaining_mask = binding->image_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(binding->images[i].resource);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
   }
}

static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
   struct virgl_resource *res;

   while (remaining_mask) {
      int i = u_bit_scan(&remaining_mask);
      res = virgl_resource(vctx->atomic_buffers[i].buffer);
      assert(res);
      vws->emit_res(vws, vctx->cbuf, res->hw_res, false);
   }
}

/*
 * after flushing, the hw context still has a bunch of
 * resources bound, so we need to rebind those here.
 */
static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
   enum pipe_shader_type shader_type;

   /* reattach any flushed resources */
   /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
   virgl_attach_res_framebuffer(vctx);

   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
      virgl_attach_res_shader_buffers(vctx, shader_type);
      virgl_attach_res_shader_images(vctx, shader_type);
   }
   virgl_attach_res_atomic_buffers(vctx);
   virgl_attach_res_vertex_buffers(vctx);
   virgl_attach_res_so_targets(vctx);
}

static void virgl_reemit_compute_resources(struct virgl_context *vctx)
{
   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);

   virgl_attach_res_atomic_buffers(vctx);
}

static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf;
   struct virgl_resource *res = virgl_resource(resource);
   uint32_t handle;

   /* no support for buffer surfaces */
   if (resource->target == PIPE_BUFFER)
      return NULL;

   surf = CALLOC_STRUCT(virgl_surface);
   if (!surf)
      return NULL;

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(templ->format) ==
           util_format_is_srgb(resource->format)));

   virgl_resource_dirty(res, 0);
   handle = virgl_object_assign_handle();
   pipe_reference_init(&surf->base.reference, 1);
   pipe_resource_reference(&surf->base.texture, resource);
   surf->base.context = ctx;
   surf->base.format = templ->format;

   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
   surf->base.u.tex.level = templ->u.tex.level;
   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
   surf->base.u.tex.last_layer = templ->u.tex.last_layer;
   surf->base.nr_samples = templ->nr_samples;

   virgl_encoder_create_surface(vctx, handle, res, &surf->base);
   surf->handle = handle;
   return &surf->base;
}

static void virgl_surface_destroy(struct pipe_context *ctx,
                                  struct pipe_surface *psurf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_surface *surf = virgl_surface(psurf);

   pipe_resource_reference(&surf->base.texture, NULL);
   virgl_encode_delete_object(vctx, surf->handle, VIRGL_OBJECT_SURFACE);
   FREE(surf);
}

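/* Most CSOs (blend, DSA, sampler state, shaders) live entirely on the host;
 * the pointer handed back to the state tracker is just the 32-bit object
 * handle cast through unsigned long.  Rasterizer and vertex-elements state
 * below are the exceptions, since they also need guest-side copies.
 */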
static void *virgl_create_blend_state(struct pipe_context *ctx,
                                      const struct pipe_blend_state *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_blend_state(vctx, handle, blend_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_blend_state(struct pipe_context *ctx,
                                   void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void virgl_delete_blend_state(struct pipe_context *ctx,
                                     void *blend_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)blend_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_BLEND);
}

static void *virgl_create_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                    const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   handle = virgl_object_assign_handle();

   virgl_encode_dsa_state(vctx, handle, dsa_state);
   return (void *)(unsigned long)handle;
}

static void virgl_bind_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                 void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void virgl_delete_depth_stencil_alpha_state(struct pipe_context *ctx,
                                                   void *dsa_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = (unsigned long)dsa_state;
   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_DSA);
}

static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                           const struct pipe_rasterizer_state *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);

   if (!vrs)
      return NULL;
   vrs->rs = *rs_state;
   vrs->handle = virgl_object_assign_handle();

   assert(rs_state->depth_clip_near ||
          virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);

   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
   return (void *)vrs;
}

static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                        void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle = 0;
   if (rs_state) {
      struct virgl_rasterizer_state *vrs = rs_state;
      vctx->rs_state = *vrs;
      handle = vrs->handle;
   }
   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
}

static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                          void *rs_state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_rasterizer_state *vrs = rs_state;
   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
   FREE(vrs);
}

static void virgl_set_framebuffer_state(struct pipe_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->framebuffer = *state;
   virgl_encoder_set_framebuffer_state(vctx, state);
   virgl_attach_res_framebuffer(vctx);
}

static void virgl_set_viewport_states(struct pipe_context *ctx,
                                      unsigned start_slot,
                                      unsigned num_viewports,
                                      const struct pipe_viewport_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_viewport_states(vctx, start_slot, num_viewports, state);
}

static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                unsigned num_elements,
                                                const struct pipe_vertex_element *elements)
{
   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      CALLOC_STRUCT(virgl_vertex_elements_state);

   for (int i = 0; i < num_elements; ++i) {
      if (elements[i].instance_divisor) {
         /* Virglrenderer doesn't deal with instance_divisor correctly if
          * there isn't a 1:1 relationship between elements and bindings.
          * So let's make sure there is, by duplicating bindings.
          */
         for (int j = 0; j < num_elements; ++j) {
            new_elements[j] = elements[j];
            new_elements[j].vertex_buffer_index = j;
            state->binding_map[j] = elements[j].vertex_buffer_index;
         }
         elements = new_elements;
         state->num_bindings = num_elements;
         break;
      }
   }
   for (int i = 0; i < num_elements; ++i)
      state->strides[elements[i].vertex_buffer_index] = elements[i].src_stride;

   state->handle = virgl_object_assign_handle();
   virgl_encoder_create_vertex_elements(vctx, state->handle,
                                        num_elements, elements);
   return state;
}

static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                               void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
   FREE(state);
}

static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                             void *ve)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_vertex_elements_state *state =
      (struct virgl_vertex_elements_state *)ve;
   vctx->vertex_elements = state;
   virgl_encode_bind_object(vctx, state ? state->handle : 0,
                            VIRGL_OBJECT_VERTEX_ELEMENTS);
   vctx->vertex_array_dirty = true;
}

static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   util_set_vertex_buffers_count(vctx->vertex_buffer,
                                 &vctx->num_vertex_buffers,
                                 buffers, num_buffers,
                                 true);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; i++) {
         struct virgl_resource *res =
            virgl_resource(buffers[i].buffer.resource);
         if (res && !buffers[i].is_user_buffer)
            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
      }
   }

   vctx->vertex_array_dirty = true;
}

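/* Vertex buffers are sent lazily: the set_vertex_buffers hook above only
 * marks vertex_array_dirty, and the state is (re)encoded here just before a
 * draw.  If the bound vertex-elements CSO had to duplicate bindings to work
 * around the instance_divisor limitation, the buffers are remapped through
 * ve->binding_map so every duplicated binding sees the right buffer.
 */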
static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
{
   if (vctx->vertex_array_dirty) {
      const struct virgl_vertex_elements_state *ve = vctx->vertex_elements;

      if (ve && ve->num_bindings) {
         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
         for (int i = 0; i < ve->num_bindings; ++i)
            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];

         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
      } else
         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);

      virgl_attach_res_vertex_buffers(vctx);

      vctx->vertex_array_dirty = false;
   }
}

static void virgl_set_stencil_ref(struct pipe_context *ctx,
                                  const struct pipe_stencil_ref ref)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_stencil_ref(vctx, &ref);
}

static void virgl_set_blend_color(struct pipe_context *ctx,
                                  const struct pipe_blend_color *color)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_blend_color(vctx, color);
}

static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
                                      struct virgl_indexbuf *ib)
{
   virgl_encoder_set_index_buffer(vctx, ib);
   virgl_attach_res_index_buffer(vctx, ib);
}

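/* Resource-backed constant buffers are bound on the host by handle;
 * user-pointer constant buffers instead have their contents written inline
 * into the command stream (buffer_size / 4 dwords), so no guest resource is
 * created for them.
 */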
static void virgl_set_constant_buffer(struct pipe_context *ctx,
                                      enum pipe_shader_type shader, uint index,
                                      bool take_ownership,
                                      const struct pipe_constant_buffer *buf)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   if (buf && buf->buffer) {
      struct virgl_resource *res = virgl_resource(buf->buffer);
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;

      virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                       buf->buffer_offset,
                                       buf->buffer_size, res);

      if (take_ownership) {
         pipe_resource_reference(&binding->ubos[index].buffer, NULL);
         binding->ubos[index].buffer = buf->buffer;
      } else {
         pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
      }
      binding->ubos[index] = *buf;
      binding->ubo_enabled_mask |= 1 << index;
   } else {
      static const struct pipe_constant_buffer dummy_ubo;
      if (!buf)
         buf = &dummy_ubo;
      virgl_encoder_write_constant_buffer(vctx, shader, index,
                                          buf->buffer_size / 4,
                                          buf->user_buffer);

      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
      binding->ubo_enabled_mask &= ~(1 << index);
   }
}

static bool
lower_gles_arrayshadow_offset_filter(const nir_instr *instr,
                                     UNUSED const void *data)
{
   if (instr->type != nir_instr_type_tex)
      return false;

   nir_tex_instr *tex = nir_instr_as_tex(instr);

   if (!tex->is_shadow || !tex->is_array)
      return false;

   // textureGradOffset can be used directly
   int grad_index = nir_tex_instr_src_index(tex, nir_tex_src_ddx);
   int proj_index = nir_tex_instr_src_index(tex, nir_tex_src_projector);
   if (grad_index >= 0 && proj_index < 0)
      return false;

   int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_index >= 0)
      return true;

   return false;
}

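/* The virgl protocol carries TGSI, so NIR shaders are cloned and lowered to
 * TGSI tokens with nir_to_tgsi_options() before encoding.  On GLES hosts
 * lacking VIRGL_CAP_V2_TEXTURE_SHADOW_LOD, offsets on array-shadow texture
 * ops are first lowered away using the filter above.  Finally
 * virgl_tgsi_transform() applies host-specific fixups to the token stream.
 */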
static void *virgl_shader_encoder(struct pipe_context *ctx,
                                  const struct pipe_shader_state *shader,
                                  unsigned type)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   uint32_t handle;
   const struct tgsi_token *tokens;
   const struct tgsi_token *ntt_tokens = NULL;
   struct tgsi_token *new_tokens;
   int ret;
   bool is_separable = false;

   if (shader->type == PIPE_SHADER_IR_NIR) {
      struct nir_to_tgsi_options options = {
         .unoptimized_ra = true,
         .lower_fabs = true,
         .lower_ssbo_bindings =
               rs->caps.caps.v2.host_feature_check_version >= 16,
         .non_compute_membar_needs_all_modes = true
      };

      if (!(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_TEXTURE_SHADOW_LOD) &&
          rs->caps.caps.v2.capability_bits & VIRGL_CAP_HOST_IS_GLES) {
         nir_lower_tex_options lower_tex_options = {
            .lower_offset_filter = lower_gles_arrayshadow_offset_filter,
         };

         NIR_PASS_V(shader->ir.nir, nir_lower_tex, &lower_tex_options);
      }

      nir_shader *s = nir_shader_clone(NULL, shader->ir.nir);

      /* The host can't handle certain IO slots as separable, because we can't assign
       * more than 32 IO locations explicitly, and with varyings and patches we already
       * exhaust the possible ways of handling this for the varyings with generic names,
       * so drop the flag in these cases */
      const uint64_t drop_slots_for_separable_io = 0xffull << VARYING_SLOT_TEX0 |
                                                   1 << VARYING_SLOT_FOGC |
                                                   1 << VARYING_SLOT_BFC0 |
                                                   1 << VARYING_SLOT_BFC1 |
                                                   1 << VARYING_SLOT_COL0 |
                                                   1 << VARYING_SLOT_COL1;
      bool keep_separable_flags = true;
      if (s->info.stage != MESA_SHADER_VERTEX)
         keep_separable_flags &= !(s->info.inputs_read & drop_slots_for_separable_io);
      if (s->info.stage != MESA_SHADER_FRAGMENT)
         keep_separable_flags &= !(s->info.outputs_written & drop_slots_for_separable_io);

      /* Propagate the separable shader property to the host, unless it is an
       * internal shader - these are marked separable even though they are not. */
      is_separable = s->info.separate_shader && !s->info.internal && keep_separable_flags;
      ntt_tokens = tokens = nir_to_tgsi_options(s, vctx->base.screen, &options); /* takes ownership */
   } else {
      tokens = shader->tokens;
   }

   new_tokens = virgl_tgsi_transform(rs, tokens, is_separable);
   if (!new_tokens)
      return NULL;

   handle = virgl_object_assign_handle();
   /* encode shader state for the given stage */
   ret = virgl_encode_shader_state(vctx, handle, type,
                                   &shader->stream_output, 0,
                                   new_tokens);
   if (ret) {
      FREE((void *)ntt_tokens);
      return NULL;
   }

   FREE((void *)ntt_tokens);
   FREE(new_tokens);
   return (void *)(unsigned long)handle;
}

static void *virgl_create_vs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
}

static void *virgl_create_tcs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
}

static void *virgl_create_tes_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
}

static void *virgl_create_gs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_GEOMETRY);
}

static void *virgl_create_fs_state(struct pipe_context *ctx,
                                   const struct pipe_shader_state *shader)
{
   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_FRAGMENT);
}

static void
virgl_delete_fs_state(struct pipe_context *ctx,
                      void *fs)
{
   uint32_t handle = (unsigned long)fs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_gs_state(struct pipe_context *ctx,
                      void *gs)
{
   uint32_t handle = (unsigned long)gs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_vs_state(struct pipe_context *ctx,
                      void *vs)
{
   uint32_t handle = (unsigned long)vs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tcs_state(struct pipe_context *ctx,
                       void *tcs)
{
   uint32_t handle = (unsigned long)tcs;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void
virgl_delete_tes_state(struct pipe_context *ctx,
                       void *tes)
{
   uint32_t handle = (unsigned long)tes;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_bind_vs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
}

static void virgl_bind_tcs_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
}

static void virgl_bind_tes_state(struct pipe_context *ctx,
                                 void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
}

static void virgl_bind_gs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_GEOMETRY);
}

static void virgl_bind_fs_state(struct pipe_context *ctx,
                                void *vss)
{
   uint32_t handle = (unsigned long)vss;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_FRAGMENT);
}

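/* The first draw or clear after a flush re-emits every bound resource (see
 * virgl_reemit_draw_resources): flushing starts a fresh command buffer with
 * an empty resource list, so the resources backing the current state must be
 * attached again before new commands reference them.
 */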
static void virgl_clear(struct pipe_context *ctx,
                        unsigned buffers,
                        const struct pipe_scissor_state *scissor_state,
                        const union pipe_color_union *color,
                        double depth, unsigned stencil)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_encode_clear(vctx, buffers, color, depth, stencil);
}

static void virgl_clear_render_target(struct pipe_context *ctx,
                                      struct pipe_surface *dst,
                                      const union pipe_color_union *color,
                                      unsigned dstx, unsigned dsty,
                                      unsigned width, unsigned height,
                                      bool render_condition_enabled)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_clear_surface(vctx, dst, PIPE_CLEAR_COLOR0, color,
                              dstx, dsty, width, height, render_condition_enabled);

   /* Mark as dirty, since we are updating the host side resource
    * without going through the corresponding guest side resource, and
    * hence the two will diverge.
    */
   virgl_resource_dirty(virgl_resource(dst->texture), dst->u.tex.level);
}

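/* The clear_surface command carries no separate depth/stencil payload, so
 * the depth double is packed into ui[0..1] of the color union and the
 * stencil value into ui[3]; the host decoder is expected to unpack it the
 * same way.
 */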
static void virgl_clear_depth_stencil(struct pipe_context *ctx,
                                      struct pipe_surface *dst,
                                      unsigned clear_flags,
                                      double depth,
                                      unsigned stencil,
                                      unsigned dstx, unsigned dsty,
                                      unsigned width, unsigned height,
                                      bool render_condition_enabled)
{
   struct virgl_context *vctx = virgl_context(ctx);

   union pipe_color_union color;
   memcpy(color.ui, &depth, sizeof(double));
   color.ui[3] = stencil;

   virgl_encode_clear_surface(vctx, dst, clear_flags, &color,
                              dstx, dsty, width, height, render_condition_enabled);

   /* Mark as dirty, since we are updating the host side resource
    * without going through the corresponding guest side resource, and
    * hence the two will diverge.
    */
   virgl_resource_dirty(virgl_resource(dst->texture), dst->u.tex.level);
}

static void virgl_clear_render_target_stub(struct pipe_context *ctx,
                                           struct pipe_surface *dst,
                                           const union pipe_color_union *color,
                                           unsigned dstx, unsigned dsty,
                                           unsigned width, unsigned height,
                                           bool render_condition_enabled)
{
   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: clear render target unsupported.\n");
}

static void virgl_clear_texture(struct pipe_context *ctx,
                                struct pipe_resource *res,
                                unsigned int level,
                                const struct pipe_box *box,
                                const void *data)
{
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_resource *vres = virgl_resource(res);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_CLEAR_TEXTURE) {
      struct virgl_context *vctx = virgl_context(ctx);
      virgl_encode_clear_texture(vctx, vres, level, box, data);
   } else {
      u_default_clear_texture(ctx, res, level, box, data);
   }
   /* Mark as dirty, since we are updating the host side resource
    * without going through the corresponding guest side resource, and
    * hence the two will diverge.
    */
   virgl_resource_dirty(vres, level);
}

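/* Draw path: multi-draws are split via util_draw_multi(), primitive types
 * the host cannot handle (per caps.v1.prim_mask) are rewritten through
 * u_primconvert, and user index buffers are first copied into a real
 * resource with u_upload_data(), since the host can only read from guest
 * resources.
 */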
static void virgl_draw_vbo(struct pipe_context *ctx,
                           const struct pipe_draw_info *dinfo,
                           unsigned drawid_offset,
                           const struct pipe_draw_indirect_info *indirect,
                           const struct pipe_draw_start_count_bias *draws,
                           unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(ctx, dinfo, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !dinfo->instance_count))
      return;

   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_indexbuf ib = { 0 };
   struct pipe_draw_info info = *dinfo;

   if (!indirect &&
       !dinfo->primitive_restart &&
       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&draws[0].count))
      return;

   if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
      util_primconvert_draw_vbo(vctx->primconvert, dinfo, drawid_offset, indirect, draws, num_draws);
      return;
   }
   if (info.index_size) {
      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
      ib.index_size = dinfo->index_size;
      ib.offset = draws[0].start * ib.index_size;

      if (ib.user_buffer) {
         unsigned start_offset = draws[0].start * ib.index_size;
         u_upload_data(vctx->uploader, 0,
                       draws[0].count * ib.index_size, 4,
                       (char*)ib.user_buffer + start_offset,
                       &ib.offset, &ib.buffer);
         ib.user_buffer = NULL;
      }
      virgl_hw_set_index_buffer(vctx, &ib);
   }

   if (!vctx->num_draws)
      virgl_reemit_draw_resources(vctx);
   vctx->num_draws++;

   virgl_hw_set_vertex_buffers(vctx);

   virgl_encoder_draw_vbo(vctx, &info, drawid_offset, indirect, &draws[0]);

   pipe_resource_reference(&ib.buffer, NULL);
}

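/* With VIRGL_DEBUG_SYNC set, every submission gets a private fence that is
 * waited on immediately, making execution synchronous so problems can be
 * attributed to the offending submit.
 */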
static void virgl_submit_cmd(struct virgl_winsys *vws,
                             struct virgl_cmd_buf *cbuf,
                             struct pipe_fence_handle **fence)
{
   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
      struct pipe_fence_handle *sync_fence = NULL;

      vws->submit_cmd(vws, cbuf, &sync_fence);

      vws->fence_wait(vws, sync_fence, OS_TIMEOUT_INFINITE);
      vws->fence_reference(vws, &sync_fence, NULL);
   } else {
      vws->submit_cmd(vws, cbuf, fence);
   }
}

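/* Flush the encoded commands to the host.  The flush is skipped when the
 * buffer holds nothing beyond the initially reserved dwords and no transfers
 * are queued, unless the caller needs a fence.  After submission, space for
 * transfer commands is re-reserved and the hardware sub-context re-selected,
 * since the recycled command buffer starts out empty.
 */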
void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                    struct pipe_fence_handle **fence)
{
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);

   /* skip empty cbuf */
   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
       ctx->queue.num_dwords == 0 &&
       !fence)
      return;

   if (ctx->num_draws)
      u_upload_unmap(ctx->uploader);

   /* send the buffer to the remote side for decoding */
   ctx->num_draws = ctx->num_compute = 0;

   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);

   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);

   /* Reserve some space for transfers. */
   if (ctx->encoded_transfers)
      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);

   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;

   /* We have flushed the command queue, including any pending copy transfers
    * involving staging resources.
    */
   ctx->queued_staging_res_size = 0;
}

static void virgl_flush_from_st(struct pipe_context *ctx,
                                struct pipe_fence_handle **fence,
                                enum pipe_flush_flags flags)
{
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_flush_eq(vctx, vctx, fence);
}

static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
                                                           struct pipe_resource *texture,
                                                           const struct pipe_sampler_view *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_sampler_view *grview;
   uint32_t handle;
   struct virgl_resource *res;

   if (!state)
      return NULL;

   grview = CALLOC_STRUCT(virgl_sampler_view);
   if (!grview)
      return NULL;

   res = virgl_resource(texture);
   handle = virgl_object_assign_handle();
   virgl_encode_sampler_view(vctx, handle, res, state);

   grview->base = *state;
   grview->base.reference.count = 1;

   grview->base.texture = NULL;
   grview->base.context = ctx;
   pipe_resource_reference(&grview->base.texture, texture);
   grview->handle = handle;
   return &grview->base;
}

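/* Binding sampler views records PIPE_BIND_SAMPLER_VIEW in each texture's
 * bind_history (which later blocks rebinding, see virgl_can_rebind_resource)
 * and unbinds trailing slots via a recursive call with NULL views.
 */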
virgl_set_sampler_views(struct pipe_context * ctx,enum pipe_shader_type shader_type,unsigned start_slot,unsigned num_views,unsigned unbind_num_trailing_slots,bool take_ownership,struct pipe_sampler_view ** views)1141 static void virgl_set_sampler_views(struct pipe_context *ctx,
1142                                    enum pipe_shader_type shader_type,
1143                                    unsigned start_slot,
1144                                    unsigned num_views,
1145                                    unsigned unbind_num_trailing_slots,
1146                                    bool take_ownership,
1147                                    struct pipe_sampler_view **views)
1148 {
1149    struct virgl_context *vctx = virgl_context(ctx);
1150    struct virgl_shader_binding_state *binding =
1151       &vctx->shader_bindings[shader_type];
1152 
1153    for (unsigned i = 0; i < num_views; i++) {
1154       unsigned idx = start_slot + i;
1155       if (views && views[i]) {
1156          struct virgl_resource *res = virgl_resource(views[i]->texture);
1157          res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
1158 
1159          if (take_ownership) {
1160             pipe_sampler_view_reference(&binding->views[idx], NULL);
1161             binding->views[idx] = views[i];
1162          } else {
1163             pipe_sampler_view_reference(&binding->views[idx], views[i]);
1164          }
1165       } else {
1166          pipe_sampler_view_reference(&binding->views[idx], NULL);
1167       }
1168    }
1169 
1170    virgl_encode_set_sampler_views(vctx, shader_type,
1171          start_slot, num_views, (struct virgl_sampler_view **)binding->views);
1172    virgl_attach_res_sampler_views(vctx, shader_type);
1173 
1174    if (unbind_num_trailing_slots) {
1175       virgl_set_sampler_views(ctx, shader_type, start_slot + num_views,
1176                               unbind_num_trailing_slots, 0, false, NULL);
1177    }
1178 }
1179 
1180 static void
virgl_texture_barrier(struct pipe_context * ctx,unsigned flags)1181 virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
1182 {
1183    struct virgl_context *vctx = virgl_context(ctx);
1184    struct virgl_screen *rs = virgl_screen(ctx->screen);
1185 
1186    if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER) &&
1187        !(rs->caps.caps.v2.capability_bits_v2 & VIRGL_CAP_V2_BLEND_EQUATION))
1188       return;
1189    virgl_encode_texture_barrier(vctx, flags);
1190 }
1191 
virgl_destroy_sampler_view(struct pipe_context * ctx,struct pipe_sampler_view * view)1192 static void virgl_destroy_sampler_view(struct pipe_context *ctx,
1193                                  struct pipe_sampler_view *view)
1194 {
1195    struct virgl_context *vctx = virgl_context(ctx);
1196    struct virgl_sampler_view *grview = virgl_sampler_view(view);
1197 
1198    virgl_encode_delete_object(vctx, grview->handle, VIRGL_OBJECT_SAMPLER_VIEW);
1199    pipe_resource_reference(&view->texture, NULL);
1200    FREE(view);
1201 }
1202 
virgl_create_sampler_state(struct pipe_context * ctx,const struct pipe_sampler_state * state)1203 static void *virgl_create_sampler_state(struct pipe_context *ctx,
1204                                         const struct pipe_sampler_state *state)
1205 {
1206    struct virgl_context *vctx = virgl_context(ctx);
1207    uint32_t handle;
1208 
1209    handle = virgl_object_assign_handle();
1210 
1211    virgl_encode_sampler_state(vctx, handle, state);
1212    return (void *)(unsigned long)handle;
1213 }
1214 
virgl_delete_sampler_state(struct pipe_context * ctx,void * ss)1215 static void virgl_delete_sampler_state(struct pipe_context *ctx,
1216                                       void *ss)
1217 {
1218    struct virgl_context *vctx = virgl_context(ctx);
1219    uint32_t handle = (unsigned long)ss;
1220 
1221    virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SAMPLER_STATE);
1222 }
1223 
virgl_bind_sampler_states(struct pipe_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned num_samplers,void ** samplers)1224 static void virgl_bind_sampler_states(struct pipe_context *ctx,
1225                                      enum pipe_shader_type shader,
1226                                      unsigned start_slot,
1227                                      unsigned num_samplers,
1228                                      void **samplers)
1229 {
1230    struct virgl_context *vctx = virgl_context(ctx);
1231    uint32_t handles[PIPE_MAX_SAMPLERS];
1232    int i;
1233    for (i = 0; i < num_samplers; i++) {
1234       handles[i] = (unsigned long)(samplers[i]);
1235    }
1236    virgl_encode_bind_sampler_states(vctx, shader, start_slot, num_samplers, handles);
1237 }
1238 
virgl_set_polygon_stipple(struct pipe_context * ctx,const struct pipe_poly_stipple * ps)1239 static void virgl_set_polygon_stipple(struct pipe_context *ctx,
1240                                      const struct pipe_poly_stipple *ps)
1241 {
1242    struct virgl_context *vctx = virgl_context(ctx);
1243    virgl_encoder_set_polygon_stipple(vctx, ps);
1244 }
1245 
virgl_set_scissor_states(struct pipe_context * ctx,unsigned start_slot,unsigned num_scissor,const struct pipe_scissor_state * ss)1246 static void virgl_set_scissor_states(struct pipe_context *ctx,
1247                                     unsigned start_slot,
1248                                     unsigned num_scissor,
1249                                    const struct pipe_scissor_state *ss)
1250 {
1251    struct virgl_context *vctx = virgl_context(ctx);
1252    virgl_encoder_set_scissor_state(vctx, start_slot, num_scissor, ss);
1253 }
1254 
static void virgl_set_sample_mask(struct pipe_context *ctx,
                                  unsigned sample_mask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_sample_mask(vctx, sample_mask);
}

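/* Only encode the minimum sample count when the host advertises
 * VIRGL_CAP_SET_MIN_SAMPLES; otherwise the state is silently dropped on
 * the guest side. */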
static void virgl_set_min_samples(struct pipe_context *ctx,
                                  unsigned min_samples)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
      return;
   virgl_encoder_set_min_samples(vctx, min_samples);
}

static void virgl_set_clip_state(struct pipe_context *ctx,
                                 const struct pipe_clip_state *clip)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encoder_set_clip_state(vctx, clip);
}

static void virgl_set_tess_state(struct pipe_context *ctx,
                                 const float default_outer_level[4],
                                 const float default_inner_level[2])
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
      return;
   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
}

static void virgl_set_patch_vertices(struct pipe_context *ctx, uint8_t patch_vertices)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->patch_vertices = patch_vertices;
}

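/* For buffer destinations, extend the valid range so later readbacks know
 * which bytes contain defined data; the destination level is marked dirty
 * before the copy is encoded for the host. */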
static void virgl_resource_copy_region(struct pipe_context *ctx,
                                       struct pipe_resource *dst,
                                       unsigned dst_level,
                                       unsigned dstx, unsigned dsty, unsigned dstz,
                                       struct pipe_resource *src,
                                       unsigned src_level,
                                       const struct pipe_box *src_box)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(dst);
   struct virgl_resource *sres = virgl_resource(src);

   if (dres->b.target == PIPE_BUFFER)
      util_range_add(&dres->b, &dres->valid_buffer_range, dstx, dstx + src_box->width);
   virgl_resource_dirty(dres, dst_level);

   virgl_encode_resource_copy_region(vctx, dres,
                                     dst_level, dstx, dsty, dstz,
                                     sres, src_level,
                                     src_box);
}

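/* Intentionally empty: virgl has nothing to flush here, but the callback
 * is kept as a stub since frontends may call it unconditionally. */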
static void
virgl_flush_resource(struct pipe_context *pipe,
                     struct pipe_resource *resource)
{
}

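/* The assert below checks that, when the host cannot control sRGB decode
 * on the destination surface, the blit does not request an sRGB/linear
 * mismatch between the destination resource format and the blit format. */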
static void virgl_blit(struct pipe_context *ctx,
                       const struct pipe_blit_info *blit)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_resource *dres = virgl_resource(blit->dst.resource);
   struct virgl_resource *sres = virgl_resource(blit->src.resource);

   assert(ctx->screen->get_param(ctx->screen,
                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
          (util_format_is_srgb(blit->dst.resource->format) ==
           util_format_is_srgb(blit->dst.format)));

   virgl_resource_dirty(dres, blit->dst.level);
   virgl_encode_blit(vctx, dres, sres,
                     blit);
}

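/* Mirror the bound atomic buffers on the guest side: take references so
 * the resources stay alive while bound, record PIPE_BIND_SHADER_BUFFER in
 * each resource's bind history so the binding can be re-emitted if the
 * backing storage is rebound, and maintain an enabled mask for cleanup at
 * context destruction. */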
static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
                                        unsigned start_slot,
                                        unsigned count,
                                        const struct pipe_shader_buffer *buffers)
{
   struct virgl_context *vctx = virgl_context(ctx);

   vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
                                 buffers[i].buffer);
         vctx->atomic_buffers[idx] = buffers[i];
         vctx->atomic_buffer_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
      }
   }

   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}

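/* Same guest-side bookkeeping as for atomic buffers; in addition, skip
 * the encode entirely when the host exposes zero SSBO slots for this
 * stage (fragment/compute have a limit separate from the other stages). */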
static void virgl_set_shader_buffers(struct pipe_context *ctx,
                                     enum pipe_shader_type shader,
                                     unsigned start_slot, unsigned count,
                                     const struct pipe_shader_buffer *buffers,
                                     unsigned writable_bitmask)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (buffers && buffers[i].buffer) {
         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
         res->bind_history |= PIPE_BIND_SHADER_BUFFER;

         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
         binding->ssbos[idx] = buffers[i];
         binding->ssbo_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
      }
   }

   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_buffer_frag_compute :
      rs->caps.caps.v2.max_shader_buffer_other_stages;
   if (!max_shader_buffer)
      return;
   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}

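/* Import a native sync file descriptor as a pipe fence; the fence is left
 * unset when the winsys does not implement cs_create_fence. */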
static void virgl_create_fence_fd(struct pipe_context *ctx,
                                  struct pipe_fence_handle **fence,
                                  int fd,
                                  enum pipe_fd_type type)
{
   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->cs_create_fence)
      *fence = rs->vws->cs_create_fence(rs->vws, fd);
}

static void virgl_fence_server_sync(struct pipe_context *ctx,
                                    struct pipe_fence_handle *fence)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (rs->vws->fence_server_sync)
      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
}

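/* Like virgl_set_shader_buffers(), but for image views. Trailing slots
 * that the frontend asks to unbind are handled by the recursive call at
 * the end, which passes images == NULL for those slots. */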
static void virgl_set_shader_images(struct pipe_context *ctx,
                                    enum pipe_shader_type shader,
                                    unsigned start_slot, unsigned count,
                                    unsigned unbind_num_trailing_slots,
                                    const struct pipe_image_view *images)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader];

   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
   for (unsigned i = 0; i < count; i++) {
      unsigned idx = start_slot + i;
      if (images && images[i].resource) {
         struct virgl_resource *res = virgl_resource(images[i].resource);
         res->bind_history |= PIPE_BIND_SHADER_IMAGE;

         pipe_resource_reference(&binding->images[idx].resource,
                                 images[i].resource);
         binding->images[idx] = images[i];
         binding->image_enabled_mask |= 1 << idx;
      } else {
         pipe_resource_reference(&binding->images[idx].resource, NULL);
      }
   }

   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
      rs->caps.caps.v2.max_shader_image_frag_compute :
      rs->caps.caps.v2.max_shader_image_other_stages;
   if (!max_shader_images)
      return;
   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);

   if (unbind_num_trailing_slots) {
      virgl_set_shader_images(ctx, shader, start_slot + count,
                              unbind_num_trailing_slots, 0, NULL);
   }
}

static void virgl_memory_barrier(struct pipe_context *ctx,
                                 unsigned flags)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);

   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
      return;
   virgl_encode_memory_barrier(vctx, flags);
}

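/* Compute shaders arrive as NIR or TGSI. NIR is first lowered with
 * nir_to_tgsi_options() (which consumes the cloned shader), then either
 * path runs the virgl TGSI transform before the shader is encoded for the
 * host. The guest keeps only the returned object handle. */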
static void *virgl_create_compute_state(struct pipe_context *ctx,
                                        const struct pipe_compute_state *state)
{
   struct virgl_context *vctx = virgl_context(ctx);
   uint32_t handle;
   const struct tgsi_token *ntt_tokens = NULL;
   const struct tgsi_token *tokens;
   struct pipe_stream_output_info so_info = { 0 };
   int ret;

   if (state->ir_type == PIPE_SHADER_IR_NIR) {
      struct nir_to_tgsi_options options = {
         .unoptimized_ra = true,
         .lower_fabs = true
      };
      nir_shader *s = nir_shader_clone(NULL, state->prog);
      ntt_tokens = tokens = nir_to_tgsi_options(s, vctx->base.screen, &options); /* takes ownership */
   } else {
      tokens = state->prog;
   }

   void *new_tokens = virgl_tgsi_transform((struct virgl_screen *)vctx->base.screen, tokens, false);
   if (!new_tokens) {
      FREE((void *)ntt_tokens);
      return NULL;
   }

   handle = virgl_object_assign_handle();
   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
                                   &so_info,
                                   state->static_shared_mem,
                                   new_tokens);
   if (ret) {
      FREE((void *)ntt_tokens);
      FREE(new_tokens);
      return NULL;
   }

   FREE((void *)ntt_tokens);
   FREE(new_tokens);

   return (void *)(unsigned long)handle;
}

static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
}

static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
{
   uint32_t handle = (unsigned long)state;
   struct virgl_context *vctx = virgl_context(ctx);

   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
}

static void virgl_launch_grid(struct pipe_context *ctx,
                              const struct pipe_grid_info *info)
{
   struct virgl_context *vctx = virgl_context(ctx);

   if (!vctx->num_compute)
      virgl_reemit_compute_resources(vctx);
   vctx->num_compute++;

   virgl_encode_launch_grid(vctx, info);
}

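/* Drop all guest-side references held by one shader stage's binding state
 * (sampler views, UBOs, SSBOs and images); used while tearing down the
 * context. */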
static void
virgl_release_shader_binding(struct virgl_context *vctx,
                             enum pipe_shader_type shader_type)
{
   struct virgl_shader_binding_state *binding =
      &vctx->shader_bindings[shader_type];

   for (int i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i) {
      if (binding->views[i]) {
         pipe_sampler_view_reference(
                  (struct pipe_sampler_view **)&binding->views[i], NULL);
      }
   }

   while (binding->ubo_enabled_mask) {
      int i = u_bit_scan(&binding->ubo_enabled_mask);
      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
   }

   while (binding->ssbo_enabled_mask) {
      int i = u_bit_scan(&binding->ssbo_enabled_mask);
      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
   }

   while (binding->image_enabled_mask) {
      int i = u_bit_scan(&binding->image_enabled_mask);
      pipe_resource_reference(&binding->images[i].resource, NULL);
   }
}

static void
virgl_emit_string_marker(struct pipe_context *ctx, const char *message, int len)
{
   struct virgl_context *vctx = virgl_context(ctx);
   virgl_encode_emit_string_marker(vctx, message, len);
}

static void
virgl_context_destroy(struct pipe_context *ctx)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(ctx->screen);
   enum pipe_shader_type shader_type;

   vctx->framebuffer.zsbuf = NULL;
   vctx->framebuffer.nr_cbufs = 0;
   virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
   virgl_flush_eq(vctx, vctx, NULL);

   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
      virgl_release_shader_binding(vctx, shader_type);

   while (vctx->atomic_buffer_enabled_mask) {
      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
   }

   rs->vws->cmd_buf_destroy(vctx->cbuf);
   if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
   if (vctx->supports_staging)
      virgl_staging_destroy(&vctx->staging);
   util_primconvert_destroy(vctx->primconvert);
   virgl_transfer_queue_fini(&vctx->queue);

   slab_destroy_child(&vctx->transfer_pool);
   FREE(vctx);
}

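/* Decode the MSAA sample positions the host advertised in
 * caps.v2.sample_locations: one byte per sample, x in the high nibble and
 * y in the low nibble, both in 1/16th-of-a-pixel units. E.g. for
 * sample_count == 4 and index == 2 the byte is
 * (sample_locations[1] >> 16) & 0xff. */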
static void virgl_get_sample_position(struct pipe_context *ctx,
                                      unsigned sample_count,
                                      unsigned index,
                                      float *out_value)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);

   if (sample_count > vs->caps.caps.v1.max_samples) {
      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
                   sample_count, vs->caps.caps.v1.max_samples);
      return;
   }

   /* The following is basically copied from dri/i965 gen6_get_sample_position.
    * The only addition is that we hold the msaa positions for all sample
    * counts in a flat array. */
   uint32_t bits = 0;
   if (sample_count == 1) {
      out_value[0] = out_value[1] = 0.5f;
      return;
   } else if (sample_count == 2) {
      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
   } else if (sample_count <= 4) {
      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
   } else if (sample_count <= 8) {
      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
   } else if (sample_count <= 16) {
      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
   }
   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
   out_value[1] = (bits & 0xf) / 16.0f;

   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
                   index, sample_count, out_value[0], out_value[1]);
}

static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
{
   if (rs->tweak_gles_emulate_bgra)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);

   if (rs->tweak_gles_apply_bgra_dest_swizzle)
      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);

   if (rs->tweak_gles_tf3_value > 0)
      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
                         rs->tweak_gles_tf3_value);
}

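/* Hand the host the complete set of per-stage shader handles so it can
 * link them as a single program. When shader_sync is set, block on a
 * fence until linking finishes, unless VIRGL_DEBUG_SYNC already makes
 * every flush synchronous. */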
static void virgl_link_shader(struct pipe_context *ctx, void **handles)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_screen *rs = virgl_screen(vctx->base.screen);

   uint32_t shader_handles[PIPE_SHADER_TYPES];
   for (uint32_t i = 0; i < PIPE_SHADER_TYPES; ++i)
      shader_handles[i] = (uintptr_t)handles[i];
   virgl_encode_link_shader(vctx, shader_handles);

   /* block until shader linking is finished on host */
   if (rs->shader_sync && !unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
      struct virgl_winsys *vws = rs->vws;
      struct pipe_fence_handle *sync_fence;
      virgl_flush_eq(vctx, vctx, &sync_fence);
      vws->fence_wait(vws, sync_fence, OS_TIMEOUT_INFINITE);
      vws->fence_reference(vws, &sync_fence, NULL);
   }
}

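/* Create a pipe_context: allocate the command buffer, hook up all the
 * pipe_context entry points (gated on host capabilities where needed),
 * and emit the commands that create and select a host-side sub-context
 * backing this context. */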
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                          void *priv,
                                          unsigned flags)
{
   struct virgl_context *vctx;
   struct virgl_screen *rs = virgl_screen(pscreen);
   const char *host_debug_flagstring;

   vctx = CALLOC_STRUCT(virgl_context);
   if (!vctx)
      return NULL;

   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
   if (!vctx->cbuf) {
      FREE(vctx);
      return NULL;
   }

   vctx->base.destroy = virgl_context_destroy;
   vctx->base.create_surface = virgl_create_surface;
   vctx->base.surface_destroy = virgl_surface_destroy;
   vctx->base.set_framebuffer_state = virgl_set_framebuffer_state;
   vctx->base.create_blend_state = virgl_create_blend_state;
   vctx->base.bind_blend_state = virgl_bind_blend_state;
   vctx->base.delete_blend_state = virgl_delete_blend_state;
   vctx->base.create_depth_stencil_alpha_state = virgl_create_depth_stencil_alpha_state;
   vctx->base.bind_depth_stencil_alpha_state = virgl_bind_depth_stencil_alpha_state;
   vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
   vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
   vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
   vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;

   vctx->base.set_viewport_states = virgl_set_viewport_states;
   vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
   vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
   vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
   vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
   vctx->base.set_constant_buffer = virgl_set_constant_buffer;

   vctx->base.set_tess_state = virgl_set_tess_state;
   vctx->base.set_patch_vertices = virgl_set_patch_vertices;
   vctx->base.create_vs_state = virgl_create_vs_state;
   vctx->base.create_tcs_state = virgl_create_tcs_state;
   vctx->base.create_tes_state = virgl_create_tes_state;
   vctx->base.create_gs_state = virgl_create_gs_state;
   vctx->base.create_fs_state = virgl_create_fs_state;

   vctx->base.bind_vs_state = virgl_bind_vs_state;
   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
   vctx->base.bind_tes_state = virgl_bind_tes_state;
   vctx->base.bind_gs_state = virgl_bind_gs_state;
   vctx->base.bind_fs_state = virgl_bind_fs_state;

   vctx->base.delete_vs_state = virgl_delete_vs_state;
   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
   vctx->base.delete_tes_state = virgl_delete_tes_state;
   vctx->base.delete_gs_state = virgl_delete_gs_state;
   vctx->base.delete_fs_state = virgl_delete_fs_state;

   vctx->base.create_compute_state = virgl_create_compute_state;
   vctx->base.bind_compute_state = virgl_bind_compute_state;
   vctx->base.delete_compute_state = virgl_delete_compute_state;
   vctx->base.launch_grid = virgl_launch_grid;

   vctx->base.clear = virgl_clear;
   if (rs->caps.caps.v2.host_feature_check_version >= 21) {
      vctx->base.clear_render_target = virgl_clear_render_target;
      vctx->base.clear_depth_stencil = virgl_clear_depth_stencil;
   } else {
      /* The stub is required by the VL backend. */
      vctx->base.clear_render_target = virgl_clear_render_target_stub;
   }
   vctx->base.clear_texture = virgl_clear_texture;
   vctx->base.draw_vbo = virgl_draw_vbo;
   vctx->base.flush = virgl_flush_from_st;
   vctx->base.screen = pscreen;
   vctx->base.create_sampler_view = virgl_create_sampler_view;
   vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
   vctx->base.set_sampler_views = virgl_set_sampler_views;
   vctx->base.texture_barrier = virgl_texture_barrier;

   vctx->base.create_sampler_state = virgl_create_sampler_state;
   vctx->base.delete_sampler_state = virgl_delete_sampler_state;
   vctx->base.bind_sampler_states = virgl_bind_sampler_states;

   vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
   vctx->base.set_scissor_states = virgl_set_scissor_states;
   vctx->base.set_sample_mask = virgl_set_sample_mask;
   vctx->base.set_min_samples = virgl_set_min_samples;
   vctx->base.set_stencil_ref = virgl_set_stencil_ref;
   vctx->base.set_clip_state = virgl_set_clip_state;

   vctx->base.set_blend_color = virgl_set_blend_color;

   vctx->base.get_sample_position = virgl_get_sample_position;

   vctx->base.resource_copy_region = virgl_resource_copy_region;
   vctx->base.flush_resource = virgl_flush_resource;
   vctx->base.blit = virgl_blit;
   vctx->base.create_fence_fd = virgl_create_fence_fd;
   vctx->base.fence_server_sync = virgl_fence_server_sync;

   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
   vctx->base.set_shader_images = virgl_set_shader_images;
   vctx->base.memory_barrier = virgl_memory_barrier;
   vctx->base.emit_string_marker = virgl_emit_string_marker;

   vctx->base.create_video_codec = virgl_video_create_codec;
   vctx->base.create_video_buffer = virgl_video_create_buffer;

   if (rs->caps.caps.v2.host_feature_check_version >= 7)
      vctx->base.link_shader = virgl_link_shader;

   virgl_init_context_resource_functions(&vctx->base);
   virgl_init_query_functions(vctx);
   virgl_init_so_functions(vctx);

   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
   virgl_transfer_queue_init(&vctx->queue, vctx);
   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                              (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));

   /* Reserve some space for transfers. */
   if (vctx->encoded_transfers)
      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;

   vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
   vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
                                    PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
   if (!vctx->uploader)
      goto fail;
   vctx->base.stream_uploader = vctx->uploader;
   vctx->base.const_uploader = vctx->uploader;

   /* We use a special staging buffer as the source of copy transfers. */
   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
       vctx->encoded_transfers) {
      virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
      vctx->supports_staging = true;
   }

   vctx->hw_sub_ctx_id = p_atomic_inc_return(&rs->sub_ctx_id);
   virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
      if (host_debug_flagstring)
         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
   }

   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
      virgl_send_tweaks(vctx, rs);

   /* On Android, a virgl_screen is generally created first by the HWUI
    * service, followed by the application's no-op attempt to do the same with
    * eglInitialize(). To retain the ability for apps to set their own driver
    * config procedurally right before context creation, we must check the
    * envvar again.
    */
#if DETECT_OS_ANDROID
   if (!rs->shader_sync) {
      uint64_t debug_options = debug_get_flags_option("VIRGL_DEBUG",
                                                      virgl_debug_options, 0);
      rs->shader_sync |= !!(debug_options & VIRGL_DEBUG_SHADER_SYNC);
   }
#endif

   return &vctx->base;
fail:
   virgl_context_destroy(&vctx->base);
   return NULL;
}