xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/tegra/tegra_context.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2014-2018 NVIDIA Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <inttypes.h>
25 #include <stdlib.h>
26 
27 #include "util/u_debug.h"
28 #include "util/u_draw.h"
29 #include "util/u_inlines.h"
30 #include "util/u_upload_mgr.h"
31 
32 #include "tegra_context.h"
33 #include "tegra_resource.h"
34 #include "tegra_screen.h"
35 
36 static void
tegra_destroy(struct pipe_context * pcontext)37 tegra_destroy(struct pipe_context *pcontext)
38 {
39    struct tegra_context *context = to_tegra_context(pcontext);
40 
41    if (context->base.stream_uploader)
42       u_upload_destroy(context->base.stream_uploader);
43 
44    context->gpu->destroy(context->gpu);
45    free(context);
46 }
47 
/*
 * pipe_context::draw_vbo wrapper.
 *
 * Multi-draws are replayed as single draws via util_draw_multi() so only
 * the single-draw path needs translation. For a single draw, any tegra
 * resource wrappers referenced by the draw (indirect draw buffers and a
 * non-user index buffer) are replaced with the underlying GPU resources
 * before forwarding to the wrapped GPU context.
 */
static void
tegra_draw_vbo(struct pipe_context *pcontext,
               const struct pipe_draw_info *pinfo,
               unsigned drawid_offset,
               const struct pipe_draw_indirect_info *pindirect,
               const struct pipe_draw_start_count_bias *draws,
               unsigned num_draws)
{
   /* Split multi-draws into a sequence of single draws. */
   if (num_draws > 1) {
      util_draw_multi(pcontext, pinfo, drawid_offset, pindirect, draws, num_draws);
      return;
   }

   /* Skip degenerate direct draws (no vertices or no instances). */
   if (!pindirect && (!draws[0].count || !pinfo->instance_count))
      return;

   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_draw_indirect_info indirect;
   struct pipe_draw_info info;

   /* Only copy the draw info when something actually needs unwrapping. */
   if (pinfo && ((pindirect && pindirect->buffer) || pinfo->index_size)) {
      memcpy(&info, pinfo, sizeof(info));

      if (pindirect && pindirect->buffer) {
         memcpy(&indirect, pindirect, sizeof(indirect));
         indirect.buffer = tegra_resource_unwrap(pindirect->buffer);
         indirect.indirect_draw_count = tegra_resource_unwrap(pindirect->indirect_draw_count);
         pindirect = &indirect;
      }

      /* User-pointer index buffers are not wrapped, so leave them alone. */
      if (pinfo->index_size && !pinfo->has_user_indices)
         info.index.resource = tegra_resource_unwrap(info.index.resource);

      pinfo = &info;
   }

   context->gpu->draw_vbo(context->gpu, pinfo, drawid_offset, pindirect, draws, num_draws);
}
86 
87 static void
tegra_render_condition(struct pipe_context * pcontext,struct pipe_query * query,bool condition,unsigned int mode)88 tegra_render_condition(struct pipe_context *pcontext,
89                        struct pipe_query *query,
90                        bool condition,
91                        unsigned int mode)
92 {
93    struct tegra_context *context = to_tegra_context(pcontext);
94 
95    context->gpu->render_condition(context->gpu, query, condition, mode);
96 }
97 
98 static struct pipe_query *
tegra_create_query(struct pipe_context * pcontext,unsigned int query_type,unsigned int index)99 tegra_create_query(struct pipe_context *pcontext, unsigned int query_type,
100                    unsigned int index)
101 {
102    struct tegra_context *context = to_tegra_context(pcontext);
103 
104    return context->gpu->create_query(context->gpu, query_type, index);
105 }
106 
107 static struct pipe_query *
tegra_create_batch_query(struct pipe_context * pcontext,unsigned int num_queries,unsigned int * queries)108 tegra_create_batch_query(struct pipe_context *pcontext,
109                          unsigned int num_queries,
110                          unsigned int *queries)
111 {
112    struct tegra_context *context = to_tegra_context(pcontext);
113 
114    return context->gpu->create_batch_query(context->gpu, num_queries,
115                                            queries);
116 }
117 
118 static void
tegra_destroy_query(struct pipe_context * pcontext,struct pipe_query * query)119 tegra_destroy_query(struct pipe_context *pcontext, struct pipe_query *query)
120 {
121    struct tegra_context *context = to_tegra_context(pcontext);
122 
123    context->gpu->destroy_query(context->gpu, query);
124 }
125 
126 static bool
tegra_begin_query(struct pipe_context * pcontext,struct pipe_query * query)127 tegra_begin_query(struct pipe_context *pcontext, struct pipe_query *query)
128 {
129    struct tegra_context *context = to_tegra_context(pcontext);
130 
131    return context->gpu->begin_query(context->gpu, query);
132 }
133 
134 static bool
tegra_end_query(struct pipe_context * pcontext,struct pipe_query * query)135 tegra_end_query(struct pipe_context *pcontext, struct pipe_query *query)
136 {
137    struct tegra_context *context = to_tegra_context(pcontext);
138 
139    return context->gpu->end_query(context->gpu, query);
140 }
141 
142 static bool
tegra_get_query_result(struct pipe_context * pcontext,struct pipe_query * query,bool wait,union pipe_query_result * result)143 tegra_get_query_result(struct pipe_context *pcontext,
144                        struct pipe_query *query,
145                        bool wait,
146                        union pipe_query_result *result)
147 {
148    struct tegra_context *context = to_tegra_context(pcontext);
149 
150    return context->gpu->get_query_result(context->gpu, query, wait,
151                      result);
152 }
153 
154 static void
tegra_get_query_result_resource(struct pipe_context * pcontext,struct pipe_query * query,enum pipe_query_flags flags,enum pipe_query_value_type result_type,int index,struct pipe_resource * resource,unsigned int offset)155 tegra_get_query_result_resource(struct pipe_context *pcontext,
156                                 struct pipe_query *query,
157                                 enum pipe_query_flags flags,
158                                 enum pipe_query_value_type result_type,
159                                 int index,
160                                 struct pipe_resource *resource,
161                                 unsigned int offset)
162 {
163    struct tegra_context *context = to_tegra_context(pcontext);
164 
165    context->gpu->get_query_result_resource(context->gpu, query, flags,
166                                            result_type, index, resource,
167                                            offset);
168 }
169 
170 static void
tegra_set_active_query_state(struct pipe_context * pcontext,bool enable)171 tegra_set_active_query_state(struct pipe_context *pcontext, bool enable)
172 {
173    struct tegra_context *context = to_tegra_context(pcontext);
174 
175    context->gpu->set_active_query_state(context->gpu, enable);
176 }
177 
178 static void *
tegra_create_blend_state(struct pipe_context * pcontext,const struct pipe_blend_state * cso)179 tegra_create_blend_state(struct pipe_context *pcontext,
180                          const struct pipe_blend_state *cso)
181 {
182    struct tegra_context *context = to_tegra_context(pcontext);
183 
184    return context->gpu->create_blend_state(context->gpu, cso);
185 }
186 
187 static void
tegra_bind_blend_state(struct pipe_context * pcontext,void * so)188 tegra_bind_blend_state(struct pipe_context *pcontext, void *so)
189 {
190    struct tegra_context *context = to_tegra_context(pcontext);
191 
192    context->gpu->bind_blend_state(context->gpu, so);
193 }
194 
195 static void
tegra_delete_blend_state(struct pipe_context * pcontext,void * so)196 tegra_delete_blend_state(struct pipe_context *pcontext, void *so)
197 {
198    struct tegra_context *context = to_tegra_context(pcontext);
199 
200    context->gpu->delete_blend_state(context->gpu, so);
201 }
202 
203 static void *
tegra_create_sampler_state(struct pipe_context * pcontext,const struct pipe_sampler_state * cso)204 tegra_create_sampler_state(struct pipe_context *pcontext,
205                            const struct pipe_sampler_state *cso)
206 {
207    struct tegra_context *context = to_tegra_context(pcontext);
208 
209    return context->gpu->create_sampler_state(context->gpu, cso);
210 }
211 
212 static void
tegra_bind_sampler_states(struct pipe_context * pcontext,enum pipe_shader_type shader,unsigned start_slot,unsigned num_samplers,void ** samplers)213 tegra_bind_sampler_states(struct pipe_context *pcontext, enum pipe_shader_type shader,
214                           unsigned start_slot, unsigned num_samplers,
215                           void **samplers)
216 {
217    struct tegra_context *context = to_tegra_context(pcontext);
218 
219    context->gpu->bind_sampler_states(context->gpu, shader, start_slot,
220                                      num_samplers, samplers);
221 }
222 
223 static void
tegra_delete_sampler_state(struct pipe_context * pcontext,void * so)224 tegra_delete_sampler_state(struct pipe_context *pcontext, void *so)
225 {
226    struct tegra_context *context = to_tegra_context(pcontext);
227 
228    context->gpu->delete_sampler_state(context->gpu, so);
229 }
230 
231 static void *
tegra_create_rasterizer_state(struct pipe_context * pcontext,const struct pipe_rasterizer_state * cso)232 tegra_create_rasterizer_state(struct pipe_context *pcontext,
233                               const struct pipe_rasterizer_state *cso)
234 {
235    struct tegra_context *context = to_tegra_context(pcontext);
236 
237    return context->gpu->create_rasterizer_state(context->gpu, cso);
238 }
239 
240 static void
tegra_bind_rasterizer_state(struct pipe_context * pcontext,void * so)241 tegra_bind_rasterizer_state(struct pipe_context *pcontext, void *so)
242 {
243    struct tegra_context *context = to_tegra_context(pcontext);
244 
245    context->gpu->bind_rasterizer_state(context->gpu, so);
246 }
247 
248 static void
tegra_delete_rasterizer_state(struct pipe_context * pcontext,void * so)249 tegra_delete_rasterizer_state(struct pipe_context *pcontext, void *so)
250 {
251    struct tegra_context *context = to_tegra_context(pcontext);
252 
253    context->gpu->delete_rasterizer_state(context->gpu, so);
254 }
255 
256 static void *
tegra_create_depth_stencil_alpha_state(struct pipe_context * pcontext,const struct pipe_depth_stencil_alpha_state * cso)257 tegra_create_depth_stencil_alpha_state(struct pipe_context *pcontext,
258                                        const struct pipe_depth_stencil_alpha_state *cso)
259 {
260    struct tegra_context *context = to_tegra_context(pcontext);
261 
262    return context->gpu->create_depth_stencil_alpha_state(context->gpu, cso);
263 }
264 
265 static void
tegra_bind_depth_stencil_alpha_state(struct pipe_context * pcontext,void * so)266 tegra_bind_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
267 {
268    struct tegra_context *context = to_tegra_context(pcontext);
269 
270    context->gpu->bind_depth_stencil_alpha_state(context->gpu, so);
271 }
272 
273 static void
tegra_delete_depth_stencil_alpha_state(struct pipe_context * pcontext,void * so)274 tegra_delete_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
275 {
276    struct tegra_context *context = to_tegra_context(pcontext);
277 
278    context->gpu->delete_depth_stencil_alpha_state(context->gpu, so);
279 }
280 
281 static void *
tegra_create_fs_state(struct pipe_context * pcontext,const struct pipe_shader_state * cso)282 tegra_create_fs_state(struct pipe_context *pcontext,
283                       const struct pipe_shader_state *cso)
284 {
285    struct tegra_context *context = to_tegra_context(pcontext);
286 
287    return context->gpu->create_fs_state(context->gpu, cso);
288 }
289 
290 static void
tegra_bind_fs_state(struct pipe_context * pcontext,void * so)291 tegra_bind_fs_state(struct pipe_context *pcontext, void *so)
292 {
293    struct tegra_context *context = to_tegra_context(pcontext);
294 
295    context->gpu->bind_fs_state(context->gpu, so);
296 }
297 
298 static void
tegra_delete_fs_state(struct pipe_context * pcontext,void * so)299 tegra_delete_fs_state(struct pipe_context *pcontext, void *so)
300 {
301    struct tegra_context *context = to_tegra_context(pcontext);
302 
303    context->gpu->delete_fs_state(context->gpu, so);
304 }
305 
306 static void *
tegra_create_vs_state(struct pipe_context * pcontext,const struct pipe_shader_state * cso)307 tegra_create_vs_state(struct pipe_context *pcontext,
308                       const struct pipe_shader_state *cso)
309 {
310    struct tegra_context *context = to_tegra_context(pcontext);
311 
312    return context->gpu->create_vs_state(context->gpu, cso);
313 }
314 
315 static void
tegra_bind_vs_state(struct pipe_context * pcontext,void * so)316 tegra_bind_vs_state(struct pipe_context *pcontext, void *so)
317 {
318    struct tegra_context *context = to_tegra_context(pcontext);
319 
320    context->gpu->bind_vs_state(context->gpu, so);
321 }
322 
323 static void
tegra_delete_vs_state(struct pipe_context * pcontext,void * so)324 tegra_delete_vs_state(struct pipe_context *pcontext, void *so)
325 {
326    struct tegra_context *context = to_tegra_context(pcontext);
327 
328    context->gpu->delete_vs_state(context->gpu, so);
329 }
330 
331 static void *
tegra_create_gs_state(struct pipe_context * pcontext,const struct pipe_shader_state * cso)332 tegra_create_gs_state(struct pipe_context *pcontext,
333                       const struct pipe_shader_state *cso)
334 {
335    struct tegra_context *context = to_tegra_context(pcontext);
336 
337    return context->gpu->create_gs_state(context->gpu, cso);
338 }
339 
340 static void
tegra_bind_gs_state(struct pipe_context * pcontext,void * so)341 tegra_bind_gs_state(struct pipe_context *pcontext, void *so)
342 {
343    struct tegra_context *context = to_tegra_context(pcontext);
344 
345    context->gpu->bind_gs_state(context->gpu, so);
346 }
347 
348 static void
tegra_delete_gs_state(struct pipe_context * pcontext,void * so)349 tegra_delete_gs_state(struct pipe_context *pcontext, void *so)
350 {
351    struct tegra_context *context = to_tegra_context(pcontext);
352 
353    context->gpu->delete_gs_state(context->gpu, so);
354 }
355 
356 static void *
tegra_create_tcs_state(struct pipe_context * pcontext,const struct pipe_shader_state * cso)357 tegra_create_tcs_state(struct pipe_context *pcontext,
358                        const struct pipe_shader_state *cso)
359 {
360    struct tegra_context *context = to_tegra_context(pcontext);
361 
362    return context->gpu->create_tcs_state(context->gpu, cso);
363 }
364 
365 static void
tegra_bind_tcs_state(struct pipe_context * pcontext,void * so)366 tegra_bind_tcs_state(struct pipe_context *pcontext, void *so)
367 {
368    struct tegra_context *context = to_tegra_context(pcontext);
369 
370    context->gpu->bind_tcs_state(context->gpu, so);
371 }
372 
373 static void
tegra_delete_tcs_state(struct pipe_context * pcontext,void * so)374 tegra_delete_tcs_state(struct pipe_context *pcontext, void *so)
375 {
376    struct tegra_context *context = to_tegra_context(pcontext);
377 
378    context->gpu->delete_tcs_state(context->gpu, so);
379 }
380 
381 static void *
tegra_create_tes_state(struct pipe_context * pcontext,const struct pipe_shader_state * cso)382 tegra_create_tes_state(struct pipe_context *pcontext,
383                        const struct pipe_shader_state *cso)
384 {
385    struct tegra_context *context = to_tegra_context(pcontext);
386 
387    return context->gpu->create_tes_state(context->gpu, cso);
388 }
389 
390 static void
tegra_bind_tes_state(struct pipe_context * pcontext,void * so)391 tegra_bind_tes_state(struct pipe_context *pcontext, void *so)
392 {
393    struct tegra_context *context = to_tegra_context(pcontext);
394 
395    context->gpu->bind_tes_state(context->gpu, so);
396 }
397 
398 static void
tegra_delete_tes_state(struct pipe_context * pcontext,void * so)399 tegra_delete_tes_state(struct pipe_context *pcontext, void *so)
400 {
401    struct tegra_context *context = to_tegra_context(pcontext);
402 
403    context->gpu->delete_tes_state(context->gpu, so);
404 }
405 
406 static void *
tegra_create_vertex_elements_state(struct pipe_context * pcontext,unsigned num_elements,const struct pipe_vertex_element * elements)407 tegra_create_vertex_elements_state(struct pipe_context *pcontext,
408                                    unsigned num_elements,
409                                    const struct pipe_vertex_element *elements)
410 {
411    struct tegra_context *context = to_tegra_context(pcontext);
412 
413    return context->gpu->create_vertex_elements_state(context->gpu,
414                                                      num_elements,
415                                                      elements);
416 }
417 
418 static void
tegra_bind_vertex_elements_state(struct pipe_context * pcontext,void * so)419 tegra_bind_vertex_elements_state(struct pipe_context *pcontext, void *so)
420 {
421    struct tegra_context *context = to_tegra_context(pcontext);
422 
423    context->gpu->bind_vertex_elements_state(context->gpu, so);
424 }
425 
426 static void
tegra_delete_vertex_elements_state(struct pipe_context * pcontext,void * so)427 tegra_delete_vertex_elements_state(struct pipe_context *pcontext, void *so)
428 {
429    struct tegra_context *context = to_tegra_context(pcontext);
430 
431    context->gpu->delete_vertex_elements_state(context->gpu, so);
432 }
433 
434 static void
tegra_set_blend_color(struct pipe_context * pcontext,const struct pipe_blend_color * color)435 tegra_set_blend_color(struct pipe_context *pcontext,
436                       const struct pipe_blend_color *color)
437 {
438    struct tegra_context *context = to_tegra_context(pcontext);
439 
440    context->gpu->set_blend_color(context->gpu, color);
441 }
442 
443 static void
tegra_set_stencil_ref(struct pipe_context * pcontext,const struct pipe_stencil_ref ref)444 tegra_set_stencil_ref(struct pipe_context *pcontext,
445                       const struct pipe_stencil_ref ref)
446 {
447    struct tegra_context *context = to_tegra_context(pcontext);
448 
449    context->gpu->set_stencil_ref(context->gpu, ref);
450 }
451 
452 static void
tegra_set_sample_mask(struct pipe_context * pcontext,unsigned int mask)453 tegra_set_sample_mask(struct pipe_context *pcontext, unsigned int mask)
454 {
455    struct tegra_context *context = to_tegra_context(pcontext);
456 
457    context->gpu->set_sample_mask(context->gpu, mask);
458 }
459 
460 static void
tegra_set_min_samples(struct pipe_context * pcontext,unsigned int samples)461 tegra_set_min_samples(struct pipe_context *pcontext, unsigned int samples)
462 {
463    struct tegra_context *context = to_tegra_context(pcontext);
464 
465    context->gpu->set_min_samples(context->gpu, samples);
466 }
467 
468 static void
tegra_set_clip_state(struct pipe_context * pcontext,const struct pipe_clip_state * state)469 tegra_set_clip_state(struct pipe_context *pcontext,
470                      const struct pipe_clip_state *state)
471 {
472    struct tegra_context *context = to_tegra_context(pcontext);
473 
474    context->gpu->set_clip_state(context->gpu, state);
475 }
476 
477 static void
tegra_set_constant_buffer(struct pipe_context * pcontext,enum pipe_shader_type shader,unsigned int index,bool take_ownership,const struct pipe_constant_buffer * buf)478 tegra_set_constant_buffer(struct pipe_context *pcontext, enum pipe_shader_type shader,
479                           unsigned int index, bool take_ownership,
480                           const struct pipe_constant_buffer *buf)
481 {
482    struct tegra_context *context = to_tegra_context(pcontext);
483    struct pipe_constant_buffer buffer;
484 
485    if (buf && buf->buffer) {
486       memcpy(&buffer, buf, sizeof(buffer));
487       buffer.buffer = tegra_resource_unwrap(buffer.buffer);
488       buf = &buffer;
489    }
490 
491    context->gpu->set_constant_buffer(context->gpu, shader, index, take_ownership, buf);
492 }
493 
494 static void
tegra_set_framebuffer_state(struct pipe_context * pcontext,const struct pipe_framebuffer_state * fb)495 tegra_set_framebuffer_state(struct pipe_context *pcontext,
496                             const struct pipe_framebuffer_state *fb)
497 {
498    struct tegra_context *context = to_tegra_context(pcontext);
499    struct pipe_framebuffer_state state;
500    unsigned i;
501 
502    if (fb) {
503       memcpy(&state, fb, sizeof(state));
504 
505       for (i = 0; i < fb->nr_cbufs; i++)
506          state.cbufs[i] = tegra_surface_unwrap(fb->cbufs[i]);
507 
508       while (i < PIPE_MAX_COLOR_BUFS)
509          state.cbufs[i++] = NULL;
510 
511       state.zsbuf = tegra_surface_unwrap(fb->zsbuf);
512 
513       fb = &state;
514    }
515 
516    context->gpu->set_framebuffer_state(context->gpu, fb);
517 }
518 
519 static void
tegra_set_polygon_stipple(struct pipe_context * pcontext,const struct pipe_poly_stipple * stipple)520 tegra_set_polygon_stipple(struct pipe_context *pcontext,
521                           const struct pipe_poly_stipple *stipple)
522 {
523    struct tegra_context *context = to_tegra_context(pcontext);
524 
525    context->gpu->set_polygon_stipple(context->gpu, stipple);
526 }
527 
528 static void
tegra_set_scissor_states(struct pipe_context * pcontext,unsigned start_slot,unsigned num_scissors,const struct pipe_scissor_state * scissors)529 tegra_set_scissor_states(struct pipe_context *pcontext, unsigned start_slot,
530                          unsigned num_scissors,
531                          const struct pipe_scissor_state *scissors)
532 {
533    struct tegra_context *context = to_tegra_context(pcontext);
534 
535    context->gpu->set_scissor_states(context->gpu, start_slot, num_scissors,
536                                     scissors);
537 }
538 
539 static void
tegra_set_window_rectangles(struct pipe_context * pcontext,bool include,unsigned int num_rectangles,const struct pipe_scissor_state * rectangles)540 tegra_set_window_rectangles(struct pipe_context *pcontext, bool include,
541                             unsigned int num_rectangles,
542                             const struct pipe_scissor_state *rectangles)
543 {
544    struct tegra_context *context = to_tegra_context(pcontext);
545 
546    context->gpu->set_window_rectangles(context->gpu, include, num_rectangles,
547                                        rectangles);
548 }
549 
550 static void
tegra_set_viewport_states(struct pipe_context * pcontext,unsigned start_slot,unsigned num_viewports,const struct pipe_viewport_state * viewports)551 tegra_set_viewport_states(struct pipe_context *pcontext, unsigned start_slot,
552                           unsigned num_viewports,
553                           const struct pipe_viewport_state *viewports)
554 {
555    struct tegra_context *context = to_tegra_context(pcontext);
556 
557    context->gpu->set_viewport_states(context->gpu, start_slot, num_viewports,
558                                      viewports);
559 }
560 
/*
 * pipe_context::set_sampler_views wrapper.
 *
 * Each tegra sampler view carries a private reference count so that
 * references on the wrapped GPU view can be taken in bulk: when the
 * private count runs out it is refilled (with 100000000) and the same
 * number of references is added to the GPU view's atomic reference count
 * in a single p_atomic_add(), amortizing the atomic operation across many
 * bind calls. The unwrapped GPU views are then passed on to the wrapped
 * GPU context.
 */
static void
tegra_set_sampler_views(struct pipe_context *pcontext, enum pipe_shader_type shader,
                        unsigned start_slot, unsigned num_views,
                        unsigned unbind_num_trailing_slots,
                        bool take_ownership,
                        struct pipe_sampler_view **pviews)
{
   struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_sampler_view *view;
   unsigned i;

   for (i = 0; i < num_views; i++) {
      /* adjust private reference count */
      view = to_tegra_sampler_view(pviews[i]);
      if (view) {
         view->refcount--;
         if (!view->refcount) {
            /* Private count exhausted: refill it and add that many
             * references to the wrapped GPU view in one atomic operation. */
            view->refcount = 100000000;
            p_atomic_add(&view->gpu->reference.count, view->refcount);
         }
      }

      views[i] = tegra_sampler_view_unwrap(pviews[i]);
   }

   context->gpu->set_sampler_views(context->gpu, shader, start_slot,
                                   num_views, unbind_num_trailing_slots,
                                   take_ownership, views);
}
591 
592 static void
tegra_set_tess_state(struct pipe_context * pcontext,const float default_outer_level[4],const float default_inner_level[2])593 tegra_set_tess_state(struct pipe_context *pcontext,
594                      const float default_outer_level[4],
595                      const float default_inner_level[2])
596 {
597    struct tegra_context *context = to_tegra_context(pcontext);
598 
599    context->gpu->set_tess_state(context->gpu, default_outer_level,
600                                 default_inner_level);
601 }
602 
603 static void
tegra_set_debug_callback(struct pipe_context * pcontext,const struct util_debug_callback * callback)604 tegra_set_debug_callback(struct pipe_context *pcontext,
605                          const struct util_debug_callback *callback)
606 {
607    struct tegra_context *context = to_tegra_context(pcontext);
608 
609    context->gpu->set_debug_callback(context->gpu, callback);
610 }
611 
612 static void
tegra_set_shader_buffers(struct pipe_context * pcontext,enum pipe_shader_type shader,unsigned start,unsigned count,const struct pipe_shader_buffer * buffers,unsigned writable_bitmask)613 tegra_set_shader_buffers(struct pipe_context *pcontext, enum pipe_shader_type shader,
614                          unsigned start, unsigned count,
615                          const struct pipe_shader_buffer *buffers,
616                          unsigned writable_bitmask)
617 {
618    struct tegra_context *context = to_tegra_context(pcontext);
619 
620    context->gpu->set_shader_buffers(context->gpu, shader, start, count,
621                                     buffers, writable_bitmask);
622 }
623 
624 static void
tegra_set_shader_images(struct pipe_context * pcontext,enum pipe_shader_type shader,unsigned start,unsigned count,unsigned unbind_num_trailing_slots,const struct pipe_image_view * images)625 tegra_set_shader_images(struct pipe_context *pcontext, enum pipe_shader_type shader,
626                         unsigned start, unsigned count,
627                         unsigned unbind_num_trailing_slots,
628                         const struct pipe_image_view *images)
629 {
630    struct tegra_context *context = to_tegra_context(pcontext);
631 
632    context->gpu->set_shader_images(context->gpu, shader, start, count,
633                                    unbind_num_trailing_slots, images);
634 }
635 
636 static void
tegra_set_vertex_buffers(struct pipe_context * pcontext,unsigned num_buffers,const struct pipe_vertex_buffer * buffers)637 tegra_set_vertex_buffers(struct pipe_context *pcontext,
638                          unsigned num_buffers,
639                          const struct pipe_vertex_buffer *buffers)
640 {
641    struct tegra_context *context = to_tegra_context(pcontext);
642    struct pipe_vertex_buffer buf[PIPE_MAX_SHADER_INPUTS];
643    unsigned i;
644 
645    if (num_buffers && buffers) {
646       memcpy(buf, buffers, num_buffers * sizeof(struct pipe_vertex_buffer));
647 
648       for (i = 0; i < num_buffers; i++) {
649          if (!buf[i].is_user_buffer)
650             buf[i].buffer.resource = tegra_resource_unwrap(buf[i].buffer.resource);
651       }
652 
653       buffers = buf;
654    }
655 
656    context->gpu->set_vertex_buffers(context->gpu, num_buffers, buffers);
657 }
658 
659 static struct pipe_stream_output_target *
tegra_create_stream_output_target(struct pipe_context * pcontext,struct pipe_resource * presource,unsigned buffer_offset,unsigned buffer_size)660 tegra_create_stream_output_target(struct pipe_context *pcontext,
661                                   struct pipe_resource *presource,
662                                   unsigned buffer_offset,
663                                   unsigned buffer_size)
664 {
665    struct tegra_resource *resource = to_tegra_resource(presource);
666    struct tegra_context *context = to_tegra_context(pcontext);
667 
668    return context->gpu->create_stream_output_target(context->gpu,
669                                                     resource->gpu,
670                                                     buffer_offset,
671                                                     buffer_size);
672 }
673 
674 static void
tegra_stream_output_target_destroy(struct pipe_context * pcontext,struct pipe_stream_output_target * target)675 tegra_stream_output_target_destroy(struct pipe_context *pcontext,
676                                    struct pipe_stream_output_target *target)
677 {
678    struct tegra_context *context = to_tegra_context(pcontext);
679 
680    context->gpu->stream_output_target_destroy(context->gpu, target);
681 }
682 
683 static void
tegra_set_stream_output_targets(struct pipe_context * pcontext,unsigned num_targets,struct pipe_stream_output_target ** targets,const unsigned * offsets)684 tegra_set_stream_output_targets(struct pipe_context *pcontext,
685                                 unsigned num_targets,
686                                 struct pipe_stream_output_target **targets,
687                                 const unsigned *offsets)
688 {
689    struct tegra_context *context = to_tegra_context(pcontext);
690 
691    context->gpu->set_stream_output_targets(context->gpu, num_targets,
692                                            targets, offsets);
693 }
694 
695 static void
tegra_resource_copy_region(struct pipe_context * pcontext,struct pipe_resource * pdst,unsigned int dst_level,unsigned int dstx,unsigned int dsty,unsigned int dstz,struct pipe_resource * psrc,unsigned int src_level,const struct pipe_box * src_box)696 tegra_resource_copy_region(struct pipe_context *pcontext,
697                            struct pipe_resource *pdst,
698                            unsigned int dst_level,
699                            unsigned int dstx,
700                            unsigned int dsty,
701                            unsigned int dstz,
702                            struct pipe_resource *psrc,
703                            unsigned int src_level,
704                            const struct pipe_box *src_box)
705 {
706    struct tegra_context *context = to_tegra_context(pcontext);
707    struct tegra_resource *dst = to_tegra_resource(pdst);
708    struct tegra_resource *src = to_tegra_resource(psrc);
709 
710    context->gpu->resource_copy_region(context->gpu, dst->gpu, dst_level, dstx,
711                                       dsty, dstz, src->gpu, src_level,
712                                       src_box);
713 }
714 
715 static void
tegra_blit(struct pipe_context * pcontext,const struct pipe_blit_info * pinfo)716 tegra_blit(struct pipe_context *pcontext, const struct pipe_blit_info *pinfo)
717 {
718    struct tegra_context *context = to_tegra_context(pcontext);
719    struct pipe_blit_info info;
720 
721    if (pinfo) {
722       memcpy(&info, pinfo, sizeof(info));
723       info.dst.resource = tegra_resource_unwrap(info.dst.resource);
724       info.src.resource = tegra_resource_unwrap(info.src.resource);
725       pinfo = &info;
726    }
727 
728    context->gpu->blit(context->gpu, pinfo);
729 }
730 
731 static void
tegra_clear(struct pipe_context * pcontext,unsigned buffers,const struct pipe_scissor_state * scissor_state,const union pipe_color_union * color,double depth,unsigned stencil)732 tegra_clear(struct pipe_context *pcontext, unsigned buffers, const struct pipe_scissor_state *scissor_state,
733             const union pipe_color_union *color, double depth,
734             unsigned stencil)
735 {
736    struct tegra_context *context = to_tegra_context(pcontext);
737 
738    context->gpu->clear(context->gpu, buffers, NULL, color, depth, stencil);
739 }
740 
741 static void
tegra_clear_render_target(struct pipe_context * pcontext,struct pipe_surface * pdst,const union pipe_color_union * color,unsigned int dstx,unsigned int dsty,unsigned int width,unsigned int height,bool render_condition)742 tegra_clear_render_target(struct pipe_context *pcontext,
743                           struct pipe_surface *pdst,
744                           const union pipe_color_union *color,
745                           unsigned int dstx,
746                           unsigned int dsty,
747                           unsigned int width,
748                           unsigned int height,
749                           bool render_condition)
750 {
751    struct tegra_context *context = to_tegra_context(pcontext);
752    struct tegra_surface *dst = to_tegra_surface(pdst);
753 
754    context->gpu->clear_render_target(context->gpu, dst->gpu, color, dstx,
755                                      dsty, width, height, render_condition);
756 }
757 
758 static void
tegra_clear_depth_stencil(struct pipe_context * pcontext,struct pipe_surface * pdst,unsigned int flags,double depth,unsigned int stencil,unsigned int dstx,unsigned int dsty,unsigned int width,unsigned int height,bool render_condition)759 tegra_clear_depth_stencil(struct pipe_context *pcontext,
760                           struct pipe_surface *pdst,
761                           unsigned int flags,
762                           double depth,
763                           unsigned int stencil,
764                           unsigned int dstx,
765                           unsigned int dsty,
766                           unsigned int width,
767                           unsigned int height,
768                           bool render_condition)
769 {
770    struct tegra_context *context = to_tegra_context(pcontext);
771    struct tegra_surface *dst = to_tegra_surface(pdst);
772 
773    context->gpu->clear_depth_stencil(context->gpu, dst->gpu, flags, depth,
774                                      stencil, dstx, dsty, width, height,
775                                      render_condition);
776 }
777 
778 static void
tegra_clear_texture(struct pipe_context * pcontext,struct pipe_resource * presource,unsigned int level,const struct pipe_box * box,const void * data)779 tegra_clear_texture(struct pipe_context *pcontext,
780                     struct pipe_resource *presource,
781                     unsigned int level,
782                     const struct pipe_box *box,
783                     const void *data)
784 {
785    struct tegra_resource *resource = to_tegra_resource(presource);
786    struct tegra_context *context = to_tegra_context(pcontext);
787 
788    context->gpu->clear_texture(context->gpu, resource->gpu, level, box, data);
789 }
790 
791 static void
tegra_clear_buffer(struct pipe_context * pcontext,struct pipe_resource * presource,unsigned int offset,unsigned int size,const void * value,int value_size)792 tegra_clear_buffer(struct pipe_context *pcontext,
793                    struct pipe_resource *presource,
794                    unsigned int offset,
795                    unsigned int size,
796                    const void *value,
797                    int value_size)
798 {
799    struct tegra_resource *resource = to_tegra_resource(presource);
800    struct tegra_context *context = to_tegra_context(pcontext);
801 
802    context->gpu->clear_buffer(context->gpu, resource->gpu, offset, size,
803                               value, value_size);
804 }
805 
806 static void
tegra_flush(struct pipe_context * pcontext,struct pipe_fence_handle ** fence,unsigned flags)807 tegra_flush(struct pipe_context *pcontext, struct pipe_fence_handle **fence,
808             unsigned flags)
809 {
810    struct tegra_context *context = to_tegra_context(pcontext);
811 
812    context->gpu->flush(context->gpu, fence, flags);
813 }
814 
815 static void
tegra_create_fence_fd(struct pipe_context * pcontext,struct pipe_fence_handle ** fence,int fd,enum pipe_fd_type type)816 tegra_create_fence_fd(struct pipe_context *pcontext,
817                       struct pipe_fence_handle **fence,
818                       int fd, enum pipe_fd_type type)
819 {
820    struct tegra_context *context = to_tegra_context(pcontext);
821 
822    assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
823    context->gpu->create_fence_fd(context->gpu, fence, fd, type);
824 }
825 
826 static void
tegra_fence_server_sync(struct pipe_context * pcontext,struct pipe_fence_handle * fence)827 tegra_fence_server_sync(struct pipe_context *pcontext,
828                         struct pipe_fence_handle *fence)
829 {
830    struct tegra_context *context = to_tegra_context(pcontext);
831 
832    context->gpu->fence_server_sync(context->gpu, fence);
833 }
834 
835 static struct pipe_sampler_view *
tegra_create_sampler_view(struct pipe_context * pcontext,struct pipe_resource * presource,const struct pipe_sampler_view * template)836 tegra_create_sampler_view(struct pipe_context *pcontext,
837                           struct pipe_resource *presource,
838                           const struct pipe_sampler_view *template)
839 {
840    struct tegra_resource *resource = to_tegra_resource(presource);
841    struct tegra_context *context = to_tegra_context(pcontext);
842    struct tegra_sampler_view *view;
843 
844    view = calloc(1, sizeof(*view));
845    if (!view)
846       return NULL;
847 
848    view->base = *template;
849    view->base.context = pcontext;
850    /* overwrite to prevent reference from being released */
851    view->base.texture = NULL;
852    pipe_reference_init(&view->base.reference, 1);
853    pipe_resource_reference(&view->base.texture, presource);
854 
855    view->gpu = context->gpu->create_sampler_view(context->gpu, resource->gpu,
856                                                  template);
857 
858    /* use private reference count */
859    view->gpu->reference.count += 100000000;
860    view->refcount = 100000000;
861 
862    return &view->base;
863 }
864 
/*
 * Destroy a wrapped sampler view: drop the texture reference taken at
 * creation time, return the private references held on the GPU-side view
 * (see tegra_create_sampler_view) and release the wrapper itself.
 */
static void
tegra_sampler_view_destroy(struct pipe_context *pcontext,
                           struct pipe_sampler_view *pview)
{
   struct tegra_sampler_view *view = to_tegra_sampler_view(pview);

   pipe_resource_reference(&view->base.texture, NULL);
   /* adjust private reference count */
   p_atomic_add(&view->gpu->reference.count, -view->refcount);
   pipe_sampler_view_reference(&view->gpu, NULL);
   free(view);
}
877 
878 static struct pipe_surface *
tegra_create_surface(struct pipe_context * pcontext,struct pipe_resource * presource,const struct pipe_surface * template)879 tegra_create_surface(struct pipe_context *pcontext,
880                      struct pipe_resource *presource,
881                      const struct pipe_surface *template)
882 {
883    struct tegra_resource *resource = to_tegra_resource(presource);
884    struct tegra_context *context = to_tegra_context(pcontext);
885    struct tegra_surface *surface;
886 
887    surface = calloc(1, sizeof(*surface));
888    if (!surface)
889       return NULL;
890 
891    surface->gpu = context->gpu->create_surface(context->gpu, resource->gpu,
892                                                template);
893    if (!surface->gpu) {
894       free(surface);
895       return NULL;
896    }
897 
898    memcpy(&surface->base, surface->gpu, sizeof(*surface->gpu));
899    /* overwrite to prevent reference from being released */
900    surface->base.texture = NULL;
901 
902    pipe_reference_init(&surface->base.reference, 1);
903    pipe_resource_reference(&surface->base.texture, presource);
904    surface->base.context = &context->base;
905 
906    return &surface->base;
907 }
908 
909 static void
tegra_surface_destroy(struct pipe_context * pcontext,struct pipe_surface * psurface)910 tegra_surface_destroy(struct pipe_context *pcontext,
911                       struct pipe_surface *psurface)
912 {
913    struct tegra_surface *surface = to_tegra_surface(psurface);
914 
915    pipe_resource_reference(&surface->base.texture, NULL);
916    pipe_surface_reference(&surface->gpu, NULL);
917    free(surface);
918 }
919 
920 static void *
tegra_transfer_map(struct pipe_context * pcontext,struct pipe_resource * presource,unsigned level,unsigned usage,const struct pipe_box * box,struct pipe_transfer ** ptransfer)921 tegra_transfer_map(struct pipe_context *pcontext,
922                    struct pipe_resource *presource,
923                    unsigned level, unsigned usage,
924                    const struct pipe_box *box,
925                    struct pipe_transfer **ptransfer)
926 {
927    struct tegra_resource *resource = to_tegra_resource(presource);
928    struct tegra_context *context = to_tegra_context(pcontext);
929    struct tegra_transfer *transfer;
930 
931    transfer = calloc(1, sizeof(*transfer));
932    if (!transfer)
933       return NULL;
934 
935    if (presource->target == PIPE_BUFFER) {
936       transfer->map = context->gpu->buffer_map(context->gpu, resource->gpu,
937                                                  level, usage, box,
938                                                  &transfer->gpu);
939    } else {
940       transfer->map = context->gpu->texture_map(context->gpu, resource->gpu,
941                                                  level, usage, box,
942                                                  &transfer->gpu);
943    }
944    memcpy(&transfer->base, transfer->gpu, sizeof(*transfer->gpu));
945    transfer->base.resource = NULL;
946    pipe_resource_reference(&transfer->base.resource, presource);
947 
948    *ptransfer = &transfer->base;
949 
950    return transfer->map;
951 }
952 
953 static void
tegra_transfer_flush_region(struct pipe_context * pcontext,struct pipe_transfer * ptransfer,const struct pipe_box * box)954 tegra_transfer_flush_region(struct pipe_context *pcontext,
955                             struct pipe_transfer *ptransfer,
956                             const struct pipe_box *box)
957 {
958    struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
959    struct tegra_context *context = to_tegra_context(pcontext);
960 
961    context->gpu->transfer_flush_region(context->gpu, transfer->gpu, box);
962 }
963 
964 static void
tegra_transfer_unmap(struct pipe_context * pcontext,struct pipe_transfer * ptransfer)965 tegra_transfer_unmap(struct pipe_context *pcontext,
966                      struct pipe_transfer *ptransfer)
967 {
968    struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
969    struct tegra_context *context = to_tegra_context(pcontext);
970 
971    if (ptransfer->resource->target == PIPE_BUFFER)
972       context->gpu->buffer_unmap(context->gpu, transfer->gpu);
973    else
974       context->gpu->texture_unmap(context->gpu, transfer->gpu);
975    pipe_resource_reference(&transfer->base.resource, NULL);
976    free(transfer);
977 }
978 
979 static void
tegra_buffer_subdata(struct pipe_context * pcontext,struct pipe_resource * presource,unsigned usage,unsigned offset,unsigned size,const void * data)980 tegra_buffer_subdata(struct pipe_context *pcontext,
981                      struct pipe_resource *presource,
982                      unsigned usage, unsigned offset,
983                      unsigned size, const void *data)
984 {
985    struct tegra_resource *resource = to_tegra_resource(presource);
986    struct tegra_context *context = to_tegra_context(pcontext);
987 
988    context->gpu->buffer_subdata(context->gpu, resource->gpu, usage, offset,
989                                 size, data);
990 }
991 
992 static void
tegra_texture_subdata(struct pipe_context * pcontext,struct pipe_resource * presource,unsigned level,unsigned usage,const struct pipe_box * box,const void * data,unsigned stride,uintptr_t layer_stride)993 tegra_texture_subdata(struct pipe_context *pcontext,
994                       struct pipe_resource *presource,
995                       unsigned level,
996                       unsigned usage,
997                       const struct pipe_box *box,
998                       const void *data,
999                       unsigned stride,
1000                       uintptr_t layer_stride)
1001 {
1002    struct tegra_resource *resource = to_tegra_resource(presource);
1003    struct tegra_context *context = to_tegra_context(pcontext);
1004 
1005    context->gpu->texture_subdata(context->gpu, resource->gpu, level, usage,
1006                                  box, data, stride, layer_stride);
1007 }
1008 
1009 static void
tegra_texture_barrier(struct pipe_context * pcontext,unsigned int flags)1010 tegra_texture_barrier(struct pipe_context *pcontext, unsigned int flags)
1011 {
1012    struct tegra_context *context = to_tegra_context(pcontext);
1013 
1014    context->gpu->texture_barrier(context->gpu, flags);
1015 }
1016 
1017 static void
tegra_memory_barrier(struct pipe_context * pcontext,unsigned int flags)1018 tegra_memory_barrier(struct pipe_context *pcontext, unsigned int flags)
1019 {
1020    struct tegra_context *context = to_tegra_context(pcontext);
1021 
1022    if (!(flags & ~PIPE_BARRIER_UPDATE))
1023       return;
1024 
1025    context->gpu->memory_barrier(context->gpu, flags);
1026 }
1027 
1028 static struct pipe_video_codec *
tegra_create_video_codec(struct pipe_context * pcontext,const struct pipe_video_codec * template)1029 tegra_create_video_codec(struct pipe_context *pcontext,
1030                          const struct pipe_video_codec *template)
1031 {
1032    struct tegra_context *context = to_tegra_context(pcontext);
1033 
1034    return context->gpu->create_video_codec(context->gpu, template);
1035 }
1036 
1037 static struct pipe_video_buffer *
tegra_create_video_buffer(struct pipe_context * pcontext,const struct pipe_video_buffer * template)1038 tegra_create_video_buffer(struct pipe_context *pcontext,
1039                           const struct pipe_video_buffer *template)
1040 {
1041    struct tegra_context *context = to_tegra_context(pcontext);
1042 
1043    return context->gpu->create_video_buffer(context->gpu, template);
1044 }
1045 
1046 static void *
tegra_create_compute_state(struct pipe_context * pcontext,const struct pipe_compute_state * template)1047 tegra_create_compute_state(struct pipe_context *pcontext,
1048                            const struct pipe_compute_state *template)
1049 {
1050    struct tegra_context *context = to_tegra_context(pcontext);
1051 
1052    return context->gpu->create_compute_state(context->gpu, template);
1053 }
1054 
1055 static void
tegra_bind_compute_state(struct pipe_context * pcontext,void * so)1056 tegra_bind_compute_state(struct pipe_context *pcontext, void *so)
1057 {
1058    struct tegra_context *context = to_tegra_context(pcontext);
1059 
1060    context->gpu->bind_compute_state(context->gpu, so);
1061 }
1062 
1063 static void
tegra_delete_compute_state(struct pipe_context * pcontext,void * so)1064 tegra_delete_compute_state(struct pipe_context *pcontext, void *so)
1065 {
1066    struct tegra_context *context = to_tegra_context(pcontext);
1067 
1068    context->gpu->delete_compute_state(context->gpu, so);
1069 }
1070 
1071 static void
tegra_set_compute_resources(struct pipe_context * pcontext,unsigned int start,unsigned int count,struct pipe_surface ** resources)1072 tegra_set_compute_resources(struct pipe_context *pcontext,
1073                             unsigned int start, unsigned int count,
1074                             struct pipe_surface **resources)
1075 {
1076    struct tegra_context *context = to_tegra_context(pcontext);
1077 
1078    /* XXX unwrap resources */
1079 
1080    context->gpu->set_compute_resources(context->gpu, start, count, resources);
1081 }
1082 
1083 static void
tegra_set_global_binding(struct pipe_context * pcontext,unsigned int first,unsigned int count,struct pipe_resource ** resources,uint32_t ** handles)1084 tegra_set_global_binding(struct pipe_context *pcontext, unsigned int first,
1085                          unsigned int count, struct pipe_resource **resources,
1086                          uint32_t **handles)
1087 {
1088    struct tegra_context *context = to_tegra_context(pcontext);
1089 
1090    /* XXX unwrap resources */
1091 
1092    context->gpu->set_global_binding(context->gpu, first, count, resources,
1093                                     handles);
1094 }
1095 
1096 static void
tegra_launch_grid(struct pipe_context * pcontext,const struct pipe_grid_info * info)1097 tegra_launch_grid(struct pipe_context *pcontext,
1098                   const struct pipe_grid_info *info)
1099 {
1100    struct tegra_context *context = to_tegra_context(pcontext);
1101 
1102    /* XXX unwrap info->indirect? */
1103 
1104    context->gpu->launch_grid(context->gpu, info);
1105 }
1106 
1107 static void
tegra_get_sample_position(struct pipe_context * pcontext,unsigned int count,unsigned int index,float * value)1108 tegra_get_sample_position(struct pipe_context *pcontext, unsigned int count,
1109                           unsigned int index, float *value)
1110 {
1111    struct tegra_context *context = to_tegra_context(pcontext);
1112 
1113    context->gpu->get_sample_position(context->gpu, count, index, value);
1114 }
1115 
1116 static uint64_t
tegra_get_timestamp(struct pipe_context * pcontext)1117 tegra_get_timestamp(struct pipe_context *pcontext)
1118 {
1119    struct tegra_context *context = to_tegra_context(pcontext);
1120 
1121    return context->gpu->get_timestamp(context->gpu);
1122 }
1123 
1124 static void
tegra_flush_resource(struct pipe_context * pcontext,struct pipe_resource * presource)1125 tegra_flush_resource(struct pipe_context *pcontext,
1126                      struct pipe_resource *presource)
1127 {
1128    struct tegra_resource *resource = to_tegra_resource(presource);
1129    struct tegra_context *context = to_tegra_context(pcontext);
1130 
1131    context->gpu->flush_resource(context->gpu, resource->gpu);
1132 }
1133 
1134 static void
tegra_invalidate_resource(struct pipe_context * pcontext,struct pipe_resource * presource)1135 tegra_invalidate_resource(struct pipe_context *pcontext,
1136                           struct pipe_resource *presource)
1137 {
1138    struct tegra_resource *resource = to_tegra_resource(presource);
1139    struct tegra_context *context = to_tegra_context(pcontext);
1140 
1141    context->gpu->invalidate_resource(context->gpu, resource->gpu);
1142 }
1143 
1144 static enum pipe_reset_status
tegra_get_device_reset_status(struct pipe_context * pcontext)1145 tegra_get_device_reset_status(struct pipe_context *pcontext)
1146 {
1147    struct tegra_context *context = to_tegra_context(pcontext);
1148 
1149    return context->gpu->get_device_reset_status(context->gpu);
1150 }
1151 
1152 static void
tegra_set_device_reset_callback(struct pipe_context * pcontext,const struct pipe_device_reset_callback * cb)1153 tegra_set_device_reset_callback(struct pipe_context *pcontext,
1154                                 const struct pipe_device_reset_callback *cb)
1155 {
1156    struct tegra_context *context = to_tegra_context(pcontext);
1157 
1158    context->gpu->set_device_reset_callback(context->gpu, cb);
1159 }
1160 
1161 static void
tegra_dump_debug_state(struct pipe_context * pcontext,FILE * stream,unsigned int flags)1162 tegra_dump_debug_state(struct pipe_context *pcontext, FILE *stream,
1163                        unsigned int flags)
1164 {
1165    struct tegra_context *context = to_tegra_context(pcontext);
1166 
1167    context->gpu->dump_debug_state(context->gpu, stream, flags);
1168 }
1169 
1170 static void
tegra_emit_string_marker(struct pipe_context * pcontext,const char * string,int length)1171 tegra_emit_string_marker(struct pipe_context *pcontext, const char *string,
1172                          int length)
1173 {
1174    struct tegra_context *context = to_tegra_context(pcontext);
1175 
1176    context->gpu->emit_string_marker(context->gpu, string, length);
1177 }
1178 
1179 static bool
tegra_generate_mipmap(struct pipe_context * pcontext,struct pipe_resource * presource,enum pipe_format format,unsigned int base_level,unsigned int last_level,unsigned int first_layer,unsigned int last_layer)1180 tegra_generate_mipmap(struct pipe_context *pcontext,
1181                       struct pipe_resource *presource,
1182                       enum pipe_format format,
1183                       unsigned int base_level,
1184                       unsigned int last_level,
1185                       unsigned int first_layer,
1186                       unsigned int last_layer)
1187 {
1188    struct tegra_resource *resource = to_tegra_resource(presource);
1189    struct tegra_context *context = to_tegra_context(pcontext);
1190 
1191    return context->gpu->generate_mipmap(context->gpu, resource->gpu, format,
1192                                         base_level, last_level, first_layer,
1193                                         last_layer);
1194 }
1195 
1196 static uint64_t
tegra_create_texture_handle(struct pipe_context * pcontext,struct pipe_sampler_view * view,const struct pipe_sampler_state * state)1197 tegra_create_texture_handle(struct pipe_context *pcontext,
1198                             struct pipe_sampler_view *view,
1199                             const struct pipe_sampler_state *state)
1200 {
1201    struct tegra_context *context = to_tegra_context(pcontext);
1202 
1203    return context->gpu->create_texture_handle(context->gpu, view, state);
1204 }
1205 
static void tegra_delete_texture_handle(struct pipe_context *pcontext,
                                        uint64_t handle)
{
   /* Bindless texture handles are owned by the wrapped GPU context. */
   struct pipe_context *gpu = to_tegra_context(pcontext)->gpu;

   gpu->delete_texture_handle(gpu, handle);
}
1213 
static void tegra_make_texture_handle_resident(struct pipe_context *pcontext,
                                               uint64_t handle, bool resident)
{
   /* Residency changes are applied on the wrapped GPU context. */
   struct pipe_context *gpu = to_tegra_context(pcontext)->gpu;

   gpu->make_texture_handle_resident(gpu, handle, resident);
}
1221 
tegra_create_image_handle(struct pipe_context * pcontext,const struct pipe_image_view * image)1222 static uint64_t tegra_create_image_handle(struct pipe_context *pcontext,
1223                                           const struct pipe_image_view *image)
1224 {
1225    struct tegra_context *context = to_tegra_context(pcontext);
1226 
1227    return context->gpu->create_image_handle(context->gpu, image);
1228 }
1229 
static void tegra_delete_image_handle(struct pipe_context *pcontext,
                                      uint64_t handle)
{
   /* Bindless image handles are owned by the wrapped GPU context. */
   struct pipe_context *gpu = to_tegra_context(pcontext)->gpu;

   gpu->delete_image_handle(gpu, handle);
}
1237 
static void tegra_make_image_handle_resident(struct pipe_context *pcontext,
                                             uint64_t handle, unsigned access,
                                             bool resident)
{
   /* Residency changes are applied on the wrapped GPU context. */
   struct pipe_context *gpu = to_tegra_context(pcontext)->gpu;

   gpu->make_image_handle_resident(gpu, handle, access, resident);
}
1247 
1248 struct pipe_context *
tegra_screen_context_create(struct pipe_screen * pscreen,void * priv,unsigned int flags)1249 tegra_screen_context_create(struct pipe_screen *pscreen, void *priv,
1250                             unsigned int flags)
1251 {
1252    struct tegra_screen *screen = to_tegra_screen(pscreen);
1253    struct tegra_context *context;
1254 
1255    context = calloc(1, sizeof(*context));
1256    if (!context)
1257       return NULL;
1258 
1259    context->gpu = screen->gpu->context_create(screen->gpu, priv, flags);
1260    if (!context->gpu) {
1261       debug_error("failed to create GPU context\n");
1262       goto free;
1263    }
1264 
1265    context->base.screen = &screen->base;
1266    context->base.priv = priv;
1267 
1268    /*
1269     * Create custom stream and const uploaders. Note that technically nouveau
1270     * already creates uploaders that could be reused, but that would make the
1271     * resource unwrapping rather complicate. The reason for that is that both
1272     * uploaders create resources based on the context that they were created
1273     * from, which means that nouveau's uploader will use the nouveau context
1274     * which means that those resources must not be unwrapped. So before each
1275     * resource is unwrapped, the code would need to check that it does not
1276     * correspond to the uploaders' buffers.
1277     *
1278     * However, duplicating the uploaders here sounds worse than it is. The
1279     * default implementation that nouveau uses allocates buffers lazily, and
1280     * since it is never used, no buffers will every be allocated and the only
1281     * memory wasted is that occupied by the nouveau uploader itself.
1282     */
1283    context->base.stream_uploader = u_upload_create_default(&context->base);
1284    if (!context->base.stream_uploader)
1285       goto destroy;
1286 
1287    context->base.const_uploader = context->base.stream_uploader;
1288 
1289    context->base.destroy = tegra_destroy;
1290 
1291    context->base.draw_vbo = tegra_draw_vbo;
1292 
1293    context->base.render_condition = tegra_render_condition;
1294 
1295    context->base.create_query = tegra_create_query;
1296    context->base.create_batch_query = tegra_create_batch_query;
1297    context->base.destroy_query = tegra_destroy_query;
1298    context->base.begin_query = tegra_begin_query;
1299    context->base.end_query = tegra_end_query;
1300    context->base.get_query_result = tegra_get_query_result;
1301    context->base.get_query_result_resource = tegra_get_query_result_resource;
1302    context->base.set_active_query_state = tegra_set_active_query_state;
1303 
1304    context->base.create_blend_state = tegra_create_blend_state;
1305    context->base.bind_blend_state = tegra_bind_blend_state;
1306    context->base.delete_blend_state = tegra_delete_blend_state;
1307 
1308    context->base.create_sampler_state = tegra_create_sampler_state;
1309    context->base.bind_sampler_states = tegra_bind_sampler_states;
1310    context->base.delete_sampler_state = tegra_delete_sampler_state;
1311 
1312    context->base.create_rasterizer_state = tegra_create_rasterizer_state;
1313    context->base.bind_rasterizer_state = tegra_bind_rasterizer_state;
1314    context->base.delete_rasterizer_state = tegra_delete_rasterizer_state;
1315 
1316    context->base.create_depth_stencil_alpha_state = tegra_create_depth_stencil_alpha_state;
1317    context->base.bind_depth_stencil_alpha_state = tegra_bind_depth_stencil_alpha_state;
1318    context->base.delete_depth_stencil_alpha_state = tegra_delete_depth_stencil_alpha_state;
1319 
1320    context->base.create_fs_state = tegra_create_fs_state;
1321    context->base.bind_fs_state = tegra_bind_fs_state;
1322    context->base.delete_fs_state = tegra_delete_fs_state;
1323 
1324    context->base.create_vs_state = tegra_create_vs_state;
1325    context->base.bind_vs_state = tegra_bind_vs_state;
1326    context->base.delete_vs_state = tegra_delete_vs_state;
1327 
1328    context->base.create_gs_state = tegra_create_gs_state;
1329    context->base.bind_gs_state = tegra_bind_gs_state;
1330    context->base.delete_gs_state = tegra_delete_gs_state;
1331 
1332    context->base.create_tcs_state = tegra_create_tcs_state;
1333    context->base.bind_tcs_state = tegra_bind_tcs_state;
1334    context->base.delete_tcs_state = tegra_delete_tcs_state;
1335 
1336    context->base.create_tes_state = tegra_create_tes_state;
1337    context->base.bind_tes_state = tegra_bind_tes_state;
1338    context->base.delete_tes_state = tegra_delete_tes_state;
1339 
1340    context->base.create_vertex_elements_state = tegra_create_vertex_elements_state;
1341    context->base.bind_vertex_elements_state = tegra_bind_vertex_elements_state;
1342    context->base.delete_vertex_elements_state = tegra_delete_vertex_elements_state;
1343 
1344    context->base.set_blend_color = tegra_set_blend_color;
1345    context->base.set_stencil_ref = tegra_set_stencil_ref;
1346    context->base.set_sample_mask = tegra_set_sample_mask;
1347    context->base.set_min_samples = tegra_set_min_samples;
1348    context->base.set_clip_state = tegra_set_clip_state;
1349 
1350    context->base.set_constant_buffer = tegra_set_constant_buffer;
1351    context->base.set_framebuffer_state = tegra_set_framebuffer_state;
1352    context->base.set_polygon_stipple = tegra_set_polygon_stipple;
1353    context->base.set_scissor_states = tegra_set_scissor_states;
1354    context->base.set_window_rectangles = tegra_set_window_rectangles;
1355    context->base.set_viewport_states = tegra_set_viewport_states;
1356    context->base.set_sampler_views = tegra_set_sampler_views;
1357    context->base.set_tess_state = tegra_set_tess_state;
1358 
1359    context->base.set_debug_callback = tegra_set_debug_callback;
1360 
1361    context->base.set_shader_buffers = tegra_set_shader_buffers;
1362    context->base.set_shader_images = tegra_set_shader_images;
1363    context->base.set_vertex_buffers = tegra_set_vertex_buffers;
1364 
1365    context->base.create_stream_output_target = tegra_create_stream_output_target;
1366    context->base.stream_output_target_destroy = tegra_stream_output_target_destroy;
1367    context->base.set_stream_output_targets = tegra_set_stream_output_targets;
1368 
1369    context->base.resource_copy_region = tegra_resource_copy_region;
1370    context->base.blit = tegra_blit;
1371    context->base.clear = tegra_clear;
1372    context->base.clear_render_target = tegra_clear_render_target;
1373    context->base.clear_depth_stencil = tegra_clear_depth_stencil;
1374    context->base.clear_texture = tegra_clear_texture;
1375    context->base.clear_buffer = tegra_clear_buffer;
1376    context->base.flush = tegra_flush;
1377 
1378    context->base.create_fence_fd = tegra_create_fence_fd;
1379    context->base.fence_server_sync = tegra_fence_server_sync;
1380 
1381    context->base.create_sampler_view = tegra_create_sampler_view;
1382    context->base.sampler_view_destroy = tegra_sampler_view_destroy;
1383 
1384    context->base.create_surface = tegra_create_surface;
1385    context->base.surface_destroy = tegra_surface_destroy;
1386 
1387    context->base.buffer_map = tegra_transfer_map;
1388    context->base.texture_map = tegra_transfer_map;
1389    context->base.transfer_flush_region = tegra_transfer_flush_region;
1390    context->base.buffer_unmap = tegra_transfer_unmap;
1391    context->base.texture_unmap = tegra_transfer_unmap;
1392    context->base.buffer_subdata = tegra_buffer_subdata;
1393    context->base.texture_subdata = tegra_texture_subdata;
1394 
1395    context->base.texture_barrier = tegra_texture_barrier;
1396    context->base.memory_barrier = tegra_memory_barrier;
1397 
1398    context->base.create_video_codec = tegra_create_video_codec;
1399    context->base.create_video_buffer = tegra_create_video_buffer;
1400 
1401    context->base.create_compute_state = tegra_create_compute_state;
1402    context->base.bind_compute_state = tegra_bind_compute_state;
1403    context->base.delete_compute_state = tegra_delete_compute_state;
1404    context->base.set_compute_resources = tegra_set_compute_resources;
1405    context->base.set_global_binding = tegra_set_global_binding;
1406    context->base.launch_grid = tegra_launch_grid;
1407    context->base.get_sample_position = tegra_get_sample_position;
1408    context->base.get_timestamp = tegra_get_timestamp;
1409 
1410    context->base.flush_resource = tegra_flush_resource;
1411    context->base.invalidate_resource = tegra_invalidate_resource;
1412 
1413    context->base.get_device_reset_status = tegra_get_device_reset_status;
1414    context->base.set_device_reset_callback = tegra_set_device_reset_callback;
1415    context->base.dump_debug_state = tegra_dump_debug_state;
1416    context->base.emit_string_marker = tegra_emit_string_marker;
1417 
1418    context->base.generate_mipmap = tegra_generate_mipmap;
1419 
1420    context->base.create_texture_handle = tegra_create_texture_handle;
1421    context->base.delete_texture_handle = tegra_delete_texture_handle;
1422    context->base.make_texture_handle_resident = tegra_make_texture_handle_resident;
1423    context->base.create_image_handle = tegra_create_image_handle;
1424    context->base.delete_image_handle = tegra_delete_image_handle;
1425    context->base.make_image_handle_resident = tegra_make_image_handle_resident;
1426 
1427    return &context->base;
1428 
1429 destroy:
1430    context->gpu->destroy(context->gpu);
1431 free:
1432    free(context);
1433    return NULL;
1434 }
1435