/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_build_pm4.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"

#define GFX6_MAX_VIEWPORT_SIZE   16384
#define GFX12_MAX_VIEWPORT_SIZE  32768 /* TODO: this should be 64K, but maxx/maxy don't have enough bits */

static void si_get_small_prim_cull_info(struct si_context *sctx, struct si_small_prim_cull_info *out)
{
   /* This is needed by small primitive culling, because it's done
    * in screen space.
    */
   struct si_small_prim_cull_info info;
   unsigned num_samples = si_get_num_coverage_samples(sctx);
   assert(num_samples >= 1);

   info.scale[0] = sctx->viewports.states[0].scale[0];
   info.scale[1] = sctx->viewports.states[0].scale[1];
   info.translate[0] = sctx->viewports.states[0].translate[0];
   info.translate[1] = sctx->viewports.states[0].translate[1];

   /* The viewport shouldn't flip the X axis for the small prim culling to work. */
   assert(-info.scale[0] + info.translate[0] <= info.scale[0] + info.translate[0]);

   /* Compute the line width used by the rasterizer. */
   float line_width = sctx->queued.named.rasterizer->line_width;
   if (num_samples == 1)
      line_width = roundf(line_width);
   line_width = MAX2(line_width, 1);

   float half_line_width = line_width * 0.5;
   if (info.scale[0] == 0 || info.scale[1] == 0) {
      info.clip_half_line_width[0] = 0;
      info.clip_half_line_width[1] = 0;
   } else {
      info.clip_half_line_width[0] = half_line_width / fabs(info.scale[0]);
      info.clip_half_line_width[1] = half_line_width / fabs(info.scale[1]);
   }
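   /* Illustrative example: with a 1920-wide viewport, scale.x = 960, so a
    * 2-pixel-wide line has half_line_width = 1 and clip_half_line_width.x
    * = 1/960 in clip-space units.
    */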

   /* If the Y axis is inverted (OpenGL default framebuffer), reverse it.
    * This is because the viewport transformation inverts the clip space
    * bounding box, so min becomes max, which breaks small primitive
    * culling.
    */
   if (sctx->viewport0_y_inverted) {
      info.scale[1] = -info.scale[1];
      info.translate[1] = -info.translate[1];
   }

   /* This is what the hardware does. */
   if (!sctx->queued.named.rasterizer->half_pixel_center) {
      info.translate[0] += 0.5;
      info.translate[1] += 0.5;
   }

   memcpy(info.scale_no_aa, info.scale, sizeof(info.scale));
   memcpy(info.translate_no_aa, info.translate, sizeof(info.translate));

   /* Scale the framebuffer up, so that samples become pixels and small
    * primitive culling is the same for all sample counts.
    * This only works with the standard DX sample positions, because
    * the samples are evenly spaced on both X and Y axes.
    */
   for (unsigned i = 0; i < 2; i++) {
      info.scale[i] *= num_samples;
      info.translate[i] *= num_samples;
   }

   *out = info;
}

static void si_emit_cull_state(struct si_context *sctx, unsigned index)
{
   assert(sctx->screen->use_ngg_culling);

   struct si_small_prim_cull_info info;
   si_get_small_prim_cull_info(sctx, &info);

   if (!sctx->small_prim_cull_info_buf ||
       memcmp(&info, &sctx->last_small_prim_cull_info, sizeof(info))) {
      unsigned offset = 0;

      u_upload_data(sctx->b.const_uploader, 0, sizeof(info),
                    si_optimal_tcc_alignment(sctx, sizeof(info)), &info, &offset,
                    (struct pipe_resource **)&sctx->small_prim_cull_info_buf);

      sctx->small_prim_cull_info_address = sctx->small_prim_cull_info_buf->gpu_address + offset;
      sctx->last_small_prim_cull_info = info;
   }

   /* This will end up in SGPR6 as (value << 8), shifted by the hw. */
   radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->small_prim_cull_info_buf,
                             RADEON_USAGE_READ | RADEON_PRIO_CONST_BUFFER);

   if (sctx->gfx_level >= GFX12) {
      gfx12_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
                            GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                            sctx->small_prim_cull_info_address);
   } else if (sctx->screen->info.has_set_sh_pairs_packed) {
      gfx11_push_gfx_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 +
                            GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                            sctx->small_prim_cull_info_address);
   } else {
      radeon_begin(&sctx->gfx_cs);
      radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + GFX9_SGPR_SMALL_PRIM_CULL_INFO * 4,
                        sctx->small_prim_cull_info_address);
      radeon_end();
   }

   /* Better subpixel precision increases the efficiency of small
    * primitive culling. (more precision means a tighter bounding box
    * around primitives and more accurate elimination)
    */
   unsigned quant_mode = sctx->viewports.as_scissor[0].quant_mode;
   float small_prim_precision_no_aa = 0;
   unsigned num_samples = si_get_num_coverage_samples(sctx);

   if (quant_mode == SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH)
      small_prim_precision_no_aa = 1.0 / 4096.0;
   else if (quant_mode == SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH)
      small_prim_precision_no_aa = 1.0 / 1024.0;
   else
      small_prim_precision_no_aa = 1.0 / 256.0;

   float small_prim_precision = num_samples * small_prim_precision_no_aa;
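   /* Illustrative: 14_10 quantization with 4 coverage samples gives
    * small_prim_precision = 4 / 1024 = 1/256 (i.e. 2^-8).
    */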

   /* Set VS_STATE.SMALL_PRIM_PRECISION for NGG culling.
    *
    * small_prim_precision is 1 / 2^n. We only need n between 5 (1/32) and 12 (1/4096).
    * Such a floating point value can be packed into 4 bits as follows:
    * If we pass the first 4 bits of the exponent to the shader and set the next 3 bits
    * to 1, we'll get the number exactly because all other bits are always 0. See:
    *                                                               1
    * value  =  (0x70 | value.exponent[0:3]) << 23  =  ------------------------------
    *                                                  2 ^ (15 - value.exponent[0:3])
    *
    * So pass only the first 4 bits of the float exponent to the shader.
    */
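   /* Worked example (illustrative): 1/1024 = 2^-10 has the IEEE-754 biased
    * exponent 127 - 10 = 117 = 0x75, so the low 4 exponent bits are 0x5.
    * The shader reconstructs (0x70 | 0x5) << 23, a biased exponent of 0x75,
    * which matches 1 / 2^(15 - 5) = 1/1024 exactly.
    */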
   SET_FIELD(sctx->current_gs_state, GS_STATE_SMALL_PRIM_PRECISION_NO_AA,
             (fui(small_prim_precision_no_aa) >> 23) & 0xf);
   SET_FIELD(sctx->current_gs_state, GS_STATE_SMALL_PRIM_PRECISION,
             (fui(small_prim_precision) >> 23) & 0xf);
}

static void si_set_scissor_states(struct pipe_context *pctx, unsigned start_slot,
                                  unsigned num_scissors, const struct pipe_scissor_state *state)
{
   struct si_context *ctx = (struct si_context *)pctx;
   int i;

   for (i = 0; i < num_scissors; i++)
      ctx->scissors[start_slot + i] = state[i];

   if (!ctx->queued.named.rasterizer->scissor_enable)
      return;

   si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}

/* Since the guard band disables clipping, we have to clip per-pixel
 * using a scissor.
 */
static void si_get_scissor_from_viewport(struct si_context *ctx,
                                         const struct pipe_viewport_state *vp,
                                         struct si_signed_scissor *scissor)
{
   float tmp, minx, miny, maxx, maxy;

   /* Convert (-1, -1) and (1, 1) from clip space into window space. */
   minx = -vp->scale[0] + vp->translate[0];
   miny = -vp->scale[1] + vp->translate[1];
   maxx = vp->scale[0] + vp->translate[0];
   maxy = vp->scale[1] + vp->translate[1];
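   /* Illustrative: a 1920x1080 viewport at the origin has scale = (960, 540)
    * and translate = (960, 540), so this yields (minx, miny) = (0, 0) and
    * (maxx, maxy) = (1920, 1080).
    */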

   /* Handle inverted viewports. */
   if (minx > maxx) {
      tmp = minx;
      minx = maxx;
      maxx = tmp;
   }
   if (miny > maxy) {
      tmp = miny;
      miny = maxy;
      maxy = tmp;
   }

   /* Convert to integer and round up the max bounds. */
   scissor->minx = minx;
   scissor->miny = miny;
   scissor->maxx = ceilf(maxx);
   scissor->maxy = ceilf(maxy);
}

static void si_clamp_scissor(struct si_context *ctx, struct pipe_scissor_state *out,
                             struct si_signed_scissor *scissor)
{
   unsigned max_scissor = ctx->gfx_level >= GFX12 ? GFX12_MAX_VIEWPORT_SIZE : GFX6_MAX_VIEWPORT_SIZE;

   out->minx = CLAMP(scissor->minx, 0, max_scissor);
   out->miny = CLAMP(scissor->miny, 0, max_scissor);
   out->maxx = CLAMP(scissor->maxx, 0, max_scissor);
   out->maxy = CLAMP(scissor->maxy, 0, max_scissor);
}

static void si_clip_scissor(struct pipe_scissor_state *out, struct pipe_scissor_state *clip)
{
   out->minx = MAX2(out->minx, clip->minx);
   out->miny = MAX2(out->miny, clip->miny);
   out->maxx = MIN2(out->maxx, clip->maxx);
   out->maxy = MIN2(out->maxy, clip->maxy);
}

static void si_scissor_make_union(struct si_signed_scissor *out, struct si_signed_scissor *in)
{
   out->minx = MIN2(out->minx, in->minx);
   out->miny = MIN2(out->miny, in->miny);
   out->maxx = MAX2(out->maxx, in->maxx);
   out->maxy = MAX2(out->maxy, in->maxy);
   out->quant_mode = MIN2(out->quant_mode, in->quant_mode);
}
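   /* Note: MIN2 selects the numerically smallest quant mode, which (per the
    * enum order 16_8 = 0, 14_10 = 1, 12_12 = 2 implied by max_viewport_size[]
    * below) is the coarsest precision with the largest representable range,
    * i.e. the only mode valid for the union of all viewports.
    */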
}

static void si_emit_one_scissor(struct si_context *ctx, struct radeon_cmdbuf *cs,
                                struct si_signed_scissor *vp_scissor,
                                struct pipe_scissor_state *scissor)
{
   struct pipe_scissor_state final;

   if (ctx->vs_disables_clipping_viewport) {
      final.minx = final.miny = 0;
      final.maxx = final.maxy = ctx->gfx_level >= GFX12 ? GFX12_MAX_VIEWPORT_SIZE : GFX6_MAX_VIEWPORT_SIZE;
   } else {
      si_clamp_scissor(ctx, &final, vp_scissor);
   }

   if (scissor)
      si_clip_scissor(&final, scissor);

   radeon_begin(cs);
   if (ctx->gfx_level >= GFX12) {
      if (final.maxx == 0 || final.maxy == 0) {
         /* An empty scissor must be done like this because the bottom-right bounds are inclusive. */
         radeon_emit(S_028250_TL_X(1) | S_028250_TL_Y_GFX12(1));
         radeon_emit(S_028254_BR_X(0) | S_028254_BR_Y(0));
      } else {
         radeon_emit(S_028250_TL_X(final.minx) | S_028250_TL_Y_GFX12(final.miny));
         radeon_emit(S_028254_BR_X(final.maxx - 1) | S_028254_BR_Y(final.maxy - 1));
      }
   } else {
      /* Workaround for a hw bug on GFX6 that occurs when PA_SU_HARDWARE_SCREEN_OFFSET != 0 and
       * any_scissor.BR_X/Y <= 0.
       */
      if (ctx->gfx_level == GFX6 && (final.maxx == 0 || final.maxy == 0)) {
         radeon_emit(S_028250_TL_X(1) | S_028250_TL_Y_GFX6(1) | S_028250_WINDOW_OFFSET_DISABLE(1));
         radeon_emit(S_028254_BR_X(1) | S_028254_BR_Y(1));
      } else {
         radeon_emit(S_028250_TL_X(final.minx) | S_028250_TL_Y_GFX6(final.miny) |
                     S_028250_WINDOW_OFFSET_DISABLE(1));
         radeon_emit(S_028254_BR_X(final.maxx) | S_028254_BR_Y(final.maxy));
      }
   }
   radeon_end();
}

static void si_emit_guardband(struct si_context *sctx, unsigned index)
{
   const struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
   struct si_signed_scissor vp_as_scissor;
   struct pipe_viewport_state vp;
   float left, top, right, bottom, max_range, guardband_x, guardband_y;

   if (sctx->vs_writes_viewport_index) {
      /* Shaders can draw to any viewport. Make a union of all
       * viewports. */
      vp_as_scissor = sctx->viewports.as_scissor[0];
      for (unsigned i = 1; i < SI_MAX_VIEWPORTS; i++) {
         si_scissor_make_union(&vp_as_scissor, &sctx->viewports.as_scissor[i]);
      }
   } else {
      vp_as_scissor = sctx->viewports.as_scissor[0];
   }

   /* Blits don't set the viewport state. The vertex shader determines
    * the viewport size by scaling the coordinates, so we don't know
    * how large the viewport is. Assume the worst case.
    */
   if (sctx->vs_disables_clipping_viewport)
      vp_as_scissor.quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;

   /* Determine the optimal hardware screen offset to center the viewport
    * within the viewport range in order to maximize the guardband size.
    */
   int hw_screen_offset_x = (vp_as_scissor.maxx + vp_as_scissor.minx) / 2;
   int hw_screen_offset_y = (vp_as_scissor.maxy + vp_as_scissor.miny) / 2;

   /* GFX6-GFX7 need to align the offset to an ubertile consisting of all SEs. */
   const unsigned hw_screen_offset_alignment =
      sctx->gfx_level >= GFX11 ? 32 :
      sctx->gfx_level >= GFX8 ? 16 : MAX2(sctx->screen->se_tile_repeat, 16);
   const unsigned max_hw_screen_offset = sctx->gfx_level >= GFX12 ? 32752 : 8176;

   /* Indexed by quantization modes */
   static int max_viewport_size[] = {65536, 16384, 4096};
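   /* i.e. 16_8 -> 65536, 14_10 -> 16384, 12_12 -> 4096, matching the quant
    * mode selection in si_set_viewport_states below.
    */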

   /* Ensure that the whole viewport stays representable in
    * absolute coordinates.
    * See comment in si_set_viewport_states.
    */
   assert(vp_as_scissor.maxx <= max_viewport_size[vp_as_scissor.quant_mode] &&
          vp_as_scissor.maxy <= max_viewport_size[vp_as_scissor.quant_mode]);

   hw_screen_offset_x = CLAMP(hw_screen_offset_x, 0, max_hw_screen_offset);
   hw_screen_offset_y = CLAMP(hw_screen_offset_y, 0, max_hw_screen_offset);

   /* Align the screen offset by dropping the low bits. */
   hw_screen_offset_x &= ~(hw_screen_offset_alignment - 1);
   hw_screen_offset_y &= ~(hw_screen_offset_alignment - 1);

   /* Apply the offset to center the viewport and maximize the guardband. */
   vp_as_scissor.minx -= hw_screen_offset_x;
   vp_as_scissor.maxx -= hw_screen_offset_x;
   vp_as_scissor.miny -= hw_screen_offset_y;
   vp_as_scissor.maxy -= hw_screen_offset_y;

   /* Reconstruct the viewport transformation from the scissor. */
   vp.translate[0] = (vp_as_scissor.minx + vp_as_scissor.maxx) / 2.0;
   vp.translate[1] = (vp_as_scissor.miny + vp_as_scissor.maxy) / 2.0;
   vp.scale[0] = vp_as_scissor.maxx - vp.translate[0];
   vp.scale[1] = vp_as_scissor.maxy - vp.translate[1];

   /* Treat a 0x0 viewport as 1x1 to prevent division by zero. */
   if (vp_as_scissor.minx == vp_as_scissor.maxx)
      vp.scale[0] = 0.5;
   if (vp_as_scissor.miny == vp_as_scissor.maxy)
      vp.scale[1] = 0.5;

   /* Find the biggest guard band that is inside the supported viewport
    * range. The guard band is specified as a horizontal and vertical
    * distance from (0,0) in clip space.
    *
    * This is done by applying the inverse viewport transformation
    * on the viewport limits to get those limits in clip space.
    *
    * The viewport range is [-max_viewport_size/2 - 1, max_viewport_size/2].
    * (-1 to the min coord because max_viewport_size is odd and ViewportBounds
    * Min/Max are -32768, 32767).
    */
   assert(vp_as_scissor.quant_mode < ARRAY_SIZE(max_viewport_size));
   max_range = max_viewport_size[vp_as_scissor.quant_mode] / 2;
   left = (-max_range - 1 - vp.translate[0]) / vp.scale[0];
   right = (max_range - vp.translate[0]) / vp.scale[0];
   top = (-max_range - 1 - vp.translate[1]) / vp.scale[1];
   bottom = (max_range - vp.translate[1]) / vp.scale[1];

   assert(left <= -1 && top <= -1 && right >= 1 && bottom >= 1);

   guardband_x = MIN2(-left, right);
   guardband_y = MIN2(-top, bottom);
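   /* Illustrative (assuming 16_8 quant mode and the GFX8+ offset alignment of
    * 16): for a 1920-wide viewport at the origin, hw_screen_offset_x = 960,
    * which centers it to [-960, 960], so vp.translate[0] = 0, vp.scale[0] = 960
    * and guardband_x = 32768 / 960, roughly 34 clip-space units.
    */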

   float discard_x = 1.0;
   float discard_y = 1.0;
   float distance = sctx->current_clip_discard_distance;

   /* Add half the point size / line width */
   discard_x += distance / (2.0 * vp.scale[0]);
   discard_y += distance / (2.0 * vp.scale[1]);

   /* Discard primitives that would lie entirely outside the viewport area. */
   discard_x = MIN2(discard_x, guardband_x);
   discard_y = MIN2(discard_y, guardband_y);

   unsigned pa_su_vtx_cntl = S_028BE4_PIX_CENTER(rs->half_pixel_center) |
                             S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
                             S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH +
                                                 vp_as_scissor.quant_mode);
   unsigned pa_su_hardware_screen_offset = S_028234_HW_SCREEN_OFFSET_X(hw_screen_offset_x >> 4) |
                                           S_028234_HW_SCREEN_OFFSET_Y(hw_screen_offset_y >> 4);

   /* If any of the GB registers is updated, all of them must be updated.
    * R_028BE8_PA_CL_GB_VERT_CLIP_ADJ, R_028BEC_PA_CL_GB_VERT_DISC_ADJ
    * R_028BF0_PA_CL_GB_HORZ_CLIP_ADJ, R_028BF4_PA_CL_GB_HORZ_DISC_ADJ
    */
   if (sctx->gfx_level >= GFX12) {
      radeon_begin(&sctx->gfx_cs);
      gfx12_begin_context_regs();
      gfx12_opt_set_context_reg(R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
                                pa_su_vtx_cntl);
      gfx12_opt_set_context_reg4(R_02842C_PA_CL_GB_VERT_CLIP_ADJ,
                                 SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ,
                                 fui(guardband_y), fui(discard_y),
                                 fui(guardband_x), fui(discard_x));
      gfx12_opt_set_context_reg(R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                                SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                                pa_su_hardware_screen_offset);
      gfx12_end_context_regs();
      radeon_end(); /* don't track context rolls on GFX12 */
   } else if (sctx->screen->info.has_set_context_pairs_packed) {
      radeon_begin(&sctx->gfx_cs);
      gfx11_begin_packed_context_regs();
      gfx11_opt_set_context_reg(R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
                                pa_su_vtx_cntl);
      gfx11_opt_set_context_reg4(R_028BE8_PA_CL_GB_VERT_CLIP_ADJ,
                                 SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ,
                                 fui(guardband_y), fui(discard_y),
                                 fui(guardband_x), fui(discard_x));
      gfx11_opt_set_context_reg(R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                                SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                                pa_su_hardware_screen_offset);
      gfx11_end_packed_context_regs();
      radeon_end(); /* don't track context rolls on GFX11 */
   } else {
      radeon_begin(&sctx->gfx_cs);
      radeon_opt_set_context_reg5(R_028BE4_PA_SU_VTX_CNTL, SI_TRACKED_PA_SU_VTX_CNTL,
                                  pa_su_vtx_cntl,
                                  fui(guardband_y), fui(discard_y),
                                  fui(guardband_x), fui(discard_x));
      radeon_opt_set_context_reg(R_028234_PA_SU_HARDWARE_SCREEN_OFFSET,
                                 SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
                                 pa_su_hardware_screen_offset);
      radeon_end_update_context_roll();
   }
}

static void si_emit_scissors(struct si_context *ctx, unsigned index)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_scissor_state *states = ctx->scissors;
   bool scissor_enabled = ctx->queued.named.rasterizer->scissor_enable;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      struct si_signed_scissor *vp = &ctx->viewports.as_scissor[0];

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2);
      radeon_end();

      si_emit_one_scissor(ctx, cs, vp, scissor_enabled ? &states[0] : NULL);
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_028250_PA_SC_VPORT_SCISSOR_0_TL, SI_MAX_VIEWPORTS * 2);
   radeon_end();

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_emit_one_scissor(ctx, cs, &ctx->viewports.as_scissor[i],
                          scissor_enabled ? &states[i] : NULL);
   }
}

static void si_set_viewport_states(struct pipe_context *pctx, unsigned start_slot,
                                   unsigned num_viewports, const struct pipe_viewport_state *state)
{
   struct si_context *ctx = (struct si_context *)pctx;
   int i;

   for (i = 0; i < num_viewports; i++) {
      unsigned index = start_slot + i;
      struct si_signed_scissor *scissor = &ctx->viewports.as_scissor[index];

      ctx->viewports.states[index] = state[i];

      si_get_scissor_from_viewport(ctx, &state[i], scissor);

      int max_corner = MAX2(
         MAX2(abs(scissor->maxx), abs(scissor->maxy)),
         MAX2(abs(scissor->minx), abs(scissor->miny)));

      /* Determine the best quantization mode (subpixel precision),
       * but also leave enough space for the guardband.
       *
       * Note that primitive binning requires QUANT_MODE == 16_8 on Vega10
       * and Raven1 for line and rectangle primitive types to work correctly.
       * Always use 16_8 if primitive binning may occur.
       */
      if ((ctx->family == CHIP_VEGA10 || ctx->family == CHIP_RAVEN) && ctx->screen->dpbb_allowed)
         max_corner = 16384; /* Use QUANT_MODE == 16_8. */

      /* Another constraint is that all coordinates in the viewport
       * are representable in fixed point with respect to the
       * surface origin.
       *
       * It means that PA_SU_HARDWARE_SCREEN_OFFSET can't be given
       * an offset that would make the upper corner of the viewport
       * greater than the maximum representable number post
       * quantization, i.e. 2^quant_bits.
       *
       * This does not matter for 14.10 and 16.8 formats since the
       * offset is already limited at 8k, but it means we can't use
       * 12.12 if we are drawing to some pixels outside the lower
       * 4k x 4k of the render target.
       */

      if (max_corner <= 1024) /* 4K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH;
      else if (max_corner <= 4096) /* 16K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH;
      else /* 64K scanline area for guardband */
         scissor->quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;
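      /* Illustrative: a viewport whose farthest corner is 3000 pixels from the
       * origin takes the max_corner <= 4096 branch above, i.e. 1/1024th subpixel
       * precision with a 16384-wide representable range for the guardband.
       */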
   }

   if (start_slot == 0) {
      ctx->viewport0_y_inverted = state->scale[1] < 0;

      /* NGG cull state uses the viewport and quant mode. */
      if (ctx->screen->use_ngg_culling)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);
   }

   si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
}

static void gfx6_emit_one_viewport(struct si_context *ctx, struct pipe_viewport_state *state)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;

   radeon_begin(cs);
   radeon_emit(fui(state->scale[0]));
   radeon_emit(fui(state->translate[0]));
   radeon_emit(fui(state->scale[1]));
   radeon_emit(fui(state->translate[1]));
   radeon_emit(fui(state->scale[2]));
   radeon_emit(fui(state->translate[2]));
   radeon_end();
}

static void gfx6_emit_viewports(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      radeon_begin(cs);
      radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, 6);
      radeon_end();

      gfx6_emit_one_viewport(ctx, &states[0]);
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, SI_MAX_VIEWPORTS * 6);
   radeon_end();

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++)
      gfx6_emit_one_viewport(ctx, &states[i]);
}

static inline void si_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
                                         bool window_space_position, float *zmin, float *zmax)
{
   if (window_space_position) {
      *zmin = 0;
      *zmax = 1;
      return;
   }
   util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
}

static void gfx6_emit_depth_ranges(struct si_context *ctx)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;
   bool clip_halfz = ctx->queued.named.rasterizer->clip_halfz;
   bool window_space = ctx->vs_disables_clipping_viewport;
   float zmin, zmax;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      si_viewport_zmin_zmax(&states[0], clip_halfz, window_space, &zmin, &zmax);

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_0282D0_PA_SC_VPORT_ZMIN_0, 2);
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
      radeon_end();
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_0282D0_PA_SC_VPORT_ZMIN_0, SI_MAX_VIEWPORTS * 2);
   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_viewport_zmin_zmax(&states[i], clip_halfz, window_space, &zmin, &zmax);
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
   }
   radeon_end();
}

static void gfx6_emit_viewport_states(struct si_context *ctx, unsigned index)
{
   gfx6_emit_viewports(ctx);
   gfx6_emit_depth_ranges(ctx);
}

static void gfx12_emit_viewport_states(struct si_context *ctx, unsigned index)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct pipe_viewport_state *states = ctx->viewports.states;
   bool clip_halfz = ctx->queued.named.rasterizer->clip_halfz;
   bool window_space = ctx->vs_disables_clipping_viewport;
   float zmin, zmax;

   /* The simple case: Only 1 viewport is active. */
   if (!ctx->vs_writes_viewport_index) {
      si_viewport_zmin_zmax(&states[0], clip_halfz, window_space, &zmin, &zmax);

      radeon_begin(cs);
      radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, 8);
      radeon_emit(fui(states[0].scale[0]));
      radeon_emit(fui(states[0].translate[0]));
      radeon_emit(fui(states[0].scale[1]));
      radeon_emit(fui(states[0].translate[1]));
      radeon_emit(fui(states[0].scale[2]));
      radeon_emit(fui(states[0].translate[2]));
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
      radeon_end();
      return;
   }

   /* All registers in the array need to be updated if any of them is changed.
    * This is (or was) a hardware requirement.
    */
   radeon_begin(cs);
   radeon_set_context_reg_seq(R_02843C_PA_CL_VPORT_XSCALE, SI_MAX_VIEWPORTS * 8);

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++) {
      si_viewport_zmin_zmax(&states[i], clip_halfz, window_space, &zmin, &zmax);

      radeon_emit(fui(states[i].scale[0]));
      radeon_emit(fui(states[i].translate[0]));
      radeon_emit(fui(states[i].scale[1]));
      radeon_emit(fui(states[i].translate[1]));
      radeon_emit(fui(states[i].scale[2]));
      radeon_emit(fui(states[i].translate[2]));
      radeon_emit(fui(zmin));
      radeon_emit(fui(zmax));
   }
   radeon_end();
}

/**
 * This reacts to 2 state changes:
 * - VS.writes_viewport_index
 * - VS output position in window space (enable/disable)
 *
 * Normally, we only emit 1 viewport and 1 scissor if no shader is using
 * the VIEWPORT_INDEX output, and emitting the other viewports and scissors
 * is delayed. When a shader with VIEWPORT_INDEX appears, this should be
 * called to emit the rest.
 */
void si_update_vs_viewport_state(struct si_context *ctx)
{
   struct si_shader_ctx_state *vs = si_get_vs(ctx);
   struct si_shader_info *info = vs->cso ? &vs->cso->info : NULL;
   bool vs_window_space;

   if (!info)
      return;

   /* When the VS disables clipping and viewport transformation. */
   vs_window_space = vs->cso->stage == MESA_SHADER_VERTEX && info->base.vs.window_space_position;

   if (ctx->vs_disables_clipping_viewport != vs_window_space) {
      ctx->vs_disables_clipping_viewport = vs_window_space;
      si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   }

   /* Viewport index handling. */
   if (ctx->vs_writes_viewport_index == info->writes_viewport_index)
      return;

   /* This changes how the guardband is computed. */
   ctx->vs_writes_viewport_index = info->writes_viewport_index;
   si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);

   /* Emit scissors and viewports that were enabled by having
    * the ViewportIndex output.
    */
   if (info->writes_viewport_index) {
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
   }
}

static void si_emit_window_rectangles(struct si_context *sctx, unsigned index)
{
   /* There are four clipping rectangles. Their corner coordinates are inclusive.
    * Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
    * on whether the pixel is inside cliprects 0-3, respectively. For example,
    * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
    * the number 3 (binary 0011).
    *
    * If CLIPRECT_RULE & (1 << number), the pixel is rasterized.
    */
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   static const unsigned outside[4] = {
      /* outside rectangle 0 */
      V_02820C_OUT | V_02820C_IN_1 | V_02820C_IN_2 | V_02820C_IN_21 | V_02820C_IN_3 |
      V_02820C_IN_31 | V_02820C_IN_32 | V_02820C_IN_321,
      /* outside rectangles 0, 1 */
      V_02820C_OUT | V_02820C_IN_2 | V_02820C_IN_3 | V_02820C_IN_32,
      /* outside rectangles 0, 1, 2 */
      V_02820C_OUT | V_02820C_IN_3,
      /* outside rectangles 0, 1, 2, 3 */
      V_02820C_OUT,
   };
   const unsigned disabled = 0xffff; /* all inside and outside cases */
   unsigned num_rectangles = sctx->num_window_rectangles;
   struct pipe_scissor_state *rects = sctx->window_rectangles;
   unsigned rule;

   assert(num_rectangles <= 4);

   if (num_rectangles == 0)
      rule = disabled;
   else if (sctx->window_rectangles_include)
      rule = ~outside[num_rectangles - 1];
   else
      rule = outside[num_rectangles - 1];
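   /* E.g. with two exclusive rectangles, rule = outside[1]: only pixel numbers
    * with bits 0 and 1 clear (0, 4, 8, 12), i.e. pixels outside both rects 0
    * and 1, are rasterized.
    */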

   if (sctx->gfx_level >= GFX12) {
      radeon_begin(cs);
      gfx12_begin_context_regs();
      gfx12_opt_set_context_reg(R_02820C_PA_SC_CLIPRECT_RULE, SI_TRACKED_PA_SC_CLIPRECT_RULE, rule);

      if (num_rectangles) {
         for (unsigned i = 0; i < num_rectangles; i++) {
            gfx12_set_context_reg(R_028210_PA_SC_CLIPRECT_0_TL + i * 8,
                                  S_028210_TL_X(rects[i].minx) | S_028210_TL_Y(rects[i].miny));
            gfx12_set_context_reg(R_028214_PA_SC_CLIPRECT_0_BR + i * 8,
                                  S_028214_BR_X(rects[i].maxx) | S_028214_BR_Y(rects[i].maxy));
         }

         for (unsigned i = 0; i < num_rectangles; i++) {
            gfx12_set_context_reg(R_028374_PA_SC_CLIPRECT_0_EXT + i * 4,
                                  S_028374_TL_X_EXT(rects[i].minx >> 15) |
                                  S_028374_TL_Y_EXT(rects[i].miny >> 15) |
                                  S_028374_BR_X_EXT(rects[i].maxx >> 15) |
                                  S_028374_BR_Y_EXT(rects[i].maxy >> 15));
         }
      }
      gfx12_end_context_regs();
      radeon_end();
   } else {
      radeon_begin(cs);
      radeon_opt_set_context_reg(R_02820C_PA_SC_CLIPRECT_RULE, SI_TRACKED_PA_SC_CLIPRECT_RULE,
                                 rule);
      if (num_rectangles) {
         radeon_set_context_reg_seq(R_028210_PA_SC_CLIPRECT_0_TL, num_rectangles * 2);
         for (unsigned i = 0; i < num_rectangles; i++) {
            radeon_emit(S_028210_TL_X(rects[i].minx) | S_028210_TL_Y(rects[i].miny));
            radeon_emit(S_028214_BR_X(rects[i].maxx) | S_028214_BR_Y(rects[i].maxy));
         }
      }
      radeon_end();
   }
}

static void si_set_window_rectangles(struct pipe_context *ctx, bool include,
                                     unsigned num_rectangles,
                                     const struct pipe_scissor_state *rects)
{
   struct si_context *sctx = (struct si_context *)ctx;

   sctx->num_window_rectangles = num_rectangles;
   sctx->window_rectangles_include = include;
   if (num_rectangles) {
      memcpy(sctx->window_rectangles, rects, sizeof(*rects) * num_rectangles);
   }

   si_mark_atom_dirty(sctx, &sctx->atoms.s.window_rectangles);
}

void si_init_viewport_functions(struct si_context *ctx)
{
   ctx->atoms.s.guardband.emit = si_emit_guardband;
   ctx->atoms.s.scissors.emit = si_emit_scissors;
   if (ctx->gfx_level >= GFX12)
      ctx->atoms.s.viewports.emit = gfx12_emit_viewport_states;
   else
      ctx->atoms.s.viewports.emit = gfx6_emit_viewport_states;
   ctx->atoms.s.window_rectangles.emit = si_emit_window_rectangles;
   ctx->atoms.s.ngg_cull_state.emit = si_emit_cull_state;

   ctx->b.set_scissor_states = si_set_scissor_states;
   ctx->b.set_viewport_states = si_set_viewport_states;
   ctx->b.set_window_rectangles = si_set_window_rectangles;

   for (unsigned i = 0; i < SI_MAX_VIEWPORTS; i++)
      ctx->viewports.as_scissor[i].quant_mode = SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH;
}