/*
 * Copyright © 2016 Rob Clark <[email protected]>
 * Copyright © 2018 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#define FD_BO_NO_HARDPIN 1

#include "fd6_const.h"
#include "fd6_compute.h"
#include "fd6_pack.h"

/* ir3_const.h is a "template" header: it expands its const-emit helpers in
 * terms of the emit_const_user/emit_const_bo hooks defined in this file.
 */
#define emit_const_user fd6_emit_const_user
#define emit_const_bo fd6_emit_const_bo
#include "ir3_const.h"

/* regid: base const register
 * prsc or dwords: buffer containing constant values
 * sizedwords: size of const value buffer, in dwords
 */
void
fd6_emit_const_user(struct fd_ringbuffer *ring,
                    const struct ir3_shader_variant *v, uint32_t regid,
                    uint32_t sizedwords, const uint32_t *dwords)
{
   emit_const_asserts(ring, v, regid, sizedwords);

   /* NOTE we cheat a bit here, since we know mesa is aligning
    * the size of the user buffer to 16 bytes.  And we want to
    * cut cycles in a hot path.
    */
   uint32_t align_sz = align(sizedwords, 4);

   if (fd6_geom_stage(v->type)) {
      OUT_PKTBUF(ring, CP_LOAD_STATE6_GEOM, dwords, align_sz,
                 CP_LOAD_STATE6_0(.dst_off = regid / 4,
                                  .state_type = ST6_CONSTANTS,
                                  .state_src = SS6_DIRECT,
                                  .state_block = fd6_stage2shadersb(v->type),
                                  .num_unit = DIV_ROUND_UP(sizedwords, 4)),
                 CP_LOAD_STATE6_1(),
                 CP_LOAD_STATE6_2());
   } else {
      OUT_PKTBUF(ring, CP_LOAD_STATE6_FRAG, dwords, align_sz,
                 CP_LOAD_STATE6_0(.dst_off = regid / 4,
                                  .state_type = ST6_CONSTANTS,
                                  .state_src = SS6_DIRECT,
                                  .state_block = fd6_stage2shadersb(v->type),
                                  .num_unit = DIV_ROUND_UP(sizedwords, 4)),
                 CP_LOAD_STATE6_1(),
                 CP_LOAD_STATE6_2());
   }
}
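
/* Illustrative usage of fd6_emit_const_user() (a sketch, not code from the
 * driver): push two vec4s of immediate data starting at const register c4.
 * regid is in dword units, so c4 == regid 16:
 *
 *    uint32_t data[8] = {...};
 *    fd6_emit_const_user(ring, v, 16, 8, data);
 *
 * Both regid and sizedwords are expected to be vec4 aligned (multiples of
 * 4), which emit_const_asserts() checks in debug builds.
 */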

void
fd6_emit_const_bo(struct fd_ringbuffer *ring,
                  const struct ir3_shader_variant *v, uint32_t regid,
                  uint32_t offset, uint32_t sizedwords, struct fd_bo *bo)
{
   uint32_t dst_off = regid / 4;
   assert(dst_off % 4 == 0);
   uint32_t num_unit = DIV_ROUND_UP(sizedwords, 4);
   assert(num_unit % 4 == 0);

   emit_const_asserts(ring, v, regid, sizedwords);

   if (fd6_geom_stage(v->type)) {
      OUT_PKT(ring, CP_LOAD_STATE6_GEOM,
              CP_LOAD_STATE6_0(.dst_off = dst_off,
                               .state_type = ST6_CONSTANTS,
                               .state_src = SS6_INDIRECT,
                               .state_block = fd6_stage2shadersb(v->type),
                               .num_unit = num_unit),
              CP_LOAD_STATE6_EXT_SRC_ADDR(.bo = bo, .bo_offset = offset));
   } else {
      OUT_PKT(ring, CP_LOAD_STATE6_FRAG,
              CP_LOAD_STATE6_0(.dst_off = dst_off,
                               .state_type = ST6_CONSTANTS,
                               .state_src = SS6_INDIRECT,
                               .state_block = fd6_stage2shadersb(v->type),
                               .num_unit = num_unit),
              CP_LOAD_STATE6_EXT_SRC_ADDR(.bo = bo, .bo_offset = offset));
   }
}
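
/* The indirect variant above reads the constants from a BO via SS6_INDIRECT
 * instead of inlining them in the command stream.  Note the stricter
 * alignment the asserts imply: regid must be a multiple of 16 dwords (so
 * that dst_off % 4 == 0) and sizedwords must round up to a multiple of 16
 * dwords (so that num_unit % 4 == 0).  A minimal sketch, assuming a BO
 * holding at least 16 dwords of constant data:
 *
 *    fd6_emit_const_bo(ring, v, 0, 0, 16, bo);
 */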

/* ir3_const.h uses this to know whether it is emitting into a state object
 * or directly into the draw ring; on a6xx all const state is built into
 * state objects, so this is unconditionally true.
 */
static bool
is_stateobj(struct fd_ringbuffer *ring)
{
   return true;
}

static void
emit_const_ptrs(struct fd_ringbuffer *ring, const struct ir3_shader_variant *v,
                uint32_t dst_offset, uint32_t num, struct fd_bo **bos,
                uint32_t *offsets)
{
   unreachable("shouldn't be called on a6xx");
}

static void
emit_stage_tess_consts(struct fd_ringbuffer *ring,
                       const struct ir3_shader_variant *v, uint32_t *params,
                       int num_params)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const unsigned regid = const_state->offsets.primitive_param;
   int size = MIN2(1 + regid, v->constlen) - regid;
   if (size > 0)
      fd6_emit_const_user(ring, v, regid * 4, num_params, params);
}
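
/* The `size` computation above is a bounds check: primitive_param is in
 * vec4 units and the params occupy a single vec4.  With, say, regid == 10
 * and v->constlen == 11, size == MIN2(11, 11) - 10 == 1 and the params are
 * emitted; with constlen == 10 the variant has no room for them and the
 * emit is skipped.
 */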

struct fd_ringbuffer *
fd6_build_tess_consts(struct fd6_emit *emit)
{
   struct fd_context *ctx = emit->ctx;

   struct fd_ringbuffer *constobj = fd_submit_new_ringbuffer(
      ctx->batch->submit, 0x1000, FD_RINGBUFFER_STREAMING);

   /* VS sizes are in bytes since that's what STLW/LDLW use, while the HS
    * size is in dwords, since that's what LDG/STG use.
    */
   unsigned num_vertices = emit->hs ? ctx->patch_vertices
                                    : emit->gs->gs.vertices_in;

   uint32_t vs_params[4] = {
      emit->vs->output_size * num_vertices * 4, /* vs primitive stride */
      emit->vs->output_size * 4,                /* vs vertex stride */
      0, 0};

   emit_stage_tess_consts(constobj, emit->vs, vs_params, ARRAY_SIZE(vs_params));

   if (emit->hs) {
      uint32_t hs_params[4] = {
         emit->vs->output_size * num_vertices * 4, /* vs primitive stride */
         emit->vs->output_size * 4,                /* vs vertex stride */
         emit->hs->output_size, ctx->patch_vertices};

      emit_stage_tess_consts(constobj, emit->hs, hs_params,
                             ARRAY_SIZE(hs_params));

      if (emit->gs)
         num_vertices = emit->gs->gs.vertices_in;

      uint32_t ds_params[4] = {
         emit->ds->output_size * num_vertices * 4, /* ds primitive stride */
         emit->ds->output_size * 4,                /* ds vertex stride */
         emit->hs->output_size,                    /* hs vertex stride (dwords) */
         emit->hs->tess.tcs_vertices_out};

      emit_stage_tess_consts(constobj, emit->ds, ds_params,
                             ARRAY_SIZE(ds_params));
   }

   if (emit->gs) {
      const struct ir3_shader_variant *prev;
      if (emit->ds)
         prev = emit->ds;
      else
         prev = emit->vs;

      uint32_t gs_params[4] = {
         prev->output_size * num_vertices * 4, /* prev (vs or ds) primitive stride */
         prev->output_size * 4,                /* prev (vs or ds) vertex stride */
         0,
         0,
      };

      num_vertices = emit->gs->gs.vertices_in;
      emit_stage_tess_consts(constobj, emit->gs, gs_params,
                             ARRAY_SIZE(gs_params));
   }

   return constobj;
}
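
/* Worked stride example with hypothetical numbers: with vs->output_size ==
 * 8 dwords and 3 vertices per patch, the VS primitive stride is
 * 8 * 3 * 4 == 96 bytes and the VS vertex stride 8 * 4 == 32 bytes; the
 * "* 4" converts dwords to the byte units STLW/LDLW address, while the HS
 * output_size is passed through unscaled since LDG/STG work in dwords.
 */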

static void
fd6_emit_ubos(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
              struct fd_constbuf_stateobj *constbuf)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   int num_ubos = const_state->num_ubos;

   if (!num_ubos)
      return;

   OUT_PKT7(ring, fd6_stage2opcode(v->type), 3 + (2 * num_ubos));
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_UBO) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(fd6_stage2shadersb(v->type)) |
                     CP_LOAD_STATE6_0_NUM_UNIT(num_ubos));
   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (int i = 0; i < num_ubos; i++) {
      /* NIR constant data is packed into the end of the shader. */
      if (i == const_state->consts_ubo.idx) {
         int size_vec4s = DIV_ROUND_UP(v->constant_data_size, 16);
         OUT_RELOC(ring, v->bo, v->info.constant_data_offset,
                   (uint64_t)A6XX_UBO_1_SIZE(size_vec4s) << 32, 0);
         continue;
      }

      struct pipe_constant_buffer *cb = &constbuf->cb[i];

      if (cb->buffer) {
         int size_vec4s = DIV_ROUND_UP(cb->buffer_size, 16);
         OUT_RELOC(ring, fd_resource(cb->buffer)->bo, cb->buffer_offset,
                   (uint64_t)A6XX_UBO_1_SIZE(size_vec4s) << 32, 0);
      } else {
         OUT_RING(ring, 0xbad00000 | (i << 16));
         OUT_RING(ring, A6XX_UBO_1_SIZE(0));
      }
   }
}
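
/* Each UBO descriptor in the ST6_UBO payload above is two dwords: a 64-bit
 * GPU address whose upper bits carry the size in vec4s via A6XX_UBO_1_SIZE.
 * That is why the packet is sized 3 + 2 * num_ubos dwords (three header
 * dwords after the opcode, plus one two-dword descriptor per UBO).  The
 * 0xbad00000 | (i << 16) filler for unbound slots is presumably there so a
 * dangling UBO access stands out in a crash dump.
 */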

unsigned
fd6_user_consts_cmdstream_size(const struct ir3_shader_variant *v)
{
   if (!v)
      return 0;

   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *ubo_state = &const_state->ubo_state;
   unsigned packets, size;

   /* pre-calculate size required for userconst stateobj: */
   ir3_user_consts_size(ubo_state, &packets, &size);

   /* also account for UBO addresses: */
   packets += 1;
   size += 2 * const_state->num_ubos;

   unsigned sizedwords = (4 * packets) + size;
   return sizedwords * 4;
}
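
/* Worked example with made-up numbers: if ir3_user_consts_size() reports 2
 * packets and 24 payload dwords for a variant with 3 UBOs, packets becomes
 * 3 and size 30, so sizedwords == 4 * 3 + 30 == 42 and the function
 * returns 168 bytes.  The 4-dwords-per-packet term covers each
 * CP_LOAD_STATE6 header (the PKT7 opcode dword plus CP_LOAD_STATE6_0/1/2).
 */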

static void
emit_user_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
                 struct fd_constbuf_stateobj *constbuf)
{
   ir3_emit_user_consts(v, ring, constbuf);
   fd6_emit_ubos(v, ring, constbuf);
}

template <fd6_pipeline_type PIPELINE>
struct fd_ringbuffer *
fd6_build_user_consts(struct fd6_emit *emit)
{
   struct fd_context *ctx = emit->ctx;
   unsigned sz = emit->prog->user_consts_cmdstream_size;

   struct fd_ringbuffer *constobj =
      fd_submit_new_ringbuffer(ctx->batch->submit, sz, FD_RINGBUFFER_STREAMING);

   emit_user_consts(emit->vs, constobj, &ctx->constbuf[PIPE_SHADER_VERTEX]);
   if (PIPELINE == HAS_TESS_GS) {
      if (emit->hs) {
         emit_user_consts(emit->hs, constobj,
                          &ctx->constbuf[PIPE_SHADER_TESS_CTRL]);
         emit_user_consts(emit->ds, constobj,
                          &ctx->constbuf[PIPE_SHADER_TESS_EVAL]);
      }
      if (emit->gs) {
         emit_user_consts(emit->gs, constobj,
                          &ctx->constbuf[PIPE_SHADER_GEOMETRY]);
      }
   }
   emit_user_consts(emit->fs, constobj, &ctx->constbuf[PIPE_SHADER_FRAGMENT]);

   return constobj;
}

template struct fd_ringbuffer *fd6_build_user_consts<HAS_TESS_GS>(struct fd6_emit *emit);
template struct fd_ringbuffer *fd6_build_user_consts<NO_TESS_GS>(struct fd6_emit *emit);

template <fd6_pipeline_type PIPELINE>
struct fd_ringbuffer *
fd6_build_driver_params(struct fd6_emit *emit)
{
   struct fd_context *ctx = emit->ctx;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   unsigned num_dp = emit->prog->num_driver_params;

   if (!num_dp) {
      fd6_ctx->has_dp_state = false;
      return NULL;
   }

   unsigned size_dwords = num_dp * (4 + IR3_DP_VS_COUNT); /* 4dw PKT7 header */
   struct fd_ringbuffer *dpconstobj = fd_submit_new_ringbuffer(
      ctx->batch->submit, size_dwords * 4, FD_RINGBUFFER_STREAMING);

   if (emit->vs->need_driver_params) {
      ir3_emit_driver_params(emit->vs, dpconstobj, ctx, emit->info,
                             emit->indirect, emit->draw, emit->draw_id);
   }

   if (PIPELINE == HAS_TESS_GS) {
      if (emit->gs && emit->gs->need_driver_params) {
         ir3_emit_driver_params(emit->gs, dpconstobj, ctx, emit->info,
                                emit->indirect, emit->draw, 0);
      }

      if (emit->hs && emit->hs->need_driver_params) {
         ir3_emit_hs_driver_params(emit->hs, dpconstobj, ctx);
      }

      if (emit->ds && emit->ds->need_driver_params) {
         ir3_emit_driver_params(emit->ds, dpconstobj, ctx, emit->info,
                                emit->indirect, emit->draw, 0);
      }
   }

   fd6_ctx->has_dp_state = true;

   return dpconstobj;
}
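
/* Sizing note: each stage that needs driver params costs one CP_LOAD_STATE6
 * header (4 dwords) plus its payload, so num_dp * (4 + IR3_DP_VS_COUNT)
 * dwords is an upper bound on the buffer, assuming IR3_DP_VS_COUNT is the
 * largest per-stage driver param count.
 */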

template struct fd_ringbuffer *fd6_build_driver_params<HAS_TESS_GS>(struct fd6_emit *emit);
template struct fd_ringbuffer *fd6_build_driver_params<NO_TESS_GS>(struct fd6_emit *emit);

void
fd6_emit_cs_driver_params(struct fd_context *ctx, struct fd_ringbuffer *ring,
                          struct fd6_compute_state *cs,
                          const struct pipe_grid_info *info)
{
   ir3_emit_cs_driver_params(cs->v, ring, ctx, info);
}

void
fd6_emit_cs_user_consts(struct fd_context *ctx, struct fd_ringbuffer *ring,
                        struct fd6_compute_state *cs)
{
   emit_user_consts(cs->v, ring, &ctx->constbuf[PIPE_SHADER_COMPUTE]);
}

void
fd6_emit_immediates(const struct ir3_shader_variant *v,
                    struct fd_ringbuffer *ring)
{
   ir3_emit_immediates(v, ring);
}

void
fd6_emit_link_map(const struct ir3_shader_variant *producer,
                  const struct ir3_shader_variant *consumer,
                  struct fd_ringbuffer *ring)
{
   ir3_emit_link_map(producer, consumer, ring);
}