/*
 * Copyright © 2021 Ilia Mirkin <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Ilia Mirkin <[email protected]>
 */

#include "pipe/p_state.h"

#include "freedreno_resource.h"

#include "fd4_compute.h"
#include "fd4_context.h"
#include "fd4_emit.h"

/* maybe move to fd4_program? */
static void
cs_program_emit(struct fd_ringbuffer *ring, struct ir3_shader_variant *v)
{
   const struct ir3_info *i = &v->info;
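   /* wave width is decided by the compiler: "double threadsize" means the
    * shader runs at the full four-quad wave, otherwise use the half wave:
    */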
   enum a3xx_threadsize thrsz = i->double_threadsize ? FOUR_QUADS : TWO_QUADS;
   unsigned instrlen = v->instrlen;

   /* XXX verify that this is the case on a4xx */
   /* if shader is more than 32*16 instructions, don't preload it.  Similar
    * to the combined restriction of 64*16 for VS+FS
    */
   if (instrlen > 32)
      instrlen = 0;

   OUT_PKT0(ring, REG_A4XX_SP_SP_CTRL_REG, 1);
   OUT_RING(ring, 0x00860010); /* SP_SP_CTRL_REG */

   OUT_PKT0(ring, REG_A4XX_HLSQ_CONTROL_0_REG, 1);
   OUT_RING(ring, A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(TWO_QUADS) |
                     A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT |
                     0x000001a0 /* XXX */);

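   /* the register footprints are the highest used register index plus
    * one, for half and full registers respectively:
    */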
   OUT_PKT0(ring, REG_A4XX_SP_CS_CTRL_REG0, 1);
   OUT_RING(ring, A4XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
                     A4XX_SP_CS_CTRL_REG0_SUPERTHREADMODE |
                     A4XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
                     A4XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1));

   OUT_PKT0(ring, REG_A4XX_HLSQ_UPDATE_CONTROL, 1);
   OUT_RING(ring, 0x00000038); /* HLSQ_UPDATE_CONTROL */

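   /* v->constlen is in units of vec4; the CONSTLENGTH field appears to
    * count groups of four vec4s, hence the divide by four:
    */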
   OUT_PKT0(ring, REG_A4XX_HLSQ_CS_CONTROL_REG, 1);
   OUT_RING(ring, A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(0) |
                     A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(0) |
                     A4XX_HLSQ_CS_CONTROL_REG_ENABLED |
                     A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(1) |
                     COND(v->has_ssbo, A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE) |
                     A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(v->constlen / 4));

   uint32_t driver_param_base = v->const_state->offsets.driver_param * 4;
   uint32_t local_invocation_id, work_group_id, local_group_size_id,
      num_wg_id, work_dim_id, unused_id;
   local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = driver_param_base + IR3_DP_WORKGROUP_ID_X;
   num_wg_id = driver_param_base + IR3_DP_NUM_WORK_GROUPS_X;
   local_group_size_id = driver_param_base + IR3_DP_LOCAL_GROUP_SIZE_X;
   work_dim_id = driver_param_base + IR3_DP_WORK_DIM;
   /* NOTE: At some point we'll want to use this, it's probably WGOFFSETCONSTID */
   unused_id = driver_param_base + IR3_DP_BASE_GROUP_X;

   OUT_PKT0(ring, REG_A4XX_HLSQ_CL_CONTROL_0, 2);
   OUT_RING(ring, A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(work_group_id) |
                     A4XX_HLSQ_CL_CONTROL_0_KERNELDIMCONSTID(work_dim_id) |
                     A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(local_invocation_id));
   OUT_RING(ring, A4XX_HLSQ_CL_CONTROL_1_UNK0CONSTID(unused_id) |
                     A4XX_HLSQ_CL_CONTROL_1_WORKGROUPSIZECONSTID(local_group_size_id));

   OUT_PKT0(ring, REG_A4XX_HLSQ_CL_KERNEL_CONST, 1);
   OUT_RING(ring, A4XX_HLSQ_CL_KERNEL_CONST_UNK0CONSTID(unused_id) |
                     A4XX_HLSQ_CL_KERNEL_CONST_NUMWGCONSTID(num_wg_id));

   OUT_PKT0(ring, REG_A4XX_HLSQ_CL_WG_OFFSET, 1);
   OUT_RING(ring, A4XX_HLSQ_CL_WG_OFFSET_UNK0CONSTID(unused_id));

   OUT_PKT0(ring, REG_A4XX_HLSQ_MODE_CONTROL, 1);
   OUT_RING(ring, 0x00000003); /* HLSQ_MODE_CONTROL */

   OUT_PKT0(ring, REG_A4XX_HLSQ_UPDATE_CONTROL, 1);
   OUT_RING(ring, 0x00000000); /* HLSQ_UPDATE_CONTROL */

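   /* emit the GPU address of the shader binary: */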
   OUT_PKT0(ring, REG_A4XX_SP_CS_OBJ_START, 1);
   OUT_RELOC(ring, v->bo, 0, 0, 0); /* SP_CS_OBJ_START */

   OUT_PKT0(ring, REG_A4XX_SP_CS_LENGTH_REG, 1);
   OUT_RING(ring, v->instrlen);

   if (instrlen > 0)
      fd4_emit_shader(ring, v);
}

static void
fd4_launch_grid(struct fd_context *ctx,
                const struct pipe_grid_info *info) assert_dt
{
   struct fd4_context *fd4_ctx = fd4_context(ctx);
   struct ir3_shader_key key = {
      .has_per_samp = fd4_ctx->castc_srgb,
      .fastc_srgb = fd4_ctx->castc_srgb,
   };
   struct ir3_shader *shader = ir3_get_shader(ctx->compute);
   struct ir3_shader_variant *v;
   struct fd_ringbuffer *ring = ctx->batch->draw;
   unsigned nglobal = 0;

   if (ir3_get_shader_info(ctx->compute)->uses_texture_gather) {
      key.has_per_samp = true;
      memcpy(key.fsampler_swizzles, fd4_ctx->csampler_swizzles,
             sizeof(key.fsampler_swizzles));
   }

   v = ir3_shader_variant(shader, key, false, &ctx->debug);
   if (!v)
      return;

   if (ctx->dirty_shader[PIPE_SHADER_COMPUTE] & FD_DIRTY_SHADER_PROG)
      cs_program_emit(ring, v);

   fd4_emit_cs_state(ctx, ring, v);
   fd4_emit_cs_consts(v, ring, ctx, info);

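   /* count how many global buffer bindings are enabled: */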
   u_foreach_bit (i, ctx->global_bindings.enabled_mask)
      nglobal++;

   if (nglobal > 0) {
      /* global resources don't otherwise get an OUT_RELOC(), since
       * the raw ptr address is emitted in ir3_emit_cs_consts().
       * So to make the kernel aware that these buffers are referenced
       * by the batch, emit dummy relocs as part of a no-op packet
       * payload:
       */
      OUT_PKT3(ring, CP_NOP, 2 * nglobal);
      u_foreach_bit (i, ctx->global_bindings.enabled_mask) {
         struct pipe_resource *prsc = ctx->global_bindings.buf[i];
         OUT_RELOC(ring, fd_resource(prsc)->bo, 0, 0, 0);
      }
   }

   const unsigned *local_size =
      info->block; // v->shader->nir->info->workgroup_size;
   const unsigned *num_groups = info->grid;
   /* for some reason, mesa/st doesn't set info->work_dim, so just assume 3: */
   const unsigned work_dim = info->work_dim ? info->work_dim : 3;
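   /* local sizes are encoded minus one; global sizes are in invocations,
    * i.e. workgroup count times local size:
    */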
   OUT_PKT0(ring, REG_A4XX_HLSQ_CL_NDRANGE_0, 7);
   OUT_RING(ring, A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(work_dim) |
                     A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
                     A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
                     A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
   OUT_RING(ring,
            A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(local_size[0] * num_groups[0]));
   OUT_RING(ring, 0); /* HLSQ_CL_NDRANGE_2_GLOBALOFF_X */
   OUT_RING(ring,
            A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(local_size[1] * num_groups[1]));
   OUT_RING(ring, 0); /* HLSQ_CL_NDRANGE_4_GLOBALOFF_Y */
   OUT_RING(ring,
            A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(local_size[2] * num_groups[2]));
   OUT_RING(ring, 0); /* HLSQ_CL_NDRANGE_6_GLOBALOFF_Z */

   if (info->indirect) {
      struct fd_resource *rsc = fd_resource(info->indirect);

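      /* flush and wait-for-idle so the CP sees up-to-date workgroup
       * counts in the indirect buffer:
       */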
      fd_event_write(ctx->batch, ring, CACHE_FLUSH);
      fd_wfi(ctx->batch, ring);

      OUT_PKT3(ring, CP_EXEC_CS_INDIRECT, 3);
      OUT_RING(ring, 0x00000000);
      OUT_RELOC(ring, rsc->bo, info->indirect_offset, 0, 0);
      OUT_RING(ring,
               A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(local_size[0] - 1) |
               A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(local_size[1] - 1) |
               A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(local_size[2] - 1));
   } else {
      OUT_PKT3(ring, CP_EXEC_CS, 4);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(info->grid[0]));
      OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(info->grid[1]));
      OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(info->grid[2]));
   }
}

void
fd4_compute_init(struct pipe_context *pctx) disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->launch_grid = fd4_launch_grid;
   pctx->create_compute_state = ir3_shader_compute_state_create;
   pctx->delete_compute_state = ir3_shader_state_delete;
}