/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "common/intel_l3_config.h"

/**
 * This file implements a few lightweight memcpy operations on the GPU,
 * using the vertex fetcher (VF), a vertex buffer, and streamout (SOL).
 */

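/*
 * Rough usage sketch (the names are the entry points defined below): inside
 * a command buffer,
 *
 *    genX(cmd_buffer_so_memcpy)(cmd_buffer, dst, src, size);
 *
 * or, when building a standalone batch,
 *
 *    struct anv_memcpy_state state;
 *    genX(emit_so_memcpy_init)(&state, device, batch);
 *    genX(emit_so_memcpy)(&state, dst, src, size);
 *    genX(emit_so_memcpy_fini)(&state);
 *
 * where size must be a multiple of 4 bytes.
 */
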
/**
 * Returns the greatest common divisor of a and b that is a power of two.
 * For example, gcd_pow2_u64(16, 36) returns 4.
 */
static uint64_t
gcd_pow2_u64(uint64_t a, uint64_t b)
{
   assert(a > 0 || b > 0);

   unsigned a_log2 = ffsll(a) - 1;
   unsigned b_log2 = ffsll(b) - 1;

   /* If either a or b is 0, then a_log2 or b_log2 will be UINT_MAX, in which
    * case the MIN2() will take the other one.  If both are 0 then we will
    * hit the assert above.
    */
   return 1 << MIN2(a_log2, b_log2);
}

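/**
 * Emit the fixed-function state that every streamout copy in a batch shares:
 * all shader stages are disabled, the URB is set up to hold the VUEs that
 * the VF unit produces, and the topology is a point list.
 */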
static void
emit_common_so_memcpy(struct anv_batch *batch, struct anv_device *device,
                      const struct intel_l3_config *l3_config)
{
#if GFX_VER >= 8
   anv_batch_emit(batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
      vfi.InstancingEnable = false;
      vfi.VertexElementIndex = 0;
   }
   anv_batch_emit(batch, GENX(3DSTATE_VF_SGVS), sgvs);
#endif

   /* Disable all shader stages */
   anv_batch_emit(batch, GENX(3DSTATE_VS), vs);
   anv_batch_emit(batch, GENX(3DSTATE_HS), hs);
   anv_batch_emit(batch, GENX(3DSTATE_TE), te);
   anv_batch_emit(batch, GENX(3DSTATE_DS), ds);
   anv_batch_emit(batch, GENX(3DSTATE_GS), gs);
   anv_batch_emit(batch, GENX(3DSTATE_PS), ps);

   anv_batch_emit(batch, GENX(3DSTATE_SBE), sbe) {
      sbe.VertexURBEntryReadOffset = 1;
      sbe.NumberofSFOutputAttributes = 1;
      sbe.VertexURBEntryReadLength = 1;
#if GFX_VER >= 8
      sbe.ForceVertexURBEntryReadLength = true;
      sbe.ForceVertexURBEntryReadOffset = true;
#endif
   }

   /* Emit URB setup.  We tell it that the VS is active because we want it to
    * allocate space for the VS.  Even though one isn't run, we need VUEs to
    * store the data that VF is going to pass to SOL.
    */
   const unsigned entry_size[4] = { DIV_ROUND_UP(32, 64), 1, 1, 1 };

   genX(emit_urb_setup)(device, batch, l3_config,
                        VK_SHADER_STAGE_VERTEX_BIT, entry_size, NULL);

#if GFX_VER >= 8
   anv_batch_emit(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
      topo.PrimitiveTopologyType = _3DPRIM_POINTLIST;
   }
#endif

   anv_batch_emit(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
      vf.StatisticsEnable = false;
   }
}

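/**
 * Emit one copy of `size` bytes from `src` to `dst`: the source is bound as
 * a vertex buffer, a single vertex element describes one block of up to 16
 * bytes, streamout buffer 0 points at the destination, and a point-list draw
 * of size / block-size vertices makes SOL write the fetched data back out.
 */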
static void
emit_so_memcpy(struct anv_batch *batch, struct anv_device *device,
               struct anv_address dst, struct anv_address src,
               uint32_t size)
{
   /* The maximum copy block size is 4 32-bit components at a time. */
   assert(size % 4 == 0);
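   /* Use the largest power-of-two block (up to 16 bytes) that divides the
    * copy size, so each vertex fetches and streams out one whole block.
    */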
   unsigned bs = gcd_pow2_u64(16, size);

   enum isl_format format;
   switch (bs) {
   case 4:  format = ISL_FORMAT_R32_UINT;          break;
   case 8:  format = ISL_FORMAT_R32G32_UINT;       break;
   case 16: format = ISL_FORMAT_R32G32B32A32_UINT; break;
   default:
      unreachable("Invalid size");
   }

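   /* Bind the source address as vertex buffer 32, the index reserved for
    * this path, so the VF unit fetches the data to be copied.
    */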
   uint32_t *dw;
   dw = anv_batch_emitn(batch, 5, GENX(3DSTATE_VERTEX_BUFFERS));
   GENX(VERTEX_BUFFER_STATE_pack)(batch, dw + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferStartingAddress = src,
         .BufferPitch = bs,
         .MOCS = anv_mocs(device, src.bo, 0),
#if (GFX_VER >= 8)
         .BufferSize = size,
#else
         .EndAddress = anv_address_add(src, size - 1),
#endif
      });

   dw = anv_batch_emitn(batch, 3, GENX(3DSTATE_VERTEX_ELEMENTS));
   GENX(VERTEX_ELEMENT_STATE_pack)(batch, dw + 1,
      &(struct GENX(VERTEX_ELEMENT_STATE)) {
         .VertexBufferIndex = 32,
         .Valid = true,
         .SourceElementFormat = format,
         .SourceElementOffset = 0,
         .Component0Control = (bs >= 4)  ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component1Control = (bs >= 8)  ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component2Control = (bs >= 12) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
         .Component3Control = (bs >= 16) ? VFCOMP_STORE_SRC : VFCOMP_STORE_0,
      });

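   /* Point streamout buffer 0 at the destination range. */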
   anv_batch_emit(batch, GENX(3DSTATE_SO_BUFFER), sob) {
      sob.SOBufferIndex = 0;
      sob.MOCS = anv_mocs(device, dst.bo, ISL_SURF_USAGE_STREAM_OUT_BIT);
      sob.SurfaceBaseAddress = dst;

#if GFX_VER >= 8
      sob.SOBufferEnable = true;
      sob.SurfaceSize = size / 4 - 1;
#else
      sob.SurfacePitch = bs;
      sob.SurfaceEndAddress = anv_address_add(dst, size);
#endif

#if GFX_VER >= 8
      /* As SOL writes out data, it updates the SO_WRITE_OFFSET registers with
       * the end position of the stream.  We need to reset this value to 0 at
       * the beginning of the run or else SOL will start at the offset from
       * the previous draw.
       */
      sob.StreamOffsetWriteEnable = true;
      sob.StreamOffset = 0;
#endif
   }

#if GFX_VER <= 7
   /* The hardware can do this for us on BDW+ (see above) */
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), load) {
      load.RegisterOffset = GENX(SO_WRITE_OFFSET0_num);
      load.DataDWord = 0;
   }
#endif

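   /* A single SO_DECL entry: stream the low bs / 4 components of VUE
    * register 0 to buffer slot 0.
    */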
   dw = anv_batch_emitn(batch, 5, GENX(3DSTATE_SO_DECL_LIST),
                        .StreamtoBufferSelects0 = (1 << 0),
                        .NumEntries0 = 1);
   GENX(SO_DECL_ENTRY_pack)(batch, dw + 3,
      &(struct GENX(SO_DECL_ENTRY)) {
         .Stream0Decl = {
            .OutputBufferSlot = 0,
            .RegisterIndex = 0,
            .ComponentMask = (1 << (bs / 4)) - 1,
         },
      });

   anv_batch_emit(batch, GENX(3DSTATE_STREAMOUT), so) {
      so.SOFunctionEnable = true;
      so.RenderingDisable = true;
      so.Stream0VertexReadOffset = 0;
      so.Stream0VertexReadLength = DIV_ROUND_UP(32, 64);
#if GFX_VER >= 8
      so.Buffer0SurfacePitch = bs;
#else
      so.SOBufferEnable0 = true;
#endif
   }

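   /* Each point copies one bs-byte block, so size / bs vertices cover the
    * whole range.
    */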
   anv_batch_emit(batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType = SEQUENTIAL;
      prim.PrimitiveTopologyType = _3DPRIM_POINTLIST;
      prim.VertexCountPerInstance = size / bs;
      prim.StartVertexLocation = 0;
      prim.InstanceCount = 1;
      prim.StartInstanceLocation = 0;
      prim.BaseVertexLocation = 0;
   }
}

void
genX(emit_so_memcpy_init)(struct anv_memcpy_state *state,
                          struct anv_device *device,
                          struct anv_batch *batch)
{
   memset(state, 0, sizeof(*state));

   state->batch = batch;
   state->device = device;

   const struct intel_l3_config *cfg = intel_get_default_l3_config(device->info);
   genX(emit_l3_config)(batch, device, cfg);

   anv_batch_emit(batch, GENX(PIPELINE_SELECT), ps) {
      ps.PipelineSelection = _3D;
   }

   emit_common_so_memcpy(batch, device, cfg);
}

void
genX(emit_so_memcpy_fini)(struct anv_memcpy_state *state)
{
   genX(emit_apply_pipe_flushes)(state->batch, state->device, _3D,
                                 ANV_PIPE_END_OF_PIPE_SYNC_BIT);

   anv_batch_emit(state->batch, GENX(MI_BATCH_BUFFER_END), end);

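   /* Pad the batch so its total length stays QWord (8 byte) aligned;
    * MI_BATCH_BUFFER_END is a single dword, so an MI_NOOP is needed
    * whenever the length is an odd number of dwords.
    */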
   if ((state->batch->next - state->batch->start) & 4)
      anv_batch_emit(state->batch, GENX(MI_NOOP), noop);
}

void
genX(emit_so_memcpy)(struct anv_memcpy_state *state,
                     struct anv_address dst, struct anv_address src,
                     uint32_t size)
{
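   /* On Gfx8/9 the VF cache only considers the low 32 bits of a vertex
    * buffer address, so if this source range aliases a previously used
    * range we have to stall and invalidate the VF cache before reusing
    * the vertex buffer binding.
    */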
   if (GFX_VER >= 8 && !anv_use_relocations(state->device->physical) &&
       anv_gfx8_9_vb_cache_range_needs_workaround(&state->vb_bound,
                                                  &state->vb_dirty,
                                                  src, size)) {
      genX(emit_apply_pipe_flushes)(state->batch, state->device, _3D,
                                    ANV_PIPE_CS_STALL_BIT |
                                    ANV_PIPE_VF_CACHE_INVALIDATE_BIT);
      memset(&state->vb_dirty, 0, sizeof(state->vb_dirty));
   }

   emit_so_memcpy(state->batch, state->device, dst, src, size);
}

void
genX(cmd_buffer_so_memcpy)(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_address dst, struct anv_address src,
                           uint32_t size)
{
   if (size == 0)
      return;

   if (!cmd_buffer->state.current_l3_config) {
      const struct intel_l3_config *cfg =
         intel_get_default_l3_config(cmd_buffer->device->info);
      genX(cmd_buffer_config_l3)(cmd_buffer, cfg);
   }

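   /* Record the vertex buffer binding for the Gfx8/9 VF cache tracking and
    * flush any pending PIPE_CONTROLs before emitting new pipeline state.
    */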
   genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(cmd_buffer, 32, src, size);
   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   emit_common_so_memcpy(&cmd_buffer->batch, cmd_buffer->device,
                         cmd_buffer->state.current_l3_config);
   emit_so_memcpy(&cmd_buffer->batch, cmd_buffer->device, dst, src, size);

   genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(cmd_buffer, SEQUENTIAL,
                                                       1ull << 32);

   /* Mark the pipeline and the rasterizer-discard dynamic state dirty since
    * we touched 3DSTATE_STREAMOUT.
    */
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
   BITSET_SET(cmd_buffer->vk.dynamic_graphics_state.dirty,
              MESA_VK_DYNAMIC_RS_RASTERIZER_DISCARD_ENABLE);
}