/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "xe/iris_batch.h"

#include "iris_batch.h"
#include "iris_context.h"
#include "iris_screen.h"

#include "common/intel_gem.h"
#include "common/intel_engine.h"
#include "common/xe/intel_device_query.h"
#include "common/xe/intel_engine.h"

#include "drm-uapi/xe_drm.h"
#include "drm-uapi/gpu_scheduler.h"

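/* Map an iris context priority to the drm_sched_priority value expected by
 * the Xe KMD exec queue uAPI.
 */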
static enum drm_sched_priority
iris_context_priority_to_drm_sched_priority(enum iris_context_priority priority)
{
   switch (priority) {
   case IRIS_CONTEXT_HIGH_PRIORITY:
      return DRM_SCHED_PRIORITY_HIGH;
   case IRIS_CONTEXT_LOW_PRIORITY:
      return DRM_SCHED_PRIORITY_MIN;
   case IRIS_CONTEXT_MEDIUM_PRIORITY:
      FALLTHROUGH;
   default:
      return DRM_SCHED_PRIORITY_NORMAL;
   }
}

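/* Create an Xe exec queue for the given engine class, placed on every engine
 * instance of that class, with the given priority. On success the new exec
 * queue id is returned through @exec_queue_id.
 */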
static bool
iris_xe_init_batch(struct iris_bufmgr *bufmgr,
                   struct intel_query_engine_info *engines_info,
                   enum intel_engine_class engine_class,
                   enum iris_context_priority priority, uint32_t *exec_queue_id)
{
   struct drm_xe_engine_class_instance *instances;

   instances = malloc(sizeof(*instances) *
                      intel_engines_count(engines_info, engine_class));
   if (!instances)
      return false;

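   /* Clamp the requested priority to the maximum exec queue priority the KMD
    * reports as allowed for this client.
    */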
   enum drm_sched_priority requested_priority =
      iris_context_priority_to_drm_sched_priority(priority);
   enum drm_sched_priority allowed_priority = DRM_SCHED_PRIORITY_MIN;
   if (requested_priority > DRM_SCHED_PRIORITY_MIN) {
      struct drm_xe_query_config *config;

      config = xe_device_query_alloc_fetch(iris_bufmgr_get_fd(bufmgr),
                                           DRM_XE_DEVICE_QUERY_CONFIG, NULL);
      if (config)
         allowed_priority = config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY];
      free(config);
   }
   if (requested_priority < allowed_priority)
      allowed_priority = requested_priority;

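   /* Gather every engine instance of the requested class as a possible
    * placement for the new exec queue.
    */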
   uint32_t count = 0;
   for (uint32_t i = 0; i < engines_info->num_engines; i++) {
      const struct intel_engine_class_instance engine = engines_info->engines[i];
      if (engine.engine_class != engine_class)
         continue;

      instances[count].engine_class = intel_engine_class_to_xe(engine.engine_class);
      instances[count].engine_instance = engine.engine_instance;
      instances[count++].gt_id = engine.gt_id;
   }
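   /* Chain a SET_PROPERTY extension into the create ioctl so the exec queue
    * priority is set at creation time.
    */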
   struct drm_xe_ext_set_property ext = {
      .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
      .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
      .value = allowed_priority,
   };
   struct drm_xe_exec_queue_create create = {
      .instances = (uintptr_t)instances,
      .vm_id = iris_bufmgr_get_global_vm_id(bufmgr),
      .width = 1,
      .num_placements = count,
      .extensions = (uintptr_t)&ext,
   };
   int ret = intel_ioctl(iris_bufmgr_get_fd(bufmgr),
                         DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
   free(instances);
   if (ret)
      goto error_create_exec_queue;

   /* TODO: handle "protected" context/exec_queue */
   *exec_queue_id = create.exec_queue_id;
error_create_exec_queue:
   return ret == 0;
}

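/* Pick the intel_engine_class backing each iris batch. */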
static void
iris_xe_map_intel_engine_class(struct iris_bufmgr *bufmgr,
                               const struct intel_query_engine_info *engines_info,
                               enum intel_engine_class *engine_classes)
{
   engine_classes[IRIS_BATCH_RENDER] = INTEL_ENGINE_CLASS_RENDER;
   engine_classes[IRIS_BATCH_COMPUTE] = INTEL_ENGINE_CLASS_RENDER;
   engine_classes[IRIS_BATCH_BLITTER] = INTEL_ENGINE_CLASS_COPY;
   STATIC_ASSERT(IRIS_BATCH_COUNT == 3);

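   /* Use a dedicated compute engine for the compute batch when the device
    * exposes one; otherwise compute work stays on the render engine.
    */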
   if (iris_bufmgr_compute_engine_supported(bufmgr))
      engine_classes[IRIS_BATCH_COMPUTE] = INTEL_ENGINE_CLASS_COMPUTE;
}

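/* Create one exec queue per iris batch, using the engine class mapped for the
 * batch and the context priority.
 */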
void iris_xe_init_batches(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   const int fd = iris_bufmgr_get_fd(screen->bufmgr);
   enum intel_engine_class engine_classes[IRIS_BATCH_COUNT];
   struct intel_query_engine_info *engines_info;

   engines_info = intel_engine_get_info(fd, INTEL_KMD_TYPE_XE);
   assert(engines_info);
   if (!engines_info)
      return;
   iris_xe_map_intel_engine_class(bufmgr, engines_info, engine_classes);

   iris_foreach_batch(ice, batch) {
      const enum iris_batch_name name = batch - &ice->batches[0];
      ASSERTED bool ret;

      ret = iris_xe_init_batch(bufmgr, engines_info, engine_classes[name],
                               ice->priority, &batch->xe.exec_queue_id);
      assert(ret);
   }

   free(engines_info);
}

/*
 * Wait for all previous DRM_IOCTL_XE_EXEC calls on the exec queue of this
 * iris_batch to complete.
 */
static void
iris_xe_wait_exec_queue_idle(struct iris_batch *batch)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
   struct iris_syncobj *syncobj = iris_create_syncobj(bufmgr);
   struct drm_xe_sync xe_sync = {
      .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
      .flags = DRM_XE_SYNC_FLAG_SIGNAL,
   };
   struct drm_xe_exec exec = {
      .exec_queue_id = batch->xe.exec_queue_id,
      .num_syncs = 1,
      .syncs = (uintptr_t)&xe_sync,
   };
   int ret;

   if (!syncobj)
      return;

   xe_sync.handle = syncobj->handle;
   /* Use the special exec.num_batch_buffer == 0 handling to get the syncobj
    * signaled when the last DRM_IOCTL_XE_EXEC on this exec queue completes.
    */
   ret = intel_ioctl(iris_bufmgr_get_fd(bufmgr), DRM_IOCTL_XE_EXEC, &exec);
   if (ret == 0) {
      ASSERTED bool success;
      success = iris_wait_syncobj(bufmgr, syncobj, INT64_MAX);
      assert(success);
   } else {
      assert(iris_batch_is_banned(bufmgr, -errno) == true);
   }

   iris_syncobj_destroy(bufmgr, syncobj);
}

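/* Destroy the exec queue backing this batch. */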
static void
iris_xe_destroy_exec_queue(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct drm_xe_exec_queue_destroy destroy = {
      .exec_queue_id = batch->xe.exec_queue_id,
   };
   ASSERTED int ret;

   ret = intel_ioctl(iris_bufmgr_get_fd(bufmgr), DRM_IOCTL_XE_EXEC_QUEUE_DESTROY,
                     &destroy);
   assert(ret == 0);
}

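/* Wait for the batch's exec queue to go idle, then destroy it. */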
void iris_xe_destroy_batch(struct iris_batch *batch)
{
   /* The Xe KMD doesn't refcount anything, so resources could be freed while
    * they are still in use if we don't wait for the exec_queue to be idle.
    */
   iris_xe_wait_exec_queue_idle(batch);
   iris_xe_destroy_exec_queue(batch);
}

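/* Replace the batch's exec queue with a freshly created one and mark the
 * context state as lost so it gets re-emitted.
 */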
bool iris_xe_replace_batch(struct iris_batch *batch)
{
   enum intel_engine_class engine_classes[IRIS_BATCH_COUNT];
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_context *ice = batch->ice;
   struct intel_query_engine_info *engines_info;
   uint32_t new_exec_queue_id;
   bool ret;

   engines_info = intel_engine_get_info(iris_bufmgr_get_fd(bufmgr),
                                        INTEL_KMD_TYPE_XE);
   if (!engines_info)
      return false;
   iris_xe_map_intel_engine_class(bufmgr, engines_info, engine_classes);

   ret = iris_xe_init_batch(bufmgr, engines_info, engine_classes[batch->name],
                            ice->priority, &new_exec_queue_id);
   if (ret) {
      iris_xe_destroy_exec_queue(batch);
      batch->xe.exec_queue_id = new_exec_queue_id;
      iris_lost_context_state(batch);
   }

   free(engines_info);
   return ret;
}