// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Etnaviv Project
 */

#include <drm/drm_file.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
/*
 * Cmdstream submission:
 */

#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

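/*
 * Allocate a submit with enough trailing space for the variable-length
 * bos[] array (accounted for by size_vstruct()), plus a separate array
 * for the perfmon requests. The submit is refcounted, as the scheduler
 * may keep it alive past the end of the ioctl.
 */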
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
		struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
	struct etnaviv_gem_submit *submit;
	size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));

	submit = kzalloc(sz, GFP_KERNEL);
	if (!submit)
		return NULL;

	submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
			       GFP_KERNEL);
	if (!submit->pmrs) {
		kfree(submit);
		return NULL;
	}
	submit->nr_pmrs = nr_pmrs;

	submit->gpu = gpu;
	kref_init(&submit->refcount);

	return submit;
}

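/*
 * Resolve the userspace BO handles into GEM object references. The whole
 * lookup runs under a single table_lock, which keeps each object alive
 * while we take our own reference on it. nr_bos is updated even on
 * failure, so the cleanup path drops exactly the references taken here.
 */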
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;
		if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
			if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
				DRM_ERROR("invalid softpin address\n");
				ret = -EINVAL;
				goto out_unlock;
			}
			submit->bos[i].va = bo->presumed;
		}

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
		 */
		drm_gem_object_get(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}

static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
	if (submit->bos[i].flags & BO_LOCKED) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		dma_resv_unlock(obj->resv);
		submit->bos[i].flags &= ~BO_LOCKED;
	}
}

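/*
 * Lock all BO reservations using the ww_mutex acquire protocol: on
 * -EDEADLK all locks taken so far are dropped, the contended lock is
 * taken in the slow path and the whole loop is retried with the same
 * acquire ticket, which guarantees forward progress without deadlocks.
 */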
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(obj->resv, ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct drm_gem_object *obj;

		obj = &submit->bos[contended].obj->base;

		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

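/*
 * Make room for the out-fence we will attach to each BO and, unless
 * userspace opted out with ETNA_SUBMIT_NO_IMPLICIT, pull the fences
 * already attached to the reservation objects in as scheduler
 * dependencies, so the job only runs once prior users of the BOs are
 * done.
 */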
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		struct dma_resv *robj = bo->obj->base.resv;

		ret = dma_resv_reserve_fences(robj, 1);
		if (ret)
			return ret;

		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
							      &bo->obj->base,
							      bo->flags & ETNA_SUBMIT_BO_WRITE);
		if (ret)
			return ret;
	}

	return ret;
}

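/*
 * Publish the out-fence on each BO's reservation object, as a write
 * fence when the GPU may modify the BO and as a read fence otherwise,
 * then drop the reservation locks.
 */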
static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;

		dma_resv_add_fence(obj->resv, submit->out_fence, write ?
				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
		submit_unlock_object(submit, i);
	}
}

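/*
 * Pin each BO into the submit's MMU context by taking a reference on
 * its VRAM mapping. For softpin submits the resulting IOVA must match
 * the address userspace asked for, otherwise the submit is rejected.
 */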
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->mmu_context,
						  submit->bos[i].va);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}

		if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
		    submit->bos[i].va != mapping->iova) {
			etnaviv_gem_mapping_unreference(mapping);
			return -EINVAL;
		}

		atomic_inc(&etnaviv_obj->gpu_active);

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}

static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
	struct etnaviv_gem_submit_bo **bo)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
			  idx, submit->nr_bos);
		return -EINVAL;
	}

	*bo = &submit->bos[idx];

	return 0;
}

/* process the relocs and patch up the cmdstream as needed: */
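/*
 * Relocs must be ordered by non-decreasing submit_offset (enforced via
 * last_offset) and are mutually exclusive with softpin, where userspace
 * already knows the final GPU addresses.
 */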
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	/* Submits using softpin don't blend with relocs */
	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
		return -EINVAL;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = r->submit_offset / 4;

		if ((off >= size) ||
		    (off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}

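/*
 * Validate the perfmon requests and copy them into the submit. Offset 0
 * of the target BO is reserved for the sequence number used for
 * userspace synchronization, so it must not be used as a read-back
 * location.
 */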
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
		u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
	u32 i;

	for (i = 0; i < submit->nr_pmrs; i++) {
		const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
		struct etnaviv_gem_submit_bo *bo;
		int ret;

		ret = submit_bo(submit, r->read_idx, &bo);
		if (ret)
			return ret;

		/* at offset 0 a sequence number gets stored, which is used
		 * for userspace sync */
		if (r->read_offset == 0) {
			DRM_ERROR("perfmon request: offset is 0\n");
			return -EINVAL;
		}

		if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
			DRM_ERROR("perfmon request %u: offset outside object\n", i);
			return -EINVAL;
		}

		if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
			DRM_ERROR("perfmon request: flags are not valid\n");
			return -EINVAL;
		}

		if (etnaviv_pm_req_validate(r, exec_state)) {
			DRM_ERROR("perfmon request: domain or signal not valid\n");
			return -EINVAL;
		}

		submit->pmrs[i].flags = r->flags;
		submit->pmrs[i].domain = r->domain;
		submit->pmrs[i].signal = r->signal;
		submit->pmrs[i].sequence = r->sequence;
		submit->pmrs[i].offset = r->read_offset;
		submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
	}

	return 0;
}

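/*
 * Final unwind, called when the last reference to the submit is
 * dropped: free the command buffer, release the MMU contexts, unpin and
 * unlock all BOs, drop their references, and remove the out-fence from
 * the user fence lookup before releasing it.
 */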
static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
			container_of(kref, struct etnaviv_gem_submit, refcount);
	unsigned i;

	if (submit->cmdbuf.suballoc)
		etnaviv_cmdbuf_free(&submit->cmdbuf);

	if (submit->mmu_context)
		etnaviv_iommu_context_put(submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* unpin all objects */
		if (submit->bos[i].flags & BO_PINNED) {
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
			atomic_dec(&etnaviv_obj->gpu_active);
			submit->bos[i].mapping = NULL;
			submit->bos[i].flags &= ~BO_PINNED;
		}

		/* if the GPU submit failed, objects might still be locked */
		submit_unlock_object(submit, i);
		drm_gem_object_put(&etnaviv_obj->base);
	}

	wake_up_all(&submit->gpu->fence_event);

	if (submit->out_fence) {
		/*
		 * Remove from the user fence array before dropping the
		 * reference, so the fence can not be found in a lookup
		 * anymore.
		 */
		xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
		dma_fence_put(submit->out_fence);
	}

	put_pid(submit->pid);

	kfree(submit->pmrs);
	kfree(submit);
}

void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}

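/*
 * Main submit ioctl: validate the arguments, copy the BO, reloc, perfmon
 * and cmdstream arrays from userspace, then build up the submit object
 * step by step (lookup, command validation, pinning, relocation, locking,
 * fence sync) before handing it off to the scheduler.
 */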
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
	    priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
		DRM_ERROR("softpin requested on incompatible MMU\n");
		return -EINVAL;
	}

	if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
	    args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
		DRM_ERROR("submit arguments out of size limits\n");
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

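	/*
	 * copy_from_user() returns the number of bytes that could not be
	 * copied, so any non-zero return value is converted to -EFAULT.
	 */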
	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	/*
	 * Take the pid reference only once the submit exists, so it is
	 * always released through submit_cleanup() and not leaked on the
	 * early error paths above.
	 */
	submit->pid = get_pid(task_pid(current));

	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_put;

	submit->ctx = file->driver_priv;
	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = drm_sched_job_init(&submit->sched_job,
				 &ctx->sched_entity[args->pipe],
				 1, submit->ctx);
	if (ret)
		goto err_submit_put;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_job;

	if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
	    !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_job;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence) {
			ret = -EINVAL;
			goto err_submit_job;
		}

		ret = drm_sched_job_add_dependency(&submit->sched_job,
						   in_fence);
		if (ret)
			goto err_submit_job;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_job;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_job;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_job;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_job;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_job;

	ret = etnaviv_sched_push_job(submit);
	if (ret)
		goto err_submit_job;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			/*
			 * When this late error is hit, the submit has already
			 * been handed over to the scheduler. At this point
			 * the sched_job must not be cleaned up.
			 */
			goto err_submit_put;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

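	/*
	 * Common exit path, also taken on success with ret == 0: the job is
	 * only cleaned up if it failed before being pushed to the scheduler
	 * (the late sync_file error above deliberately skips this label),
	 * and the ioctl's submit reference is dropped in all cases.
	 */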
err_submit_job:
	if (ret)
		drm_sched_job_cleanup(&submit->sched_job);
err_submit_put:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	kvfree(stream);
	kvfree(bos);
	kvfree(relocs);
	kvfree(pmrs);

	return ret;
}