/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_gem_ttm_helper.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	ret = nouveau_ttm_fault_reserve_notify(bo);
	if (ret)
		goto error_unlock;

	nouveau_bo_del_io_reserve_lru(bo);
	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
	nouveau_bo_add_io_reserve_lru(bo);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

error_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
	.fault = nouveau_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
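
/* GEM object free callback; releases the TTM reference that backs the
 * object, with the device runtime-resumed for the duration of the release.
 */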
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES)) {
		pm_runtime_put_autosuspend(dev);
		return;
	}

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
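
/* Called for each open handle on the object. On chipsets with a per-client
 * address space (NV50+), allocate a VMA for this client, unless the client
 * manages its address space itself via the VM_BIND (uvmm) uAPI. Buffers
 * created with NO_SHARE may only be opened by a client whose GPUVM owns
 * the buffer's reservation object.
 */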
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	if (nvbo->no_share && uvmm &&
	    drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv)
		return -EPERM;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);
		goto out;
	}

	/* only create a VMA on binding */
	if (!nouveau_cli_uvmm(cli))
		ret = nouveau_vma_new(nvbo, vmm, &vma);
	else
		ret = 0;
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
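
/* VMA teardown. If the mapping is still in use by the GPU, defer the final
 * unmap and free to a nouveau_cli_work item that runs after the VMA's last
 * fence signals; otherwise tear it down immediately.
 */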
struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
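
/* Called when a client drops a handle to the object; releases the client's
 * reference on its VMA and unmaps it once the last reference is gone.
 */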
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	if (nouveau_cli_uvmm(cli))
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
	.free = nouveau_gem_object_del,
	.open = nouveau_gem_object_open,
	.close = nouveau_gem_object_close,
	.export = nouveau_gem_prime_export,
	.pin = nouveau_gem_prime_pin,
	.unpin = nouveau_gem_prime_unpin,
	.get_sg_table = nouveau_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.vm_ops = &nouveau_ttm_vm_ops,
};
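
/* Allocate a new nouveau_bo with an embedded GEM object; the caller gets a
 * single GEM reference rather than a TTM reference. NO_SHARE buffers are
 * created against the DMA reservation object of the client's GPUVM and so
 * remain private to that VM.
 */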
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
	struct dma_resv *resv = NULL;
	struct nouveau_bo *nvbo;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
		if (unlikely(!uvmm))
			return -EINVAL;

		resv = drm_gpuvm_resv(&uvmm->base);
	}

	if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
		domain |= NOUVEAU_GEM_DOMAIN_CPU;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
				tile_flags, false);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
	nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		drm_gem_object_release(&nvbo->bo.base);
		kfree(nvbo);
		return ret;
	}

	if (resv)
		dma_resv_lock(resv, NULL);

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);

	if (resv)
		dma_resv_unlock(resv);

	if (ret)
		return ret;

	/* We restrict allowed domains on nv50+ to only the types that were
	 * requested at creation time. Not possible on earlier chips without
	 * busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	if (nvbo->no_share) {
		nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base);
		drm_gem_object_get(nvbo->r_obj);
	}

	*pnvbo = nvbo;
	return 0;
}
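
/* Fill in the drm_nouveau_gem_info reply for a buffer: domain, offset (the
 * GPU virtual address on NV50+ legacy clients), mmap handle and tiling
 * state.
 */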
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
	    !nouveau_cli_uvmm(cli)) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	} else
		rep->offset = 0;

	rep->size = nvbo->bo.base.size;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}
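
/* DRM_IOCTL_NOUVEAU_GEM_NEW: allocate a buffer object and return a handle
 * plus its buffer info to userspace.
 */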
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	/* If uvmm wasn't initialized until now, disable it completely to
	 * prevent userspace from mixing up UAPIs.
	 */
	nouveau_cli_disable_uvmm_noinit(cli);

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&nvbo->bo.base);
	return ret;
}
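
/* Pick a preferred placement from the domains requested for this buffer,
 * favouring wherever the buffer currently resides to avoid needless
 * migration.
 */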
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_domains = 0;

	if (!domains)
		return -EINVAL;

	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->resource->mem_type == TTM_PL_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->resource->mem_type == TTM_PL_TT)
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

	else
		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

	nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}
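
/* Look up and reserve every buffer on the pushbuf's validation list. Uses a
 * ww_acquire ticket so that on contention (-EDEADLK) all reservations are
 * backed off, the contended buffer is re-reserved via the slowpath, and the
 * whole list is retried.
 */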
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}
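
/* Validate each reserved buffer into an allowed placement and synchronise
 * against its previous fences. Returns the number of buffers whose presumed
 * offset went stale (pre-Tesla only), i.e. how many relocations will need
 * to be applied, or a negative error code.
 */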
static int
validate_list(struct nouveau_channel *chan,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_cli *cli = chan->cli;
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->offset == b->presumed.offset &&
			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}
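
/* Reserve and validate the full buffer list for a pushbuf submission,
 * flagging whether any relocations need to be applied afterwards.
 */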
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	} else if (ret > 0) {
		*apply_relocs = true;
	}

	return 0;
}
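
/* Apply userspace-supplied relocations by patching presumed buffer
 * addresses directly into the (kmapped) push buffer contents, after waiting
 * for any outstanding GPU access to the buffer being patched.
 */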
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;
		long lret;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.base.size)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
					     DMA_RESV_USAGE_BOOKKEEP,
					     false, 15 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;

		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
				  ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	return ret;
}
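
/* DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: the legacy command submission path. Copies
 * in the push/buffer/reloc arrays, validates all buffers, applies
 * relocations if needed, submits the push buffers to the channel, and
 * fences the whole operation. Unavailable for VM_BIND (uvmm) clients.
 */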
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (unlikely(nouveau_cli_uvmm(cli)))
		return nouveau_abi16_put(abi16, -ENOSYS);

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}
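
	/* Submit the push buffers to the channel; the method depends on the
	 * hardware generation: IB-ring entries referencing the buffers
	 * directly, a CALL into each buffer (nv25+), or a JUMP into each
	 * buffer with a patched-in jump back to the main ring (older chips).
	 */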
	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			u64 addr = vma->addr + push[i].offset;
			u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
			bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;

			nv50_dma_push(chan, addr, length, no_prefetch);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			PUSH_CALL(&chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(&chan->chan.push, 0);
		}
	} else {
		ret = PUSH_WAIT(&chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  PFN_UP(nvbo->bo.base.size),
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			PUSH_JUMP(&chan->chan.push, nvbo->offset + push[i].offset);
			PUSH_DATA(&chan->chan.push, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				PUSH_DATA(&chan->chan.push, 0);
		}
	}

	ret = nouveau_fence_new(&fence, chan);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
	}
out_prevalid:
	if (!IS_ERR(reloc))
		u_free(reloc);
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}
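
/* DRM_IOCTL_NOUVEAU_GEM_CPU_PREP: wait for GPU access to the buffer to
 * finish (30s timeout, or none with NOWAIT) and make it coherent for CPU
 * access.
 */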
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
				     dma_resv_usage_rw(write), true,
				     no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put(gem);

	return ret;
}
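
/* DRM_IOCTL_NOUVEAU_GEM_CPU_FINI: give the buffer back to the device after
 * CPU access is done.
 */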
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put(gem);
	return 0;
}
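
/* DRM_IOCTL_NOUVEAU_GEM_INFO: look up a handle and return the buffer's
 * info to userspace.
 */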
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put(gem);
	return ret;
}