/*
 * Copyright © 2024 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */

/*
 * nouveau DRM back-end for the NVKMD kernel-mode-driver abstraction.
 *
 * Each struct below embeds the corresponding nvkmd base object
 * (nvkmd_pdev, nvkmd_dev, nvkmd_mem, nvkmd_va, nvkmd_ctx) as its first
 * member and adds the nouveau/DRM-specific state.  The
 * NVKMD_DECL_SUBCLASS() invocations presumably declare the usual
 * base<->subclass cast helpers -- see nvkmd/nvkmd.h for the definition.
 */
#ifndef NVKMD_DRM_H
#define NVKMD_DRM_H 1

#include "nvkmd/nvkmd.h"
#include "vk_drm_syncobj.h"
#include "util/vma.h"

#include "drm-uapi/nouveau_drm.h"

#include <sys/types.h>

/* Forward declarations of the nouveau winsys types; only pointers are
 * stored here, so the full definitions are not needed in this header.
 */
struct nouveau_ws_bo;
struct nouveau_ws_context;
struct nouveau_ws_device;

/* nouveau implementation of an NVKMD physical device */
struct nvkmd_nouveau_pdev {
   struct nvkmd_pdev base;

   /* Used for get_vram_used() */
   struct nouveau_ws_device *ws_dev;

   /* File descriptor for the DRM primary node.
    * NOTE(review): assumed to be the primary (as opposed to render) node
    * from the name -- confirm against the .c file.
    */
   int primary_fd;

   /* DRM syncobj vk_sync implementation for this device, plus the
    * sync-type list handed to the Vulkan runtime.  The array has room
    * for one entry and what is presumably a NULL terminator -- TODO
    * confirm in nvkmd_nouveau_try_create_pdev().
    */
   struct vk_sync_type syncobj_sync_type;
   const struct vk_sync_type *sync_types[2];
};

NVKMD_DECL_SUBCLASS(pdev, nouveau);

VkResult nvkmd_nouveau_try_create_pdev(struct _drmDevice *drm_device,
                                       struct vk_object_base *log_obj,
                                       enum nvk_debug debug_flags,
                                       struct nvkmd_pdev **pdev_out);

/* GPU virtual-address ranges carved out for the two VMA heaps below.
 * The general-purpose heap covers [4 KiB, 1 << 38); the replay heap
 * (addresses that can be handed back at a fixed location, presumably
 * for VK_EXT_descriptor_buffer-style capture/replay -- confirm) covers
 * everything from there up to the start of the kernel's reserved VA
 * region.
 */
#define NVKMD_NOUVEAU_HEAP_START ((uint64_t)4096)
#define NVKMD_NOUVEAU_HEAP_END ((uint64_t)(1ull << 38))
#define NVKMD_NOUVEAU_REPLAY_HEAP_START NVKMD_NOUVEAU_HEAP_END
#define NVKMD_NOUVEAU_REPLAY_HEAP_END \
   ((uint64_t)NOUVEAU_WS_DEVICE_KERNEL_RESERVATION_START)

/* nouveau implementation of an NVKMD logical device */
struct nvkmd_nouveau_dev {
   struct nvkmd_dev base;

   struct nouveau_ws_device *ws_dev;

   /* Protects the two VA heaps below (name suggests so -- verify
    * lock/alloc pairing in the .c file).
    */
   simple_mtx_t heap_mutex;
   struct util_vma_heap heap;
   struct util_vma_heap replay_heap;
};

NVKMD_DECL_SUBCLASS(dev, nouveau);

VkResult nvkmd_nouveau_create_dev(struct nvkmd_pdev *pdev,
                                  struct vk_object_base *log_obj,
                                  struct nvkmd_dev **dev_out);

/* nouveau implementation of an NVKMD memory allocation, backed by a
 * winsys buffer object.
 */
struct nvkmd_nouveau_mem {
   struct nvkmd_mem base;

   struct nouveau_ws_bo *bo;
};

NVKMD_DECL_SUBCLASS(mem, nouveau);

VkResult nvkmd_nouveau_alloc_mem(struct nvkmd_dev *dev,
                                 struct vk_object_base *log_obj,
                                 uint64_t size_B, uint64_t align_B,
                                 enum nvkmd_mem_flags flags,
                                 struct nvkmd_mem **mem_out);

/* Like nvkmd_nouveau_alloc_mem() but with an explicit PTE kind and tile
 * mode for tiled images.
 */
VkResult nvkmd_nouveau_alloc_tiled_mem(struct nvkmd_dev *dev,
                                       struct vk_object_base *log_obj,
                                       uint64_t size_B, uint64_t align_B,
                                       uint8_t pte_kind, uint16_t tile_mode,
                                       enum nvkmd_mem_flags flags,
                                       struct nvkmd_mem **mem_out);

VkResult nvkmd_nouveau_import_dma_buf(struct nvkmd_dev *dev,
                                      struct vk_object_base *log_obj,
                                      int fd, struct nvkmd_mem **mem_out);

/* nouveau implementation of an NVKMD virtual-address range.  No extra
 * state beyond the base object.
 */
struct nvkmd_nouveau_va {
   struct nvkmd_va base;
};

NVKMD_DECL_SUBCLASS(va, nouveau);

VkResult nvkmd_nouveau_alloc_va(struct nvkmd_dev *dev,
                                struct vk_object_base *log_obj,
                                enum nvkmd_va_flags flags, uint8_t pte_kind,
                                uint64_t size_B, uint64_t align_B,
                                uint64_t fixed_addr, struct nvkmd_va **va_out);

/* Capacities of the fixed request-staging arrays in the context structs
 * below.
 */
#define NVKMD_NOUVEAU_MAX_SYNCS 256
#define NVKMD_NOUVEAU_MAX_BINDS 4096
#define NVKMD_NOUVEAU_MAX_PUSH 1024

/* Context used for command-buffer execution.  The req_* members are
 * pre-allocated argument structures for the nouveau DRM_NOUVEAU_EXEC
 * uAPI (types come from drm-uapi/nouveau_drm.h): wait syncs, signal
 * syncs, pushbuf entries, and the top-level exec request.
 */
struct nvkmd_nouveau_exec_ctx {
   struct nvkmd_ctx base;

   struct nouveau_ws_device *ws_dev;
   struct nouveau_ws_context *ws_ctx;

   uint32_t syncobj;

   /* Per-submit pushbuf limit; presumably <= NVKMD_NOUVEAU_MAX_PUSH and
    * taken from the kernel -- confirm where it is initialized.
    */
   uint32_t max_push;

   struct drm_nouveau_sync req_wait[NVKMD_NOUVEAU_MAX_SYNCS];
   struct drm_nouveau_sync req_sig[NVKMD_NOUVEAU_MAX_SYNCS];
   struct drm_nouveau_exec_push req_push[NVKMD_NOUVEAU_MAX_PUSH];
   struct drm_nouveau_exec req;
};

NVKMD_DECL_SUBCLASS(ctx, nouveau_exec);

/* Context used for sparse/VM binding.  Same staging pattern as the exec
 * context, but for the DRM_NOUVEAU_VM_BIND uAPI.
 */
struct nvkmd_nouveau_bind_ctx {
   struct nvkmd_ctx base;

   struct nouveau_ws_device *ws_dev;

   struct drm_nouveau_sync req_wait[NVKMD_NOUVEAU_MAX_SYNCS];
   struct drm_nouveau_sync req_sig[NVKMD_NOUVEAU_MAX_SYNCS];
   struct drm_nouveau_vm_bind_op req_ops[NVKMD_NOUVEAU_MAX_BINDS];
   struct drm_nouveau_vm_bind req;
};

NVKMD_DECL_SUBCLASS(ctx, nouveau_bind);

VkResult nvkmd_nouveau_create_ctx(struct nvkmd_dev *dev,
                                  struct vk_object_base *log_obj,
                                  enum nvkmd_engines engines,
                                  struct nvkmd_ctx **ctx_out);

#endif /* NVKMD_DRM_H */