/*
 * Copyright © 2023 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "i915/anv_gem.h"
#include "anv_private.h"

#include "drm-uapi/i915_drm.h"

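/* Thin wrappers around the i915 GEM ioctls used by the anv Vulkan driver. */
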
int
anv_i915_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   if (!device->info->has_tiling_uapi)
      return -1;

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   /* FIXME: On discrete platforms DRM_IOCTL_I915_GEM_GET_TILING no longer
    * exists, so we will need another way to query the tiling there.
    * Apparently this path is only used by Android code, so the tiling mode
    * may need to be communicated through some other mechanism.
    */
   if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

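   /* tiling_mode is one of the I915_TILING_NONE, I915_TILING_X or
    * I915_TILING_Y values defined in i915_drm.h.
    */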
   return get_tiling.tiling_mode;
}

int
anv_i915_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                        uint32_t stride, uint32_t tiling)
{
   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
    * nothing needs to be done.
    */
   if (!device->info->has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so the struct below
    * should not be reused after the ioctl call.
    */
   struct drm_i915_gem_set_tiling set_tiling = {
      .handle = gem_handle,
      .tiling_mode = tiling,
      .stride = stride,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
}

int
anv_i915_gem_wait(struct anv_device *device, uint32_t gem_handle,
                  int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
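   /* The kernel writes the remaining wait time back into timeout_ns, so pass
    * it back to the caller.
    */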
   *timeout_ns = wait.timeout_ns;

   return ret;
}

VkResult
anv_i915_gem_import_bo_alloc_flags_to_bo_flags(struct anv_device *device,
                                               struct anv_bo *bo,
                                               enum anv_bo_alloc_flags alloc_flags,
                                               uint32_t *out_bo_flags)
{
   const uint32_t bo_flags =
      device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
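   /* A freshly imported BO has no references yet and therefore no existing
    * flags to merge with, so use the computed flags as-is.
    */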
   if (bo->refcount == 0) {
      *out_bo_flags = bo_flags;
      return VK_SUCCESS;
   }

   /* We have to be careful how we combine flags so that it makes sense.
    * Really, though, if we get to this case and it actually matters, the
    * client has imported a BO twice in different ways and they get what
    * they have coming.
    */
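   /* WRITE, PINNED and CAPTURE are kept if either import requested them;
    * ASYNC and 48-bit address support are kept only if both imports agree.
    */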
   uint32_t new_flags = 0;
   new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_WRITE;
   new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_ASYNC;
   new_flags |= (bo->flags & bo_flags) & EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
   new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_PINNED;
   new_flags |= (bo->flags | bo_flags) & EXEC_OBJECT_CAPTURE;

   /* It's theoretically possible for a BO to get imported such that it's
    * both pinned and not pinned. The only way this can happen is if it
    * gets imported as both a semaphore and a memory object and that would
    * be an application error. Just fail out in that case.
    */
   if ((bo->flags & EXEC_OBJECT_PINNED) !=
       (bo_flags & EXEC_OBJECT_PINNED))
      return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                       "The same BO was imported two different ways");

   /* It's also theoretically possible that someone could export a BO from
    * one heap and import it into another or to import the same BO into two
    * different heaps. If this happens, we could potentially end up both
    * allowing and disallowing 48-bit addresses. There's not much we can
    * do about it if we're pinning so we just throw an error and hope no
    * app is actually that stupid.
    */
   if ((new_flags & EXEC_OBJECT_PINNED) &&
       (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
       (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
      return vk_errorf(device, VK_ERROR_INVALID_EXTERNAL_HANDLE,
                       "The same BO was imported on two different heaps");

   *out_bo_flags = new_flags;
   return VK_SUCCESS;
}