/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "common/i915/intel_defines.h"
#include "common/intel_gem.h"

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}
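
/**
 * Wrapper around DRM_IOCTL_GEM_CLOSE. Any error from the kernel is ignored.
 */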
void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   intel_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET. Returns MAP_FAILED on error.
 */
static void*
anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap_offset gem_mmap = {
      .handle = gem_handle,
      .flags = device->info->has_local_mem ? I915_MMAP_OFFSET_FIXED :
               (flags & I915_MMAP_WC) ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };
   /* Nonzero offsets are not supported on this path. */
   assert(offset == 0);

   /* Get the fake offset back */
   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   /* And map it */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    device->fd, gem_mmap.offset);
   return map;
}
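
/**
 * Wrapper around the legacy DRM_IOCTL_I915_GEM_MMAP, used on kernels without
 * mmap_offset support. Returns MAP_FAILED on error.
 */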
static void*
anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   assert(!device->info->has_local_mem);

   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/**
 * Wrapper around either DRM_IOCTL_I915_GEM_MMAP_OFFSET or the legacy
 * DRM_IOCTL_I915_GEM_MMAP, depending on kernel support. Returns MAP_FAILED
 * on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   void *map;
   if (device->physical->info.has_mmap_offset)
      map = anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
   else
      map = anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);

   if (map != MAP_FAILED)
      VG(VALGRIND_MALLOCLIKE_BLOCK(map, size, 0, 1));

   return map;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
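
/**
 * Wrapper around DRM_IOCTL_I915_GEM_USERPTR, which wraps an existing user
 * memory allocation in a GEM buffer object.
 *
 * Return the gem handle, or 0 on failure.
 */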
uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   if (device->physical->info.has_userptr_probe)
      userptr.flags |= I915_USERPTR_PROBE;

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}
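
/**
 * Wrapper around DRM_IOCTL_I915_GEM_SET_CACHING. Returns 0 on success, -1
 * with errno set on failure.
 */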
int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_WAIT.
 *
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}
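
/**
 * Submit a batch buffer via DRM_IOCTL_I915_GEM_EXECBUFFER2. When a fence is
 * requested with I915_EXEC_FENCE_OUT, the kernel writes results back into
 * the struct, so the read/write _WR variant of the ioctl must be used.
 */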
int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

/** Return the tiling mode, or -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   if (!device->info->has_tiling_uapi)
      return -1;

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
    * anymore, so we will need another way to get the tiling. Apparently this
    * is only used in Android code, so we may need some other way to
    * communicate the tiling mode.
    */
   if (intel_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}
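
/**
 * Wrapper around DRM_IOCTL_I915_GEM_SET_TILING. Returns 0 on success (always
 * on platforms without the tiling uAPI, where this is a no-op) and -1 with
 * errno set on failure.
 */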
int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING, so
    * nothing needs to be done.
    */
   if (!device->info->has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code intel_ioctl and re-initialize the input on every retry.
    */
   int ret;
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
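
/**
 * Check whether the kernel accepts the given context priority by attempting
 * to set it on the default context (id 0).
 */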
bool
anv_gem_has_context_priority(int fd, int priority)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     priority);
}
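
/**
 * Set a context parameter via intel_gem_set_context_param. Returns 0 on
 * success, -errno on failure.
 */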
int
anv_gem_set_context_param(int fd, uint32_t context, uint32_t param, uint64_t value)
{
   int err = 0;
   if (!intel_gem_set_context_param(fd, context, param, value))
      err = -errno;
   return err;
}
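
/**
 * Wrapper around DRM_IOCTL_I915_GET_RESET_STATS. On success, reports the
 * number of batches that were active and pending when the GPU was reset.
 */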
int
anv_gem_context_get_reset_stats(int fd, int context,
                                uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = context,
   };

   int ret = intel_ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}
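
/**
 * Export a GEM handle as a dma-buf file descriptor via
 * DRM_IOCTL_PRIME_HANDLE_TO_FD. Returns -1 on failure.
 */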
int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC | DRM_RDWR,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}
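
/**
 * Import a dma-buf file descriptor as a GEM handle via
 * DRM_IOCTL_PRIME_FD_TO_HANDLE.
 *
 * Return the gem handle, or 0 on failure.
 */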
uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = intel_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}