/*
 * Copyright © 2015-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

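/* A preloadable interposer that over-allocates every i915 GEM buffer by one
 * extra page, fills that padding with a pseudo-random pattern, and re-checks
 * the pattern after each execbuffer to catch GPU out-of-bounds writes.  It is
 * typically injected into an application via LD_PRELOAD.
 */
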
#undef _FILE_OFFSET_BITS /* prevent #define open open64 */
#undef _TIME_BITS

#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/sysmacros.h>
#include <dlfcn.h>
#include <pthread.h>
#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "util/u_math.h"

#define MESA_LOG_TAG "INTEL-SANITIZE-GPU"
#include "util/log.h"
#include "common/intel_mem.h"

static int (*libc_open)(const char *pathname, int flags, mode_t mode);
static int (*libc_close)(int fd);
static int (*libc_ioctl)(int fd, unsigned long request, void *argp);
static int (*libc_fcntl)(int fd, int cmd, int param);

#define DRM_MAJOR 226

/* TODO: we want to make sure that the padding forces
 * the BO to take another page on the (PP)GTT; 4KB
 * may or may not be the page size for the BO. Indeed,
 * depending on GPU, kernel version and GEM size, the
 * page size can be one of 4KB, 64KB or 2M.
 */
#define PADDING_SIZE 4096

struct refcnt_hash_table {
   struct hash_table *t;
   int refcnt;
};

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
#define MUTEX_LOCK() do {                        \
   if (unlikely(pthread_mutex_lock(&mutex))) {   \
      mesa_loge("mutex_lock failed");            \
      abort();                                   \
   }                                             \
} while (0)
#define MUTEX_UNLOCK() do {                      \
   if (unlikely(pthread_mutex_unlock(&mutex))) { \
      mesa_loge("mutex_unlock failed");          \
      abort();                                   \
   }                                             \
} while (0)

static struct hash_table *fds_to_bo_sizes = NULL;

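/* Map a DRM fd to its per-fd table of BO handle -> unpadded size.  Returns
 * NULL when the fd is not a tracked i915 fd.
 */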
static inline struct hash_table*
bo_size_table(int fd)
{
   struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
                                                  (void*)(uintptr_t)fd);
   return e ? ((struct refcnt_hash_table*)e->data)->t : NULL;
}

static inline uint64_t
bo_size(int fd, uint32_t handle)
{
   struct hash_table *t = bo_size_table(fd);
   if (!t)
      return UINT64_MAX;
   struct hash_entry *e = _mesa_hash_table_search(t, (void*)(uintptr_t)handle);
   return e ? (uint64_t)(uintptr_t)e->data : UINT64_MAX;
}

static inline bool
is_drm_fd(int fd)
{
   return !!bo_size_table(fd);
}

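/* A DRM fd can be duplicated (e.g. via F_DUPFD_CLOEXEC), so the BO size table
 * is reference counted and shared between all fds that refer to the same
 * device.
 */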
static inline void
add_drm_fd(int fd)
{
   struct refcnt_hash_table *r = malloc(sizeof(*r));
   r->refcnt = 1;
   r->t = _mesa_pointer_hash_table_create(NULL);
   _mesa_hash_table_insert(fds_to_bo_sizes, (void*)(uintptr_t)fd,
                           (void*)(uintptr_t)r);
}

static inline void
dup_drm_fd(int old_fd, int new_fd)
{
   struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
                                                  (void*)(uintptr_t)old_fd);
   struct refcnt_hash_table *r = e->data;
   r->refcnt++;
   _mesa_hash_table_insert(fds_to_bo_sizes, (void*)(uintptr_t)new_fd,
                           (void*)(uintptr_t)r);
}

static inline void
del_drm_fd(int fd)
{
   struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
                                                  (void*)(uintptr_t)fd);
   struct refcnt_hash_table *r = e->data;
   if (!--r->refcnt) {
      _mesa_hash_table_remove(fds_to_bo_sizes, e);
      _mesa_hash_table_destroy(r->t, NULL);
      free(r);
   }
}

/* Our goal is not to have noise good enough for crypto,
 * but instead values that are unique-ish enough that
 * it is incredibly unlikely that a buffer overwrite
 * will produce the exact same values.
 */
static uint8_t
next_noise_value(uint8_t prev_noise)
{
   uint32_t v = prev_noise;
   return (v * 103u + 227u) & 0xFF;
}

static void
fill_noise_buffer(uint8_t *dst, uint8_t start, uint32_t length)
{
   for (uint32_t i = 0; i < length; ++i) {
      dst[i] = start;
      start = next_noise_value(start);
   }
}

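/* Map the padding page that sits just past the aligned end of the BO and
 * verify that the noise pattern written at creation time is still intact.
 * BOs we did not create (e.g. prime imports or userptr) are not in the size
 * table and are treated as good.
 */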
static bool
padding_is_good(int fd, uint32_t handle)
{
   struct drm_i915_gem_mmap mmap_arg = {
      .handle = handle,
      .offset = align64(bo_size(fd, handle), 4096),
      .size = PADDING_SIZE,
      .flags = 0,
   };

   /* Unknown bo, maybe prime or userptr. Ignore */
   if (mmap_arg.offset == UINT64_MAX)
      return true;

   uint8_t *mapped;
   int ret;
   uint8_t expected_value;

   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      mesa_logd("Unable to map buffer %d for pad checking.", handle);
      return false;
   }

   mapped = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
#ifdef SUPPORT_INTEL_INTEGRATED_GPUS
   /* bah-humbug, we need to see the latest contents and
    * if the bo is not cache coherent we likely need to
    * invalidate the cache lines to get it.
    */
   intel_invalidate_range(mapped, PADDING_SIZE);
#endif

   expected_value = handle & 0xFF;
   for (uint32_t i = 0; i < PADDING_SIZE; ++i) {
      if (expected_value != mapped[i]) {
         munmap(mapped, PADDING_SIZE);
         return false;
      }
      expected_value = next_noise_value(expected_value);
   }
   munmap(mapped, PADDING_SIZE);

   return true;
}

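/* Wrapper for DRM_IOCTL_I915_GEM_CREATE: round the requested size up to a
 * 4KB boundary, allocate PADDING_SIZE extra bytes, seed the padding with the
 * noise pattern and remember the original size so the padding can be located
 * and checked later.
 */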
static int
create_with_padding(int fd, struct drm_i915_gem_create *create)
{
   uint64_t original_size = create->size;

   create->size = align64(original_size, 4096) + PADDING_SIZE;
   int ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, create);
   create->size = original_size;

   if (ret != 0)
      return ret;

   uint8_t *noise_values;
   struct drm_i915_gem_mmap mmap_arg = {
      .handle = create->handle,
      .offset = align64(create->size, 4096),
      .size = PADDING_SIZE,
      .flags = 0,
   };

   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      mesa_logd("Unable to map buffer %d for pad creation.", create->handle);
      return 0;
   }

   noise_values = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
   fill_noise_buffer(noise_values, create->handle & 0xFF,
                     PADDING_SIZE);
   munmap(noise_values, PADDING_SIZE);

   _mesa_hash_table_insert(bo_size_table(fd), (void*)(uintptr_t)create->handle,
                           (void*)(uintptr_t)create->size);

   return 0;
}

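/* Wrapper for the execbuffer ioctls: submit the batch, wait for the batch BO
 * to become idle, then check the padding of every BO referenced by the
 * submission.  Any corrupted padding is reported and the process is aborted.
 */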
static int
exec_and_check_padding(int fd, unsigned long request,
                       struct drm_i915_gem_execbuffer2 *exec)
{
   int ret = libc_ioctl(fd, request, exec);
   if (ret != 0)
      return ret;

   struct drm_i915_gem_exec_object2 *objects =
      (void*)(uintptr_t)exec->buffers_ptr;
   uint32_t batch_bo = exec->flags & I915_EXEC_BATCH_FIRST ? objects[0].handle :
                       objects[exec->buffer_count - 1].handle;

   struct drm_i915_gem_wait wait = {
      .bo_handle = batch_bo,
      .timeout_ns = -1,
   };
   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return ret;

   bool detected_out_of_bounds_write = false;

   for (int i = 0; i < exec->buffer_count; i++) {
      uint32_t handle = objects[i].handle;

      if (!padding_is_good(fd, handle)) {
         detected_out_of_bounds_write = true;
         mesa_loge("Detected buffer out-of-bounds write in bo %d", handle);
      }
   }

   if (unlikely(detected_out_of_bounds_write)) {
      abort();
   }

   return 0;
}

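/* Wrapper for DRM_IOCTL_GEM_CLOSE: forward the close and drop the handle
 * from the per-fd size table so stale handles are never checked again.
 */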
static int
gem_close(int fd, struct drm_gem_close *close)
{
   int ret = libc_ioctl(fd, DRM_IOCTL_GEM_CLOSE, close);
   if (ret != 0)
      return ret;

   struct hash_table *t = bo_size_table(fd);
   struct hash_entry *e =
      _mesa_hash_table_search(t, (void*)(uintptr_t)close->handle);

   if (e)
      _mesa_hash_table_remove(t, e);

   return 0;
}

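/* Returns true when fd is a character device on the DRM major whose driver
 * reports itself as "i915" via DRM_IOCTL_VERSION.
 */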
static bool
is_i915(int fd) {
   struct stat stat;
   if (fstat(fd, &stat))
      return false;

   if (!S_ISCHR(stat.st_mode) || major(stat.st_rdev) != DRM_MAJOR)
      return false;

   char name[5] = "";
   drm_version_t version = {
      .name = name,
      .name_len = sizeof(name) - 1,
   };
   if (libc_ioctl(fd, DRM_IOCTL_VERSION, &version))
      return false;

   return strcmp("i915", name) == 0;
}

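/* libc interposers.  open()/open64(), close(), fcntl() and ioctl() are
 * exported with default visibility so that, when this library is preloaded,
 * they shadow the libc versions and let us track i915 fds and intercept the
 * GEM ioctls handled above.
 */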
__attribute__ ((visibility ("default"))) int
open(const char *path, int flags, ...)
{
   va_list args;
   mode_t mode;

   va_start(args, flags);
   mode = va_arg(args, int);
   va_end(args);

   int fd = libc_open(path, flags, mode);

   MUTEX_LOCK();

   if (fd >= 0 && is_i915(fd))
      add_drm_fd(fd);

   MUTEX_UNLOCK();

   return fd;
}

__attribute__ ((visibility ("default"), alias ("open"))) int
open64(const char *path, int flags, ...);

__attribute__ ((visibility ("default"))) int
close(int fd)
{
   MUTEX_LOCK();

   if (is_drm_fd(fd))
      del_drm_fd(fd);

   MUTEX_UNLOCK();

   return libc_close(fd);
}

__attribute__ ((visibility ("default"))) int
fcntl(int fd, int cmd, ...)
{
   va_list args;
   int param;

   va_start(args, cmd);
   param = va_arg(args, int);
   va_end(args);

   int res = libc_fcntl(fd, cmd, param);

   MUTEX_LOCK();

   if (is_drm_fd(fd) && cmd == F_DUPFD_CLOEXEC)
      dup_drm_fd(fd, res);

   MUTEX_UNLOCK();

   return res;
}

__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   int res;
   va_list args;
   void *argp;

   MUTEX_LOCK();

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   if (_IOC_TYPE(request) == DRM_IOCTL_BASE && !is_drm_fd(fd) && is_i915(fd)) {
      mesa_loge("missed drm fd %d", fd);
      add_drm_fd(fd);
   }

   if (is_drm_fd(fd)) {
      switch (request) {
      case DRM_IOCTL_GEM_CLOSE:
         res = gem_close(fd, (struct drm_gem_close*)argp);
         goto out;

      case DRM_IOCTL_I915_GEM_CREATE:
         res = create_with_padding(fd, (struct drm_i915_gem_create*)argp);
         goto out;

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR:
         res = exec_and_check_padding(fd, request,
                                      (struct drm_i915_gem_execbuffer2*)argp);
         goto out;

      default:
         break;
      }
   }
   res = libc_ioctl(fd, request, argp);

 out:
   MUTEX_UNLOCK();
   return res;
}

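/* Resolve the real libc entry points at load time so the wrappers above can
 * forward to them.
 */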
static void __attribute__ ((constructor))
init(void)
{
   fds_to_bo_sizes = _mesa_pointer_hash_table_create(NULL);
   libc_open = dlsym(RTLD_NEXT, "open");
   libc_close = dlsym(RTLD_NEXT, "close");
   libc_fcntl = dlsym(RTLD_NEXT, "fcntl");
   libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
}