/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */


#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include "util/compress.h"
#include "util/crc32.h"
#include "util/u_debug.h"
#include "util/disk_cache.h"
#include "util/disk_cache_os.h"

#if DETECT_OS_WINDOWS

#include <windows.h>

bool
disk_cache_get_function_identifier(void *ptr, struct mesa_sha1 *ctx)
{
   HMODULE mod = NULL;
   GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
                      (LPCWSTR)ptr,
                      &mod);
   if (!mod)
      return false;

   WCHAR filename[MAX_PATH];
   DWORD filename_length = GetModuleFileNameW(mod, filename, ARRAY_SIZE(filename));

   if (filename_length == 0 || filename_length == ARRAY_SIZE(filename))
      return false;

   HANDLE mod_as_file = CreateFileW(
         filename,
         GENERIC_READ,
         FILE_SHARE_READ,
         NULL,
         OPEN_EXISTING,
         FILE_ATTRIBUTE_NORMAL,
         NULL);
   if (mod_as_file == INVALID_HANDLE_VALUE)
      return false;

   FILETIME time;
   bool ret = GetFileTime(mod_as_file, NULL, NULL, &time);
   if (ret)
      _mesa_sha1_update(ctx, &time, sizeof(time));
   CloseHandle(mod_as_file);
   return ret;
}

#endif

#ifdef ENABLE_SHADER_CACHE

#if DETECT_OS_WINDOWS
/* TODO: implement disk cache support on windows */

#else

#include <dirent.h>
#include <errno.h>
#include <pwd.h>
#include <stdio.h>
#include <string.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "utime.h"

#include "util/blob.h"
#include "util/crc32.h"
#include "util/u_debug.h"
#include "util/ralloc.h"
#include "util/rand_xor.h"
/* Create a directory named 'path' if it does not already exist.
 * This is for use by mkdir_with_parents_if_needed(). Use that instead.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *          -1 in all other cases.
 */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* If the path exists already, then our work is done if it's a
    * directory, but it's an error if it is not.
    */
   if (stat(path, &sb) == 0) {
      if (S_ISDIR(sb.st_mode)) {
         return 0;
      } else {
         fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
                 "---disabling.\n", path);
         return -1;
      }
   }

   int ret = mkdir(path, 0700);
   if (ret == 0 || (ret == -1 && errno == EEXIST))
      return 0;

   fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
           path, strerror(errno));

   return -1;
}

/* Create a directory named 'path' if it does not already exist,
 * including parent directories if required.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *          -1 in all other cases.
 */
static int
mkdir_with_parents_if_needed(const char *path)
{
   char *p;
   const char *end;

   if (path[0] == '\0')
      return -1;

   p = strdup(path);
   end = p + strlen(p) + 1; /* end points to the \0 terminator */
   for (char *q = p; q != end; q++) {
      if (*q == '/' || q == end - 1) {
         if (q == p) {
            /* Skip the first / of an absolute path. */
            continue;
         }

         *q = '\0';

         if (mkdir_if_needed(p) == -1) {
            free(p);
            return -1;
         }

         *q = '/';
      }
   }
   free(p);

   return 0;
}
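
/* A sketch of how the loop above walks a path (the path is illustrative
 * only): for "/home/user/.cache/mesa_shader_cache" it calls
 * mkdir_if_needed() on "/home", "/home/user", "/home/user/.cache" and
 * finally "/home/user/.cache/mesa_shader_cache", temporarily writing '\0'
 * over each '/' so that the prefix can be passed as a C string.
 */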

/* Concatenate an existing path and a new name to form a new path. If the new
 * path does not exist as a directory, create it then return the resulting
 * name of the new path (ralloc'ed off of 'ctx').
 *
 * Returns NULL on any error, such as:
 *
 *      <path>/<name> exists but is not a directory
 *      <path>/<name> cannot be created as a directory
 */
static char *
concatenate_and_mkdir(void *ctx, const char *path, const char *name)
{
   char *new_path;

   new_path = ralloc_asprintf(ctx, "%s/%s", path, name);

   if (mkdir_with_parents_if_needed(new_path) == 0)
      return new_path;

   return NULL;
}

struct lru_file {
   struct list_head node;
   char *lru_name;
   size_t lru_file_size;
   time_t lru_atime;
};

static void
free_lru_file_list(struct list_head *lru_file_list)
{
   struct lru_file *e, *next;
   LIST_FOR_EACH_ENTRY_SAFE(e, next, lru_file_list, node) {
      free(e->lru_name);
      free(e);
   }
   free(lru_file_list);
}

/* Given a directory path and a predicate function, create a linked list of
 * the entries with the oldest access times in that directory for which the
 * predicate returns true.
 *
 * Returns: A malloc'ed linked list of the paths of the chosen files, (or
 * NULL on any error). The caller should free the linked list via
 * free_lru_file_list() when finished.
 */
static struct list_head *
choose_lru_file_matching(const char *dir_path,
                         bool (*predicate)(const char *dir_path,
                                           const struct stat *,
                                           const char *, const size_t))
{
   DIR *dir;
   struct dirent *dir_ent;

   dir = opendir(dir_path);
   if (dir == NULL)
      return NULL;

   const int dir_fd = dirfd(dir);

   /* First count the number of files in the directory */
   unsigned total_file_count = 0;
   while ((dir_ent = readdir(dir)) != NULL) {
#ifdef HAVE_DIRENT_D_TYPE
      if (dir_ent->d_type == DT_REG) { /* If the entry is a regular file */
         total_file_count++;
      }
#else
      struct stat st;

      if (fstatat(dir_fd, dir_ent->d_name, &st, AT_SYMLINK_NOFOLLOW) == 0) {
         if (S_ISREG(st.st_mode)) {
            total_file_count++;
         }
      }
#endif
   }

   /* Reset to the start of the directory */
   rewinddir(dir);

   /* Collect 10% of the files in this directory for removal. Note: This
    * should work out to only be around 0.04% of total cache items.
    */
   unsigned lru_file_count = total_file_count > 10 ? total_file_count / 10 : 1;
   struct list_head *lru_file_list = malloc(sizeof(struct list_head));
   list_inithead(lru_file_list);

   unsigned processed_files = 0;
   while (1) {
      dir_ent = readdir(dir);
      if (dir_ent == NULL)
         break;

      struct stat sb;
      if (fstatat(dir_fd, dir_ent->d_name, &sb, 0) == 0) {
         struct lru_file *entry = NULL;
         if (!list_is_empty(lru_file_list))
            entry = list_first_entry(lru_file_list, struct lru_file, node);

         if (!entry || sb.st_atime < entry->lru_atime) {
            size_t len = strlen(dir_ent->d_name);
            if (!predicate(dir_path, &sb, dir_ent->d_name, len))
               continue;

            bool new_entry = false;
            if (processed_files < lru_file_count) {
               entry = calloc(1, sizeof(struct lru_file));
               new_entry = true;
            }
            processed_files++;

            char *tmp = realloc(entry->lru_name, len + 1);
            if (tmp) {
               /* Find the location to insert the new lru item. We want to
                * keep the list ordered from most recently used to least
                * recently used. This allows us to just evict the head item
                * from the list as we process the directory and find older
                * entries.
                */
               struct list_head *list_node = lru_file_list;
               struct lru_file *e;
               LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
                  if (sb.st_atime < e->lru_atime) {
                     list_node = &e->node;
                     break;
                  }
               }

               if (new_entry) {
                  list_addtail(&entry->node, list_node);
               } else {
                  if (list_node != lru_file_list) {
                     list_del(&entry->node);
                     list_addtail(&entry->node, list_node);
                  }
               }

               entry->lru_name = tmp;
               memcpy(entry->lru_name, dir_ent->d_name, len + 1);
               entry->lru_atime = sb.st_atime;
               entry->lru_file_size = sb.st_blocks * 512;
            }
         }
      }
   }

   if (list_is_empty(lru_file_list)) {
      closedir(dir);
      free(lru_file_list);
      return NULL;
   }

   /* Create the full path for each file in the list we found */
   struct lru_file *e;
   LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
      char *filename = e->lru_name;
      if (asprintf(&e->lru_name, "%s/%s", dir_path, filename) < 0)
         e->lru_name = NULL;

      free(filename);
   }

   closedir(dir);

   return lru_file_list;
}
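
/* A minimal usage sketch for the helper above (the real callers follow
 * below): pass a directory and a predicate that filters eligible entries,
 * e.g.
 *
 *    struct list_head *lru =
 *       choose_lru_file_matching(dir_path, is_regular_non_tmp_file);
 *
 * then walk the returned entries with LIST_FOR_EACH_ENTRY() and release
 * them with free_lru_file_list().
 */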

/* Is entry a regular file whose name does not end in ".tmp"? */
static bool
is_regular_non_tmp_file(const char *path, const struct stat *sb,
                        const char *d_name, const size_t len)
{
   if (!S_ISREG(sb->st_mode))
      return false;

   if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0)
      return false;

   return true;
}

/* Returns the total size of the deleted files, (or 0 on any error). */
static size_t
unlink_lru_file_from_directory(const char *path)
{
   struct list_head *lru_file_list =
      choose_lru_file_matching(path, is_regular_non_tmp_file);
   if (lru_file_list == NULL)
      return 0;

   assert(!list_is_empty(lru_file_list));

   size_t total_unlinked_size = 0;
   struct lru_file *e;
   LIST_FOR_EACH_ENTRY(e, lru_file_list, node) {
      if (unlink(e->lru_name) == 0)
         total_unlinked_size += e->lru_file_size;
   }
   free_lru_file_list(lru_file_list);

   return total_unlinked_size;
}

/* Is entry a directory with a two-character name, (and not the
 * special name of ".."). We also return false if the dir is empty.
 */
static bool
is_two_character_sub_directory(const char *path, const struct stat *sb,
                               const char *d_name, const size_t len)
{
   if (!S_ISDIR(sb->st_mode))
      return false;

   if (len != 2)
      return false;

   if (strcmp(d_name, "..") == 0)
      return false;

   char *subdir;
   if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
      return false;
   DIR *dir = opendir(subdir);
   free(subdir);

   if (dir == NULL)
      return false;

   unsigned subdir_entries = 0;
   struct dirent *d;
   while ((d = readdir(dir)) != NULL) {
      if (++subdir_entries > 2)
         break;
   }
   closedir(dir);

   /* If dir only contains '.' and '..' it must be empty */
   if (subdir_entries <= 2)
      return false;

   return true;
}

/* Create the directory that will be needed for the cache file for \key.
 *
 * Obviously, the implementation here must closely match
 * disk_cache_get_cache_filename() below.
 */
static void
make_cache_file_directory(struct disk_cache *cache, const cache_key key)
{
   char *dir;
   char buf[41];

   _mesa_sha1_format(buf, key);
   if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
      return;

   mkdir_with_parents_if_needed(dir);
   free(dir);
}

static ssize_t
read_all(int fd, void *buf, size_t count)
{
   char *in = buf;
   ssize_t read_ret;
   size_t done;

   for (done = 0; done < count; done += read_ret) {
      read_ret = read(fd, in + done, count - done);
      if (read_ret == -1 || read_ret == 0)
         return -1;
   }
   return done;
}

static ssize_t
write_all(int fd, const void *buf, size_t count)
{
   const char *out = buf;
   ssize_t written;
   size_t done;

   for (done = 0; done < count; done += written) {
      written = write(fd, out + done, count - done);
      if (written == -1)
         return -1;
   }
   return done;
}

/* Evict least recently used cache item */
void
disk_cache_evict_lru_item(struct disk_cache *cache)
{
   char *dir_path;

   /* With a reasonably-sized, full cache, (and with keys generated
    * from a cryptographic hash), we can choose two random hex digits
    * and reasonably expect the directory to exist with a file in it.
    * This gives us pseudo-LRU eviction without having to check every
    * file in the cache.
    */
   uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
   if (asprintf(&dir_path, "%s/%02" PRIx64, cache->path, rand64 & 0xff) < 0)
      return;

   size_t size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size) {
      p_atomic_add(&cache->size->value, - (uint64_t)size);
      return;
   }

   /* In the case where the random choice of directory didn't find
    * something, we choose the least recently accessed from the
    * existing directories.
    *
    * Really, the only reason this code exists is to allow the unit
    * tests to work, (which use an artificially-small cache to be able
    * to force a single cached item to be evicted).
    */
   struct list_head *lru_file_list =
      choose_lru_file_matching(cache->path, is_two_character_sub_directory);
   if (lru_file_list == NULL)
      return;

   assert(!list_is_empty(lru_file_list));

   struct lru_file *lru_file_dir =
      list_first_entry(lru_file_list, struct lru_file, node);

   size = unlink_lru_file_from_directory(lru_file_dir->lru_name);

   free_lru_file_list(lru_file_list);

   if (size)
      p_atomic_add(&cache->size->value, - (uint64_t)size);
}
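
/* Sketch of the eviction strategy above, assuming a cache rooted at
 * "/home/user/.cache/mesa_shader_cache" (the path is illustrative): a
 * random byte, say 0x7f, selects the two-character subdirectory ".../7f",
 * and roughly 10% of its least recently used files are unlinked. Only if
 * that directory is missing or empty do we fall back to scanning all of
 * the two-character subdirectories for the least recently accessed one.
 */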

void
disk_cache_evict_item(struct disk_cache *cache, char *filename)
{
   struct stat sb;
   if (stat(filename, &sb) == -1) {
      free(filename);
      return;
   }

   unlink(filename);
   free(filename);

   if (sb.st_blocks)
      p_atomic_add(&cache->size->value, - (uint64_t)sb.st_blocks * 512);
}

static void *
parse_and_validate_cache_item(struct disk_cache *cache, void *cache_item,
                              size_t cache_item_size, size_t *size)
{
   uint8_t *uncompressed_data = NULL;

   struct blob_reader ci_blob_reader;
   blob_reader_init(&ci_blob_reader, cache_item, cache_item_size);

   size_t header_size = cache->driver_keys_blob_size;
   const void *keys_blob = blob_read_bytes(&ci_blob_reader, header_size);
   if (ci_blob_reader.overrun)
      goto fail;

   /* Check for extremely unlikely hash collisions */
   if (memcmp(cache->driver_keys_blob, keys_blob, header_size) != 0) {
      assert(!"Mesa cache keys mismatch!");
      goto fail;
   }

   uint32_t md_type = blob_read_uint32(&ci_blob_reader);
   if (ci_blob_reader.overrun)
      goto fail;

   if (md_type == CACHE_ITEM_TYPE_GLSL) {
      uint32_t num_keys = blob_read_uint32(&ci_blob_reader);
      if (ci_blob_reader.overrun)
         goto fail;

      /* The cache item metadata is currently just used for distributing
       * precompiled shaders; it is not used by Mesa, so just skip it for
       * now.
       * TODO: pass the metadata back to the caller and do some basic
       * validation.
       */
      const void UNUSED *metadata =
         blob_read_bytes(&ci_blob_reader, num_keys * sizeof(cache_key));
      if (ci_blob_reader.overrun)
         goto fail;
   }

   /* Load the CRC that was created when the file was written. */
   struct cache_entry_file_data *cf_data =
      (struct cache_entry_file_data *)
      blob_read_bytes(&ci_blob_reader, sizeof(struct cache_entry_file_data));
   if (ci_blob_reader.overrun)
      goto fail;

   size_t cache_data_size = ci_blob_reader.end - ci_blob_reader.current;
   const uint8_t *data = (uint8_t *) blob_read_bytes(&ci_blob_reader, cache_data_size);

   /* Check the data for corruption */
   if (cf_data->crc32 != util_hash_crc32(data, cache_data_size))
      goto fail;

   /* Uncompress the cache data */
   uncompressed_data = malloc(cf_data->uncompressed_size);
   if (!uncompressed_data)
      goto fail;

   if (cache->compression_disabled) {
      if (cf_data->uncompressed_size != cache_data_size)
         goto fail;

      memcpy(uncompressed_data, data, cache_data_size);
   } else {
      if (!util_compress_inflate(data, cache_data_size, uncompressed_data,
                                 cf_data->uncompressed_size))
         goto fail;
   }

   if (size)
      *size = cf_data->uncompressed_size;

   return uncompressed_data;

fail:
   if (uncompressed_data)
      free(uncompressed_data);

   return NULL;
}

void *
disk_cache_load_item(struct disk_cache *cache, char *filename, size_t *size)
{
   uint8_t *data = NULL;

   int fd = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      goto fail;

   struct stat sb;
   if (fstat(fd, &sb) == -1)
      goto fail;

   data = malloc(sb.st_size);
   if (data == NULL)
      goto fail;

   /* Read entire file into memory */
   int ret = read_all(fd, data, sb.st_size);
   if (ret == -1)
      goto fail;

   uint8_t *uncompressed_data =
      parse_and_validate_cache_item(cache, data, sb.st_size, size);
   if (!uncompressed_data)
      goto fail;

   free(data);
   free(filename);
   close(fd);

   return uncompressed_data;

fail:
   if (data)
      free(data);
   if (filename)
      free(filename);
   if (fd != -1)
      close(fd);

   return NULL;
}

/* Return a filename within the cache's directory corresponding to 'key'.
 *
 * Returns NULL if out of memory.
 */
char *
disk_cache_get_cache_filename(struct disk_cache *cache, const cache_key key)
{
   char buf[41];
   char *filename;

   if (cache->path_init_failed)
      return NULL;

   _mesa_sha1_format(buf, key);
   if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
                buf[1], buf + 2) == -1)
      return NULL;

   return filename;
}
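
/* Example of the resulting layout (the key is hypothetical): if the key
 * formats to the hex string "d41d8cd98f00b204e9800998ecf8427e00000000",
 * the entry is stored at
 *
 *    <cache->path>/d4/1d8cd98f00b204e9800998ecf8427e00000000
 *
 * i.e. the first two hex characters name the subdirectory and the
 * remaining 38 name the file.
 */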

static bool
create_cache_item_header_and_blob(struct disk_cache_put_job *dc_job,
                                  struct blob *cache_blob)
{

   /* Compress the cache item data */
   size_t max_buf = util_compress_max_compressed_len(dc_job->size);
   size_t compressed_size;
   void *compressed_data;

   if (dc_job->cache->compression_disabled) {
      compressed_size = dc_job->size;
      compressed_data = dc_job->data;
   } else {
      compressed_data = malloc(max_buf);
      if (compressed_data == NULL)
         return false;
      compressed_size =
         util_compress_deflate(dc_job->data, dc_job->size,
                               compressed_data, max_buf);
      if (compressed_size == 0)
         goto fail;
   }

   /* Copy the driver_keys_blob; this can be used to find information about
    * the Mesa version that produced the entry, or to deal with hash
    * collisions, should that ever become a real problem.
    */
   if (!blob_write_bytes(cache_blob, dc_job->cache->driver_keys_blob,
                         dc_job->cache->driver_keys_blob_size))
      goto fail;

   /* Write the cache item metadata. This data can be used to deal with
    * hash collisions, as well as providing useful information to 3rd party
    * tools reading the cache files.
    */
   if (!blob_write_uint32(cache_blob, dc_job->cache_item_metadata.type))
      goto fail;

   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
      if (!blob_write_uint32(cache_blob, dc_job->cache_item_metadata.num_keys))
         goto fail;

      size_t metadata_keys_size =
         dc_job->cache_item_metadata.num_keys * sizeof(cache_key);
      if (!blob_write_bytes(cache_blob, dc_job->cache_item_metadata.keys[0],
                            metadata_keys_size))
         goto fail;
   }

   /* Create a CRC of the compressed data. We will read this when restoring
    * the cache and use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(compressed_data, compressed_size);
   cf_data.uncompressed_size = dc_job->size;

   if (!blob_write_bytes(cache_blob, &cf_data, sizeof(cf_data)))
      goto fail;

   /* Finally copy the compressed cache blob */
   if (!blob_write_bytes(cache_blob, compressed_data, compressed_size))
      goto fail;

   if (!dc_job->cache->compression_disabled)
      free(compressed_data);

   return true;

fail:
   if (!dc_job->cache->compression_disabled)
      free(compressed_data);

   return false;
}
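
/* For reference, the blob assembled above (and parsed again by
 * parse_and_validate_cache_item()) is laid out as follows; the sizes are
 * taken from the code, not from a separately documented format:
 *
 *    driver_keys_blob       (cache->driver_keys_blob_size bytes)
 *    metadata type          (uint32_t)
 *    [num_keys              (uint32_t)               -- GLSL entries only]
 *    [keys                  (num_keys * sizeof(cache_key)) -- GLSL only]
 *    cache_entry_file_data  (crc32 + uncompressed_size)
 *    compressed data        (remainder of the blob)
 */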

void
disk_cache_write_item_to_disk(struct disk_cache_put_job *dc_job,
                              char *filename)
{
   int fd = -1, fd_final = -1;
   struct blob cache_blob;
   blob_init(&cache_blob);

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   char *filename_tmp = NULL;
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that other process be
    * responsible for writing the file.
    */
#ifdef HAVE_FLOCK
   int err = flock(fd, LOCK_EX | LOCK_NB);
#else
   struct flock lock = {
      .l_start = 0,
      .l_len = 0, /* entire file */
      .l_type = F_WRLCK,
      .l_whence = SEEK_SET
   };
   int err = fcntl(fd, F_SETLK, &lock);
#endif
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    */
   if (!create_cache_item_header_and_blob(dc_job, &cache_blob)) {
      unlink(filename_tmp);
      goto done;
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename it atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   int ret = write_all(fd, cache_blob.data, cache_blob.size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   struct stat sb;
   if (stat(filename, &sb) == -1) {
      /* Something went wrong; remove the file. */
      unlink(filename);
      goto done;
   }

   p_atomic_add(&dc_job->cache->size->value, sb.st_blocks * 512);

done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   free(filename_tmp);
   blob_finish(&cache_blob);
}
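
/* To summarize the protocol implemented above: the entry is written to
 * "<filename>.tmp" under an exclusive (non-blocking) lock, the final
 * filename is re-checked so that a concurrent writer that won the race is
 * left alone, and only then is the temporary file renamed into place and
 * the shared size counter bumped. Readers therefore only ever observe
 * complete cache files.
 */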

/* Determine path for cache based on the first defined name as follows:
 *
 *   $MESA_SHADER_CACHE_DIR
 *   $XDG_CACHE_HOME/mesa_shader_cache
 *   $HOME/.cache/mesa_shader_cache
 *   <pwd.pw_dir>/.cache/mesa_shader_cache
 */
char *
disk_cache_generate_cache_dir(void *mem_ctx, const char *gpu_name,
                              const char *driver_id,
                              enum disk_cache_type cache_type)
{
   char *cache_dir_name = CACHE_DIR_NAME;
   if (cache_type == DISK_CACHE_SINGLE_FILE)
      cache_dir_name = CACHE_DIR_NAME_SF;
   else if (cache_type == DISK_CACHE_DATABASE)
      cache_dir_name = CACHE_DIR_NAME_DB;

   char *path = secure_getenv("MESA_SHADER_CACHE_DIR");

   if (!path) {
      path = secure_getenv("MESA_GLSL_CACHE_DIR");
      if (path)
         fprintf(stderr,
                 "*** MESA_GLSL_CACHE_DIR is deprecated; "
                 "use MESA_SHADER_CACHE_DIR instead ***\n");
   }

   if (path) {
      path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
      if (!path)
         return NULL;
   }

   if (path == NULL) {
      char *xdg_cache_home = secure_getenv("XDG_CACHE_HOME");

      if (xdg_cache_home) {
         path = concatenate_and_mkdir(mem_ctx, xdg_cache_home, cache_dir_name);
         if (!path)
            return NULL;
      }
   }

   if (!path) {
      char *home = getenv("HOME");

      if (home) {
         path = concatenate_and_mkdir(mem_ctx, home, ".cache");
         if (!path)
            return NULL;

         path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
         if (!path)
            return NULL;
      }
   }

   if (!path) {
      char *buf;
      size_t buf_size;
      struct passwd pwd, *result;

      buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
      if (buf_size == -1)
         buf_size = 512;

      /* Loop until buf_size is large enough to query the directory */
      while (1) {
         buf = ralloc_size(mem_ctx, buf_size);

         getpwuid_r(getuid(), &pwd, buf, buf_size, &result);
         if (result)
            break;

         if (errno == ERANGE) {
            ralloc_free(buf);
            buf = NULL;
            buf_size *= 2;
         } else {
            return NULL;
         }
      }

      path = concatenate_and_mkdir(mem_ctx, pwd.pw_dir, ".cache");
      if (!path)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, cache_dir_name);
      if (!path)
         return NULL;
   }

   if (cache_type == DISK_CACHE_SINGLE_FILE) {
      path = concatenate_and_mkdir(mem_ctx, path, driver_id);
      if (!path)
         return NULL;

      path = concatenate_and_mkdir(mem_ctx, path, gpu_name);
      if (!path)
         return NULL;
   }

   return path;
}
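
/* Example resolutions (paths are illustrative): with
 * MESA_SHADER_CACHE_DIR=/var/tmp/shadercache a multi-file cache lives in
 * /var/tmp/shadercache/mesa_shader_cache, while with only
 * XDG_CACHE_HOME=/home/user/.cache set it lives in
 * /home/user/.cache/mesa_shader_cache. For DISK_CACHE_SINGLE_FILE the
 * driver id and GPU name are appended as two further subdirectories.
 */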

bool
disk_cache_enabled()
{
   /* The disk cache is not enabled on Android, but Android's EGL layer
    * uses EGL_ANDROID_blob_cache to manage the cache itself.
    */
   if (DETECT_OS_ANDROID)
      return false;

   /* If running as a user other than the real user, disable the cache. */
   if (!__normal_user())
      return false;

   /* At user request, disable the shader cache entirely. */
#ifdef SHADER_CACHE_DISABLE_BY_DEFAULT
   bool disable_by_default = true;
#else
   bool disable_by_default = false;
#endif
   char *envvar_name = "MESA_SHADER_CACHE_DISABLE";
   if (!getenv(envvar_name)) {
      envvar_name = "MESA_GLSL_CACHE_DISABLE";
      if (getenv(envvar_name))
         fprintf(stderr,
                 "*** MESA_GLSL_CACHE_DISABLE is deprecated; "
                 "use MESA_SHADER_CACHE_DISABLE instead ***\n");
   }

   if (debug_get_bool_option(envvar_name, disable_by_default))
      return false;

   return true;
}
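
/* Typical ways this is driven from the environment (assuming the usual
 * boolean parsing of debug_get_bool_option()): setting
 * MESA_SHADER_CACHE_DISABLE=1 (or "true") disables the cache, and on
 * builds configured with SHADER_CACHE_DISABLE_BY_DEFAULT it must be
 * explicitly set to "0"/"false" to re-enable the cache.
 */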

void *
disk_cache_load_item_foz(struct disk_cache *cache, const cache_key key,
                         size_t *size)
{
   size_t cache_tem_size = 0;
   void *cache_item = foz_read_entry(&cache->foz_db, key, &cache_tem_size);
   if (!cache_item)
      return NULL;

   uint8_t *uncompressed_data =
      parse_and_validate_cache_item(cache, cache_item, cache_tem_size, size);
   free(cache_item);

   return uncompressed_data;
}

bool
disk_cache_write_item_to_disk_foz(struct disk_cache_put_job *dc_job)
{
   struct blob cache_blob;
   blob_init(&cache_blob);

   if (!create_cache_item_header_and_blob(dc_job, &cache_blob))
      return false;

   bool r = foz_write_entry(&dc_job->cache->foz_db, dc_job->key,
                            cache_blob.data, cache_blob.size);

   blob_finish(&cache_blob);
   return r;
}

bool
disk_cache_load_cache_index_foz(void *mem_ctx, struct disk_cache *cache)
{
   /* Load cache index into a hash map (from fossilise files) */
   return foz_prepare(&cache->foz_db, cache->path);
}


void
disk_cache_touch_cache_user_marker(char *path)
{
   char *marker_path = NULL;
   if (asprintf(&marker_path, "%s/marker", path) == -1)
      return;

   time_t now = time(NULL);

   struct stat attr;
   if (stat(marker_path, &attr) == -1) {
      int fd = open(marker_path, O_WRONLY | O_CREAT | O_CLOEXEC, 0644);
      if (fd != -1) {
         close(fd);
      }
   } else if (now - attr.st_mtime > 60 * 60 * 24 /* One day */) {
      (void)utime(marker_path, NULL);
   }
   free(marker_path);
}

bool
disk_cache_mmap_cache_index(void *mem_ctx, struct disk_cache *cache,
                            char *path)
{
   int fd = -1;
   bool mapped = false;

   path = ralloc_asprintf(mem_ctx, "%s/index", cache->path);
   if (path == NULL)
      goto path_fail;

   fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
   if (fd == -1)
      goto path_fail;

   struct stat sb;
   if (fstat(fd, &sb) == -1)
      goto path_fail;

   /* Force the index file to be the expected size. */
   size_t size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
   if (sb.st_size != size) {
#if HAVE_POSIX_FALLOCATE
      /* posix_fallocate() ensures that the disk space is actually allocated;
       * it fails up front if there is not enough space on the disk.
       */
      if (posix_fallocate(fd, 0, size) != 0)
         goto path_fail;
#else
      /* ftruncate() allocates disk space lazily. If the disk is full and it
       * is unable to allocate disk space when the file is accessed via the
       * mmap, the process will crash with SIGBUS.
       */
      if (ftruncate(fd, size) == -1)
         goto path_fail;
#endif
   }

   /* We map this shared so that other processes see updates that we
    * make.
    *
    * Note: We do use atomic addition to ensure that multiple
    * processes don't scramble the cache size recorded in the
    * index. But we don't use any locking to prevent multiple
    * processes from updating the same entry simultaneously. The idea
    * is that if either result lands entirely in the index, then
    * that's equivalent to a well-ordered write followed by an
    * eviction and a write. On the other hand, if the simultaneous
    * writes result in a corrupt entry, that's not really any
    * different than both entries being evicted, (since within the
    * guarantees of the cryptographic hash, a corrupt entry is
    * unlikely to ever match a real cache key).
    */
   cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
   if (cache->index_mmap == MAP_FAILED)
      goto path_fail;
   cache->index_mmap_size = size;

   cache->size = (p_atomic_uint64_t *) cache->index_mmap;
   cache->stored_keys = cache->index_mmap + sizeof(uint64_t);
   mapped = true;

path_fail:
   if (fd != -1)
      close(fd);

   return mapped;
}
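
/* Layout of the mapped index file, as implied by the size calculation and
 * pointer setup above (not a separately documented format):
 *
 *    bytes 0..7 : cache->size, a shared atomic uint64_t byte count
 *    bytes 8..  : CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE bytes of recently
 *                 stored cache keys (cache->stored_keys)
 */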

void
disk_cache_destroy_mmap(struct disk_cache *cache)
{
   munmap(cache->index_mmap, cache->index_mmap_size);
}

void *
disk_cache_db_load_item(struct disk_cache *cache, const cache_key key,
                        size_t *size)
{
   size_t cache_tem_size = 0;
   void *cache_item = mesa_cache_db_multipart_read_entry(&cache->cache_db,
                                                         key, &cache_tem_size);
   if (!cache_item)
      return NULL;

   uint8_t *uncompressed_data =
      parse_and_validate_cache_item(cache, cache_item, cache_tem_size, size);
   free(cache_item);

   return uncompressed_data;
}

bool
disk_cache_db_write_item_to_disk(struct disk_cache_put_job *dc_job)
{
   struct blob cache_blob;
   blob_init(&cache_blob);

   if (!create_cache_item_header_and_blob(dc_job, &cache_blob))
      return false;

   bool r = mesa_cache_db_multipart_entry_write(&dc_job->cache->cache_db,
                                                dc_job->key, cache_blob.data,
                                                cache_blob.size);

   blob_finish(&cache_blob);
   return r;
}

bool
disk_cache_db_load_cache_index(void *mem_ctx, struct disk_cache *cache)
{
   return mesa_cache_db_multipart_open(&cache->cache_db, cache->path);
}

static void
delete_dir(const char *path)
{
   DIR *dir = opendir(path);
   if (!dir)
      return;

   struct dirent *p;
   char *entry_path = NULL;

   while ((p = readdir(dir)) != NULL) {
      if (strcmp(p->d_name, ".") == 0 || strcmp(p->d_name, "..") == 0)
         continue;

      if (asprintf(&entry_path, "%s/%s", path, p->d_name) == -1)
         continue;

      struct stat st;
      if (stat(entry_path, &st)) {
         free(entry_path);
         continue;
      }
      if (S_ISDIR(st.st_mode))
         delete_dir(entry_path);
      else
         unlink(entry_path);

      free(entry_path);
   }
   closedir(dir);
   rmdir(path);
}

/* Deletes old multi-file caches, to avoid having two default caches taking
 * up disk space.
 */
void
disk_cache_delete_old_cache(void)
{
   void *ctx = ralloc_context(NULL);
   char *dirname = disk_cache_generate_cache_dir(ctx, NULL, NULL, DISK_CACHE_MULTI_FILE);
   if (!dirname)
      goto finish;

   /* The directory itself doesn't get updated, so use a marker timestamp */
   char *index_path = ralloc_asprintf(ctx, "%s/marker", dirname);

   struct stat attr;
   if (stat(index_path, &attr) == -1)
      goto finish;

   time_t now = time(NULL);

   /* Do not delete anything if the cache has been modified in the past week */
   if (now - attr.st_mtime < 60 * 60 * 24 * 7)
      goto finish;

   delete_dir(dirname);

finish:
   ralloc_free(ctx);
}
#endif

#endif /* ENABLE_SHADER_CACHE */