1 /*
2 * Copyright 2021 Google LLC
3 * SPDX-License-Identifier: MIT
4 */
5
6 #include "vn_renderer_internal.h"
7
/* 3 seconds (value in microseconds, matching os_time_get()) */
9 #define VN_RENDERER_SHMEM_CACHE_EXPIRACY (3ll * 1000 * 1000)
10
11 static void
vn_renderer_shmem_cache_dump(struct vn_renderer_shmem_cache * cache)12 vn_renderer_shmem_cache_dump(struct vn_renderer_shmem_cache *cache)
13 {
14 simple_mtx_lock(&cache->mutex);
15
16 vn_log(NULL, "dumping renderer shmem cache");
17 vn_log(NULL, " cache skip: %d", cache->debug.cache_skip_count);
18 vn_log(NULL, " cache hit: %d", cache->debug.cache_hit_count);
19 vn_log(NULL, " cache miss: %d", cache->debug.cache_miss_count);
20
21 uint32_t bucket_mask = cache->bucket_mask;
22 while (bucket_mask) {
23 const int idx = u_bit_scan(&bucket_mask);
24 const struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];
25 uint32_t count = 0;
26 list_for_each_entry(struct vn_renderer_shmem, shmem, &bucket->shmems,
27 cache_head)
28 count++;
29 if (count)
30 vn_log(NULL, " buckets[%d]: %d shmems", idx, count);
31 }
32
33 simple_mtx_unlock(&cache->mutex);
34 }
35
36 void
vn_renderer_shmem_cache_init(struct vn_renderer_shmem_cache * cache,struct vn_renderer * renderer,vn_renderer_shmem_cache_destroy_func destroy_func)37 vn_renderer_shmem_cache_init(struct vn_renderer_shmem_cache *cache,
38 struct vn_renderer *renderer,
39 vn_renderer_shmem_cache_destroy_func destroy_func)
40 {
41 /* cache->bucket_mask is 32-bit and u_bit_scan is used */
42 static_assert(ARRAY_SIZE(cache->buckets) <= 32, "");
43
44 cache->renderer = renderer;
45 cache->destroy_func = destroy_func;
46
47 simple_mtx_init(&cache->mutex, mtx_plain);
48
49 for (uint32_t i = 0; i < ARRAY_SIZE(cache->buckets); i++) {
50 struct vn_renderer_shmem_bucket *bucket = &cache->buckets[i];
51 list_inithead(&bucket->shmems);
52 }
53
54 cache->initialized = true;
55 }
56
57 void
vn_renderer_shmem_cache_fini(struct vn_renderer_shmem_cache * cache)58 vn_renderer_shmem_cache_fini(struct vn_renderer_shmem_cache *cache)
59 {
60 if (!cache->initialized)
61 return;
62
63 if (VN_DEBUG(CACHE))
64 vn_renderer_shmem_cache_dump(cache);
65
66 while (cache->bucket_mask) {
67 const int idx = u_bit_scan(&cache->bucket_mask);
68 struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];
69
70 list_for_each_entry_safe(struct vn_renderer_shmem, shmem,
71 &bucket->shmems, cache_head)
72 cache->destroy_func(cache->renderer, shmem);
73 }
74
75 simple_mtx_destroy(&cache->mutex);
76 }
77
78 static struct vn_renderer_shmem_bucket *
choose_bucket(struct vn_renderer_shmem_cache * cache,size_t size,int * out_idx)79 choose_bucket(struct vn_renderer_shmem_cache *cache,
80 size_t size,
81 int *out_idx)
82 {
83 assert(size);
84 if (unlikely(!util_is_power_of_two_or_zero64(size)))
85 return NULL;
86
87 const uint32_t idx = ffsll(size) - 1;
88 if (unlikely(idx >= ARRAY_SIZE(cache->buckets)))
89 return NULL;
90
91 *out_idx = idx;
92 return &cache->buckets[idx];
93 }
94
/* Destroy cached shmems whose timestamps are older than
 * VN_RENDERER_SHMEM_CACHE_EXPIRACY relative to \p now, but always keep at
 * least the most recently added shmem of each bucket.  Caller must hold
 * cache->mutex.
 */
static void
vn_renderer_shmem_cache_remove_expired_locked(
   struct vn_renderer_shmem_cache *cache, int64_t now)
{
   uint32_t bucket_mask = cache->bucket_mask;
   while (bucket_mask) {
      const int idx = u_bit_scan(&bucket_mask);
      struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];

      /* a set bit in bucket_mask implies a non-empty bucket */
      assert(!list_is_empty(&bucket->shmems));
      const struct vn_renderer_shmem *last_shmem = list_last_entry(
         &bucket->shmems, struct vn_renderer_shmem, cache_head);

      /* remove expired shmems but keep at least the last one.  Entries are
       * appended in timestamp order, so the first unexpired entry ends the
       * scan for this bucket.  Keeping last_shmem also means the bucket
       * never empties here, so bucket_mask stays valid.
       */
      list_for_each_entry_safe(struct vn_renderer_shmem, shmem,
                               &bucket->shmems, cache_head) {
         if (shmem == last_shmem ||
             now - shmem->cache_timestamp < VN_RENDERER_SHMEM_CACHE_EXPIRACY)
            break;

         list_del(&shmem->cache_head);
         cache->destroy_func(cache->renderer, shmem);
      }
   }
}
120
121 bool
vn_renderer_shmem_cache_add(struct vn_renderer_shmem_cache * cache,struct vn_renderer_shmem * shmem)122 vn_renderer_shmem_cache_add(struct vn_renderer_shmem_cache *cache,
123 struct vn_renderer_shmem *shmem)
124 {
125 assert(!vn_refcount_is_valid(&shmem->refcount));
126
127 int idx;
128 struct vn_renderer_shmem_bucket *bucket =
129 choose_bucket(cache, shmem->mmap_size, &idx);
130 if (!bucket)
131 return false;
132
133 const int64_t now = os_time_get();
134 shmem->cache_timestamp = now;
135
136 simple_mtx_lock(&cache->mutex);
137
138 vn_renderer_shmem_cache_remove_expired_locked(cache, now);
139
140 list_addtail(&shmem->cache_head, &bucket->shmems);
141 cache->bucket_mask |= 1 << idx;
142
143 simple_mtx_unlock(&cache->mutex);
144
145 return true;
146 }
147
148 struct vn_renderer_shmem *
vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache * cache,size_t size)149 vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache *cache,
150 size_t size)
151 {
152 int idx;
153 struct vn_renderer_shmem_bucket *bucket = choose_bucket(cache, size, &idx);
154 if (!bucket) {
155 VN_TRACE_SCOPE("shmem cache skip");
156 simple_mtx_lock(&cache->mutex);
157 cache->debug.cache_skip_count++;
158 simple_mtx_unlock(&cache->mutex);
159 return NULL;
160 }
161
162 struct vn_renderer_shmem *shmem = NULL;
163
164 simple_mtx_lock(&cache->mutex);
165 if (cache->bucket_mask & (1 << idx)) {
166 assert(!list_is_empty(&bucket->shmems));
167 shmem = list_first_entry(&bucket->shmems, struct vn_renderer_shmem,
168 cache_head);
169 list_del(&shmem->cache_head);
170
171 if (list_is_empty(&bucket->shmems))
172 cache->bucket_mask &= ~(1 << idx);
173
174 cache->debug.cache_hit_count++;
175 } else {
176 VN_TRACE_SCOPE("shmem cache miss");
177 cache->debug.cache_miss_count++;
178 }
179 simple_mtx_unlock(&cache->mutex);
180
181 return shmem;
182 }
183