/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pb_cache.h"
#include "util/u_memory.h"
#include "util/os_time.h"

/*
 * Helper function for detecting timeouts, taking overflow into account.
 *
 * Returns true if the current time has elapsed beyond the specified interval.
 */
static inline bool
time_timeout_ms(unsigned start, unsigned interval, unsigned curr)
{
   unsigned end = start + interval;

   if (start <= end)
      return !(start <= curr && curr < end);
   else
      return !((start <= curr) || (curr < end));
}
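
/*
 * Worked example of the wrap-around branch above: with start = 0xffffff00
 * and interval = 0x200, end overflows to 0x100, so start > end.  A current
 * time of 0xffffff80 or 0x80 lies inside the live window and is not timed
 * out, while 0x200 falls outside it and is.
 */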

static unsigned
time_get_ms(struct pb_cache *mgr)
{
   /* Return the time relative to msecs_base_time. */
   return os_time_get() / 1000 - mgr->msecs_base_time;
}

/* Recover the buffer that embeds the given cache entry (container_of-style
 * pointer arithmetic using the offset the driver passed to pb_cache_init).
 */
static struct pb_buffer_lean *
get_buffer(struct pb_cache *mgr, struct pb_cache_entry *entry)
{
   return (struct pb_buffer_lean*)((char*)entry - mgr->offsetof_pb_cache_entry);
}

/**
 * Actually destroy the buffer.
 */
static void
destroy_buffer_locked(struct pb_cache *mgr, struct pb_cache_entry *entry)
{
   struct pb_buffer_lean *buf = get_buffer(mgr, entry);

   assert(!pipe_is_referenced(&buf->reference));
   if (list_is_linked(&entry->head)) {
      list_del(&entry->head);
      assert(mgr->num_buffers);
      --mgr->num_buffers;
      mgr->cache_size -= buf->size;
   }
   mgr->destroy_buffer(mgr->winsys, buf);
}

/**
 * Free all expired buffers from the front of the list (entries are kept in
 * insertion order, so the oldest come first).
 */
static void
release_expired_buffers_locked(struct pb_cache *mgr, struct list_head *cache,
                               unsigned current_time_ms)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *entry;

   curr = cache->next;
   next = curr->next;
   while (curr != cache) {
      entry = list_entry(curr, struct pb_cache_entry, head);

      if (!time_timeout_ms(entry->start_ms, mgr->msecs,
                           current_time_ms))
         break;

      destroy_buffer_locked(mgr, entry);

      curr = next;
      next = curr->next;
   }
}

/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache *mgr, struct pb_cache_entry *entry)
{
   struct list_head *cache = &mgr->buckets[entry->bucket_index];
   struct pb_buffer_lean *buf = get_buffer(mgr, entry);
   unsigned i;

   simple_mtx_lock(&mgr->mutex);
   assert(!pipe_is_referenced(&buf->reference));

   unsigned current_time_ms = time_get_ms(mgr);

   for (i = 0; i < mgr->num_heaps; i++)
      release_expired_buffers_locked(mgr, &mgr->buckets[i], current_time_ms);

   /* Destroy the buffer directly if caching it would exceed the size limit. */
   if (mgr->cache_size + buf->size > mgr->max_cache_size) {
      mgr->destroy_buffer(mgr->winsys, buf);
      simple_mtx_unlock(&mgr->mutex);
      return;
   }

   entry->start_ms = time_get_ms(mgr);
   list_addtail(&entry->head, cache);
   ++mgr->num_buffers;
   mgr->cache_size += buf->size;
   simple_mtx_unlock(&mgr->mutex);
}
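
/*
 * A sketch of how a driver's release path might use the function above
 * (driver_bo, its fields and driver_bo_destroy are hypothetical names,
 * not part of this file): when the last reference to a buffer goes away,
 * the driver hands it to the cache instead of freeing it immediately.
 *
 *    void driver_bo_destroy_or_cache(struct driver_bo *bo)
 *    {
 *       if (bo->use_reusable_pool)
 *          pb_cache_add_buffer(&bo->ws->bo_cache, &bo->cache_entry);
 *       else
 *          driver_bo_destroy(bo->ws, &bo->base);
 *    }
 */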

/**
 * \return 1   if compatible and can be reclaimed
 *         0   if incompatible
 *        -1   if compatible and can't be reclaimed
 */
static int
pb_cache_is_buffer_compat(struct pb_cache *mgr, struct pb_cache_entry *entry,
                          pb_size size, unsigned alignment, unsigned usage)
{
   struct pb_buffer_lean *buf = get_buffer(mgr, entry);

   if (!pb_check_usage(usage, buf->usage))
      return 0;

   /* be lenient with size */
   if (buf->size < size ||
       buf->size > (unsigned) (mgr->size_factor * size))
      return 0;

   if (usage & mgr->bypass_usage)
      return 0;

   if (!pb_check_alignment(alignment, 1u << buf->alignment_log2))
      return 0;

   return mgr->can_reclaim(mgr->winsys, buf) ? 1 : -1;
}
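
/*
 * Example of the size leniency above: with size_factor = 2.0f, a request
 * for 1000 bytes matches any cached buffer of 1000-2000 bytes, trading a
 * bounded amount of wasted memory for a higher cache hit rate.
 */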

/**
 * Find a compatible buffer in the cache, return it, and remove it
 * from the cache.
 */
struct pb_buffer_lean *
pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
                        unsigned alignment, unsigned usage,
                        unsigned bucket_index)
{
   struct pb_cache_entry *entry;
   struct pb_cache_entry *cur_entry;
   struct list_head *cur, *next;
   int ret = 0;

   assert(bucket_index < mgr->num_heaps);
   struct list_head *cache = &mgr->buckets[bucket_index];

   simple_mtx_lock(&mgr->mutex);

   entry = NULL;
   cur = cache->next;
   next = cur->next;

   /* search in the expired buffers, freeing them in the process */
   unsigned now = time_get_ms(mgr);
   while (cur != cache) {
      cur_entry = list_entry(cur, struct pb_cache_entry, head);

      if (!entry && (ret = pb_cache_is_buffer_compat(mgr, cur_entry, size,
                                                     alignment, usage)) > 0)
         entry = cur_entry;
      else if (time_timeout_ms(cur_entry->start_ms, mgr->msecs, now))
         destroy_buffer_locked(mgr, cur_entry);
      else
         /* this buffer and all those after it are still hot in the cache */
         break;

      /* the buffer is busy (and probably all remaining ones too) */
      if (ret == -1)
         break;

      cur = next;
      next = cur->next;
   }

   /* keep searching in the hot buffers */
   if (!entry && ret != -1) {
      while (cur != cache) {
         cur_entry = list_entry(cur, struct pb_cache_entry, head);
         ret = pb_cache_is_buffer_compat(mgr, cur_entry, size, alignment, usage);

         if (ret > 0) {
            entry = cur_entry;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         cur = next;
         next = cur->next;
      }
   }

   /* found a compatible buffer, return it */
   if (entry) {
      struct pb_buffer_lean *buf = get_buffer(mgr, entry);

      mgr->cache_size -= buf->size;
      list_del(&entry->head);
      --mgr->num_buffers;
      simple_mtx_unlock(&mgr->mutex);
      /* cached buffers are unreferenced, so bring the refcount back to 1 */
      pipe_reference_init(&buf->reference, 1);
      return buf;
   }

   simple_mtx_unlock(&mgr->mutex);
   return NULL;
}
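
/*
 * Typical allocation path (a sketch; driver_create_buffer is a hypothetical
 * winsys allocator, not part of this file): try the cache first and fall
 * back to a fresh allocation on a miss.
 *
 *    struct pb_buffer_lean *buf =
 *       pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage, 0);
 *    if (!buf)
 *       buf = driver_create_buffer(ws, size, alignment, usage);
 */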

/**
 * Empty the cache. Useful when there is not enough memory.
 */
unsigned
pb_cache_release_all_buffers(struct pb_cache *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *buf;
   unsigned num_reclaims = 0;
   unsigned i;

   simple_mtx_lock(&mgr->mutex);
   for (i = 0; i < mgr->num_heaps; i++) {
      struct list_head *cache = &mgr->buckets[i];

      curr = cache->next;
      next = curr->next;
      while (curr != cache) {
         buf = list_entry(curr, struct pb_cache_entry, head);
         destroy_buffer_locked(mgr, buf);
         num_reclaims++;
         curr = next;
         next = curr->next;
      }
   }
   simple_mtx_unlock(&mgr->mutex);
   return num_reclaims;
}

/**
 * Initialize a cache entry that is embedded in a driver buffer.
 */
void
pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
                    struct pb_buffer_lean *buf, unsigned bucket_index)
{
   assert(bucket_index < mgr->num_heaps);

   memset(entry, 0, sizeof(*entry));
   entry->bucket_index = bucket_index;
}

/**
 * Initialize a caching buffer manager.
 *
 * @param mgr     The cache buffer manager
 * @param num_heaps  Number of separate caches/buckets indexed by bucket_index
 *                   for faster buffer matching (alternative to slower
 *                   "usage"-based matching).
 * @param usecs   Unused buffers may be released from the cache after this
 *                time (in microseconds)
 * @param size_factor  Buffers up to size_factor times larger than the
 *                     requested size still count as cache hits.
 * @param bypass_usage  Bitmask. If (requested usage & bypass_usage) != 0,
 *                      the request bypasses the cache and no buffer is
 *                      reclaimed.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache can
 *                            hold.
 * @param offsetof_pb_cache_entry  offsetof(driver_bo, pb_cache_entry)
 * @param destroy_buffer  Function that destroys a buffer for good.
 * @param can_reclaim     Whether a buffer can be reclaimed (e.g. is not busy)
 */
void
pb_cache_init(struct pb_cache *mgr, unsigned num_heaps,
              unsigned usecs, float size_factor,
              unsigned bypass_usage, uint64_t maximum_cache_size,
              unsigned offsetof_pb_cache_entry, void *winsys,
              void (*destroy_buffer)(void *winsys, struct pb_buffer_lean *buf),
              bool (*can_reclaim)(void *winsys, struct pb_buffer_lean *buf))
{
   unsigned i;

   mgr->buckets = CALLOC(num_heaps, sizeof(struct list_head));
   if (!mgr->buckets)
      return;

   for (i = 0; i < num_heaps; i++)
      list_inithead(&mgr->buckets[i]);

   (void) simple_mtx_init(&mgr->mutex, mtx_plain);
   mgr->winsys = winsys;
   mgr->cache_size = 0;
   mgr->max_cache_size = maximum_cache_size;
   mgr->num_heaps = num_heaps;
   mgr->msecs = usecs / 1000;
   mgr->msecs_base_time = os_time_get() / 1000;
   mgr->num_buffers = 0;
   mgr->bypass_usage = bypass_usage;
   mgr->size_factor = size_factor;
   mgr->offsetof_pb_cache_entry = offsetof_pb_cache_entry;
   mgr->destroy_buffer = destroy_buffer;
   mgr->can_reclaim = can_reclaim;
}
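
/*
 * Example initialization (a sketch: the timeout, size factor and cap are
 * illustrative values; driver_bo, driver_destroy_buffer and
 * driver_can_reclaim are hypothetical driver-side names):
 *
 *    pb_cache_init(&ws->bo_cache, 1,
 *                  1000000,            // keep unused buffers ~1 second
 *                  2.0f,               // accept buffers up to 2x the size
 *                  0,                  // no usage bits bypass the cache
 *                  256 * 1024 * 1024,  // cache at most 256 MB of buffers
 *                  offsetof(struct driver_bo, cache_entry),
 *                  ws, driver_destroy_buffer, driver_can_reclaim);
 */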

/**
 * Deinitialize the manager completely.
 */
void
pb_cache_deinit(struct pb_cache *mgr)
{
   pb_cache_release_all_buffers(mgr);
   simple_mtx_destroy(&mgr->mutex);
   FREE(mgr->buckets);
   mgr->buckets = NULL;
}
348