//
// Copyright 2017 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Resource:
//    Resource lifetime tracking in the Vulkan back-end.
//

#ifndef LIBANGLE_RENDERER_VULKAN_RESOURCEVK_H_
#define LIBANGLE_RENDERER_VULKAN_RESOURCEVK_H_

#include "common/FixedQueue.h"
#include "common/SimpleMutex.h"
#include "libANGLE/HandleAllocator.h"
#include "libANGLE/renderer/vulkan/vk_utils.h"

#include <queue>

namespace rx
{
namespace vk
{
// We expect that almost all reasonable usage cases will have at most 4 current contexts. When that
// is exceeded, things still work, but storage will grow.
static constexpr size_t kMaxFastQueueSerials = 4;
// Serials is an array of queue serials which, when paired with the index of each serial in the
// array, yields QueueSerials. The array may expand if needed. Since it is owned by the Resource
// object, which is protected by a shared lock, it is safe to reallocate storage if needed. When it
// is passed to the renderer at garbage collection time, we make a copy. The array size is expected
// to be small, but if in the future the array grows too big, we can switch to a packed array of
// QueueSerials.
using Serials = angle::FastVector<Serial, kMaxFastQueueSerials>;
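// For illustration (not part of the API): a Serials value of {5, 3} records that the resource was
// last used at Serial 5 on queue index 0 and at Serial 3 on queue index 1; each (index, serial)
// pair corresponds to one QueueSerial.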

// Tracks how a resource is used by ANGLE and by a VkQueue. The serial indicates the most recent use
// of a resource in the VkQueue. We use the monotonically incrementing serial number to determine if
// a resource is currently in use.
class ResourceUse final
{
  public:
    ResourceUse()  = default;
    ~ResourceUse() = default;

    ResourceUse(const QueueSerial &queueSerial) { setQueueSerial(queueSerial); }
    ResourceUse(const Serials &otherSerials) { mSerials = otherSerials; }

    // Copy constructor
    ResourceUse(const ResourceUse &other) : mSerials(other.mSerials) {}
    ResourceUse &operator=(const ResourceUse &other)
    {
        mSerials = other.mSerials;
        return *this;
    }

    // Move constructor
    ResourceUse(ResourceUse &&other) : mSerials(other.mSerials) { other.mSerials.clear(); }
    ResourceUse &operator=(ResourceUse &&other)
    {
        mSerials = other.mSerials;
        other.mSerials.clear();
        return *this;
    }

    bool valid() const { return mSerials.size() > 0; }

    void reset() { mSerials.clear(); }

    const Serials &getSerials() const { return mSerials; }

    void setSerial(SerialIndex index, Serial serial)
    {
        ASSERT(index != kInvalidQueueSerialIndex);
        if (ANGLE_UNLIKELY(mSerials.size() <= index))
        {
            mSerials.resize(index + 1, kZeroSerial);
        }
        ASSERT(mSerials[index] <= serial);
        mSerials[index] = serial;
    }

    void setQueueSerial(const QueueSerial &queueSerial)
    {
        setSerial(queueSerial.getIndex(), queueSerial.getSerial());
    }

    // Returns true if at least one of this object's serials is greater than the corresponding
    // serial in |serials|.
    bool operator>(const AtomicQueueSerialFixedArray &serials) const
    {
        ASSERT(mSerials.size() <= serials.size());
        for (SerialIndex i = 0; i < mSerials.size(); ++i)
        {
            if (mSerials[i] > serials[i])
            {
                return true;
            }
        }
        return false;
    }

    // Returns true if this object contains a serial greater than |queueSerial|.
    bool operator>(const QueueSerial &queueSerial) const
    {
        return mSerials.size() > queueSerial.getIndex() &&
               mSerials[queueSerial.getIndex()] > queueSerial.getSerial();
    }
    bool operator>=(const QueueSerial &queueSerial) const
    {
        return mSerials.size() > queueSerial.getIndex() &&
               mSerials[queueSerial.getIndex()] >= queueSerial.getSerial();
    }

    // Returns true if all of this object's serials are less than or equal to the corresponding
    // serials in |serials|.
    bool operator<=(const AtomicQueueSerialFixedArray &serials) const
    {
        ASSERT(mSerials.size() <= serials.size());
        for (SerialIndex i = 0; i < mSerials.size(); ++i)
        {
            if (mSerials[i] > serials[i])
            {
                return false;
            }
        }
        return true;
    }

    bool usedByCommandBuffer(const QueueSerial &commandBufferQueueSerial) const
    {
        ASSERT(commandBufferQueueSerial.valid());
        // Return true if we have the exact queue serial in the array.
        return mSerials.size() > commandBufferQueueSerial.getIndex() &&
               mSerials[commandBufferQueueSerial.getIndex()] ==
                   commandBufferQueueSerial.getSerial();
    }

    // Merge other's serials into this object.
    void merge(const ResourceUse &other)
    {
        if (mSerials.size() < other.mSerials.size())
        {
            mSerials.resize(other.mSerials.size(), kZeroSerial);
        }

        for (SerialIndex i = 0; i < other.mSerials.size(); ++i)
        {
            if (mSerials[i] < other.mSerials[i])
            {
                mSerials[i] = other.mSerials[i];
            }
        }
    }

  private:
    // The most recent time of use in a VkQueue.
    Serials mSerials;
};
std::ostream &operator<<(std::ostream &os, const ResourceUse &use);
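
// Illustrative usage sketch, not code from ANGLE itself. |currentSubmit| (a QueueSerial) and
// |lastCompleted| (an AtomicQueueSerialFixedArray of completed serials) are hypothetical values a
// caller would provide:
//
//   ResourceUse use;
//   use.setQueueSerial(currentSubmit);  // record the most recent use
//   if (use > lastCompleted)
//   {
//       // At least one queue has not finished with the resource; it must not be destroyed yet
//       // (e.g., hand it to the SharedGarbageList below instead).
//   }
//   else
//   {
//       // Every recorded use has completed; the resource is safe to destroy.
//   }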

class SharedGarbage final : angle::NonCopyable
{
  public:
    SharedGarbage();
    SharedGarbage(SharedGarbage &&other);
    SharedGarbage(const ResourceUse &use, GarbageObjects &&garbage);
    ~SharedGarbage();
    SharedGarbage &operator=(SharedGarbage &&rhs);

    bool destroyIfComplete(Renderer *renderer);
    bool hasResourceUseSubmitted(Renderer *renderer) const;
    // This is currently unused.
    VkDeviceSize getSize() const { return 0; }

  private:
    ResourceUse mLifetime;
    GarbageObjects mGarbage;
};

// SharedGarbageList tracks garbage using angle::FixedQueue. It allows concurrent add (i.e.,
// enqueue) and cleanup (i.e., dequeue) operations from two threads. Add calls from two threads are
// synchronized with one mutex, and cleanup calls from two threads are synchronized with a separate
// mutex.
template <class T>
class SharedGarbageList final : angle::NonCopyable
{
  public:
    SharedGarbageList()
        : mSubmittedQueue(kInitialQueueCapacity),
          mUnsubmittedQueue(kInitialQueueCapacity),
          mTotalSubmittedGarbageBytes(0),
          mTotalUnsubmittedGarbageBytes(0),
          mTotalGarbageDestroyed(0)
    {}
    ~SharedGarbageList()
    {
        ASSERT(mSubmittedQueue.empty());
        ASSERT(mUnsubmittedQueue.empty());
    }

    void add(Renderer *renderer, T &&garbage)
    {
        VkDeviceSize size = garbage.getSize();
        if (garbage.destroyIfComplete(renderer))
        {
            mTotalGarbageDestroyed += size;
        }
        else
        {
            std::unique_lock<angle::SimpleMutex> enqueueLock(mMutex);
            if (garbage.hasResourceUseSubmitted(renderer))
            {
                addGarbageLocked(mSubmittedQueue, std::move(garbage));
                mTotalSubmittedGarbageBytes += size;
            }
            else
            {
                addGarbageLocked(mUnsubmittedQueue, std::move(garbage));
                // We use relaxed ordering here since the value is always modified under mMutex.
                // The atomic is only there to keep TSan happy.
                mTotalUnsubmittedGarbageBytes.fetch_add(size, std::memory_order_relaxed);
            }
        }
    }

    bool empty() const { return mSubmittedQueue.empty() && mUnsubmittedQueue.empty(); }
    VkDeviceSize getSubmittedGarbageSize() const
    {
        return mTotalSubmittedGarbageBytes.load(std::memory_order_consume);
    }
    VkDeviceSize getUnsubmittedGarbageSize() const
    {
        return mTotalUnsubmittedGarbageBytes.load(std::memory_order_consume);
    }
    VkDeviceSize getDestroyedGarbageSize() const
    {
        return mTotalGarbageDestroyed.load(std::memory_order_consume);
    }
    void resetDestroyedGarbageSize() { mTotalGarbageDestroyed = 0; }

    // Returns the number of bytes destroyed.
    VkDeviceSize cleanupSubmittedGarbage(Renderer *renderer)
    {
        std::unique_lock<angle::SimpleMutex> lock(mSubmittedQueueDequeueMutex);
        VkDeviceSize bytesDestroyed = 0;
        while (!mSubmittedQueue.empty())
        {
            T &garbage        = mSubmittedQueue.front();
            VkDeviceSize size = garbage.getSize();
            if (!garbage.destroyIfComplete(renderer))
            {
                break;
            }
            bytesDestroyed += size;
            mSubmittedQueue.pop();
        }
        mTotalSubmittedGarbageBytes -= bytesDestroyed;
        mTotalGarbageDestroyed += bytesDestroyed;
        return bytesDestroyed;
    }

    // Check whether garbage in the unsubmitted queue is still pending submission. If not, move it
    // to the submitted queue; otherwise move the element to the back of the queue. Note that this
    // call takes both of this list's locks. Since it is only used for the pending-submission
    // garbage list, which only stores garbage temporarily, it does not destroy garbage in this
    // list. Moving garbage around is expected to be cheap in general, so lock contention is not
    // expected.
    void cleanupUnsubmittedGarbage(Renderer *renderer)
    {
        std::unique_lock<angle::SimpleMutex> enqueueLock(mMutex);
        size_t count            = mUnsubmittedQueue.size();
        VkDeviceSize bytesMoved = 0;
        for (size_t i = 0; i < count; i++)
        {
            T &garbage = mUnsubmittedQueue.front();
            if (garbage.hasResourceUseSubmitted(renderer))
            {
                bytesMoved += garbage.getSize();
                addGarbageLocked(mSubmittedQueue, std::move(garbage));
            }
            else
            {
                mUnsubmittedQueue.push(std::move(garbage));
            }
            mUnsubmittedQueue.pop();
        }
        mTotalUnsubmittedGarbageBytes -= bytesMoved;
        mTotalSubmittedGarbageBytes += bytesMoved;
    }

  private:
    void addGarbageLocked(angle::FixedQueue<T> &queue, T &&garbage)
    {
        // Expand the queue storage if we only have one empty slot left. That one empty slot is
        // required by cleanupUnsubmittedGarbage so that we do not need to allocate another
        // temporary storage.
        if (queue.size() >= queue.capacity() - 1)
        {
            std::unique_lock<angle::SimpleMutex> dequeueLock(mSubmittedQueueDequeueMutex);
            size_t newCapacity = queue.capacity() << 1;
            queue.updateCapacity(newCapacity);
        }
        queue.push(std::move(garbage));
    }

    static constexpr size_t kInitialQueueCapacity = 64;
    // Protects both enqueue and dequeue of mUnsubmittedQueue, as well as enqueue of
    // mSubmittedQueue.
    angle::SimpleMutex mMutex;
    // Protects dequeue of mSubmittedQueue, which is expected to be more expensive.
    angle::SimpleMutex mSubmittedQueueDequeueMutex;
    // Holds garbage whose uses have all been submitted to the renderer.
    angle::FixedQueue<T> mSubmittedQueue;
    // Holds garbage for which at least one queue serial has not yet been submitted to the
    // renderer.
    angle::FixedQueue<T> mUnsubmittedQueue;
    // Total bytes of garbage in mSubmittedQueue.
    std::atomic<VkDeviceSize> mTotalSubmittedGarbageBytes;
    // Total bytes of garbage in mUnsubmittedQueue.
    std::atomic<VkDeviceSize> mTotalUnsubmittedGarbageBytes;
    // Total bytes of garbage destroyed since the last resetDestroyedGarbageSize call.
    std::atomic<VkDeviceSize> mTotalGarbageDestroyed;
};
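
// Illustrative usage sketch, not code from ANGLE itself. |renderer|, |use|, and |garbageObjects|
// are hypothetical values a caller would provide:
//
//   SharedGarbageList<SharedGarbage> garbageList;
//   garbageList.add(renderer, SharedGarbage(use, std::move(garbageObjects)));
//   ...
//   // Called periodically (e.g., after queue serials complete); destroys garbage at the front of
//   // the submitted queue whose uses have finished on the GPU.
//   VkDeviceSize bytesFreed = garbageList.cleanupSubmittedGarbage(renderer);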

// This is a helper class for back-end objects used in Vk command buffers. They keep a record
// of their use in ANGLE and VkQueues via ResourceUse.
class Resource : angle::NonCopyable
{
  public:
    virtual ~Resource() {}

    // Complete all recorded and in-flight commands involving this resource.
    angle::Result waitForIdle(ContextVk *contextVk,
                              const char *debugMessage,
                              RenderPassClosureReason reason);

    void setSerial(SerialIndex index, Serial serial) { mUse.setSerial(index, serial); }

    void setQueueSerial(const QueueSerial &queueSerial)
    {
        mUse.setSerial(queueSerial.getIndex(), queueSerial.getSerial());
    }

    void mergeResourceUse(const ResourceUse &use) { mUse.merge(use); }

    // Check if this resource is used by a command buffer.
    bool usedByCommandBuffer(const QueueSerial &commandBufferQueueSerial) const
    {
        return mUse.usedByCommandBuffer(commandBufferQueueSerial);
    }

    const ResourceUse &getResourceUse() const { return mUse; }

  protected:
    Resource() {}
    Resource(Resource &&other) : Resource() { mUse = std::move(other.mUse); }
    Resource &operator=(Resource &&rhs)
    {
        std::swap(mUse, rhs.mUse);
        return *this;
    }

    // Current resource lifetime.
    ResourceUse mUse;
};
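
// Illustrative usage sketch, not code from ANGLE itself. |commandBufferQueueSerial| is a
// hypothetical serial for the command buffer currently being recorded:
//
//   class BufferHelper : public Resource { /* ... */ };
//
//   bufferHelper.setQueueSerial(commandBufferQueueSerial);  // mark the use
//   ASSERT(bufferHelper.usedByCommandBuffer(commandBufferQueueSerial));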

// Similar to |Resource| above, this tracks object usage. This includes additional granularity to
// track whether an object is used for read-only or read/write access.
class ReadWriteResource : public Resource
{
  public:
    virtual ~ReadWriteResource() override {}

    // Complete all recorded and in-flight commands involving this resource.
    angle::Result waitForIdle(ContextVk *contextVk,
                              const char *debugMessage,
                              RenderPassClosureReason reason)
    {
        return Resource::waitForIdle(contextVk, debugMessage, reason);
    }

    void setWriteQueueSerial(const QueueSerial &writeQueueSerial)
    {
        mUse.setQueueSerial(writeQueueSerial);
        mWriteUse.setQueueSerial(writeQueueSerial);
    }

    // Check if this resource is used by a command buffer.
    bool usedByCommandBuffer(const QueueSerial &commandBufferQueueSerial) const
    {
        return mUse.usedByCommandBuffer(commandBufferQueueSerial);
    }
    bool writtenByCommandBuffer(const QueueSerial &commandBufferQueueSerial) const
    {
        return mWriteUse.usedByCommandBuffer(commandBufferQueueSerial);
    }

    const ResourceUse &getWriteResourceUse() const { return mWriteUse; }

  protected:
    ReadWriteResource() {}
    ReadWriteResource(ReadWriteResource &&other) { *this = std::move(other); }
    ReadWriteResource &operator=(ReadWriteResource &&other)
    {
        Resource::operator=(std::move(other));
        mWriteUse = std::move(other.mWriteUse);
        return *this;
    }

    // Tracks write use of the object. Only updated by setWriteQueueSerial().
    ResourceUse mWriteUse;
};
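
// Illustrative sketch: reads update only the general use, while setWriteQueueSerial() updates both
// the general and the write use, so reads and writes can be checked separately. |readSerial| and
// |writeSerial| are hypothetical QueueSerials:
//
//   resource.setQueueSerial(readSerial);        // record a read: updates only mUse
//   resource.setWriteQueueSerial(writeSerial);  // record a write: updates mUse and mWriteUse
//   // True only if |writeSerial| exactly matches a recorded write:
//   bool written = resource.writtenByCommandBuffer(writeSerial);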

// Adds "void release(Renderer *)" method for collecting garbage.
// Enables RendererScoped<> for classes that support DeviceScoped<>.
template <class T>
class ReleasableResource final : public Resource
{
  public:
    // Calls collectGarbage() on the object.
    void release(Renderer *renderer);

    const T &get() const { return mObject; }
    T &get() { return mObject; }

  private:
    T mObject;
};
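
// Illustrative usage sketch, not code from ANGLE itself; |renderer| is hypothetical and T is any
// wrapper object that supports collectGarbage():
//
//   ReleasableResource<Semaphore> semaphore;
//   // ... initialize and use semaphore.get() ...
//   semaphore.release(renderer);  // hands the wrapped object to the garbage collector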
}  // namespace vk
}  // namespace rx

#endif  // LIBANGLE_RENDERER_VULKAN_RESOURCEVK_H_