/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_QueueManager_DEFINED
#define skgpu_graphite_QueueManager_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/gpu/graphite/GraphiteTypes.h"
#include "include/private/base/SkDeque.h"
#include "include/private/base/SkTArray.h"
#include "src/core/SkTHash.h"

#include <memory>
#include <vector>

namespace skgpu::graphite {

class Buffer;
class CommandBuffer;
class Context;
class GpuWorkSubmission;
struct InsertRecordingInfo;
class ResourceProvider;
class SharedContext;
class Task;
class UploadBufferManager;

/**
 * The QueueManager class manages all our command buffers, making sure they are submitted to the
 * GPU in the correct order. Each backend subclasses this class in order to specialize how it gets
 * new command buffers and how they are submitted.
 *
 * The class also supports sending commands to either a protected command buffer or a non-protected
 * command buffer. When we are in a non-protected Context, all commands will use non-protected
 * command buffers. When we are in a protected Context, the majority of commands will go to a
 * protected command buffer (e.g. everything coming in via addRecording). However, there are cases
 * where we need to record some commands in a non-protected command buffer. One specific example is
 * uploading data to a buffer that is read in the vertex shader. Protected memory is not allowed to
 * be accessed in a vertex shader, so such buffers must be non-protected. That means if we need to
 * copy data into these buffers, those copy operations must occur in a non-protected command
 * buffer. Currently, the only way to request a command buffer that does not match the
 * protectedness of the Context is to call addTask directly here. We do not support intermixing
 * calls to protected and non-protected command buffers without submitting in between. If you want
 * to switch which type of command is being recorded, you must make sure to call submitToGpu
 * before recording the new commands.
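 *
 * As a rough illustrative sketch (hypothetical caller code; the queueManager, context,
 * uploadTask, and info names are placeholders, not part of this header), switching
 * protectedness might look like:
 *
 *   // In a protected Context, first record copies into non-protected vertex buffers.
 *   if (!queueManager->addTask(uploadTask.get(), context, Protected::kNo)) { return false; }
 *   // Flush the non-protected work before recording protected commands again.
 *   if (!queueManager->submitToGpu()) { return false; }
 *   // Later recordings go back to the Context's protected command buffer.
 *   if (!queueManager->addRecording(info, context)) { return false; }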
 */
class QueueManager {
public:
    virtual ~QueueManager();

    // Adds the commands from the passed in Recording to the current CommandBuffer
    [[nodiscard]] bool addRecording(const InsertRecordingInfo&, Context*);

    // Adds the commands from the passed in Task to the current CommandBuffer
    [[nodiscard]] bool addTask(Task*, Context*, Protected);

    // Adds a proc that will be called when the current CommandBuffer is submitted and finishes
    [[nodiscard]] bool addFinishInfo(const InsertFinishInfo&,
                                     ResourceProvider*,
                                     SkSpan<const sk_sp<Buffer>> buffersToAsyncMap = {});

    [[nodiscard]] bool submitToGpu();
    [[nodiscard]] bool hasUnfinishedGpuWork();
    void checkForFinishedWork(SyncToCpu);

#if defined(GPU_TEST_UTILS)
    virtual void startCapture() {}
    virtual void stopCapture() {}
#endif

    void returnCommandBuffer(std::unique_ptr<CommandBuffer>);

    virtual void tick() const {}

    void addUploadBufferManagerRefs(UploadBufferManager*);

protected:
    QueueManager(const SharedContext* sharedContext);

    using OutstandingSubmission = std::unique_ptr<GpuWorkSubmission>;

    const SharedContext* fSharedContext;
    std::unique_ptr<CommandBuffer> fCurrentCommandBuffer;

private:
    virtual std::unique_ptr<CommandBuffer> getNewCommandBuffer(ResourceProvider*, Protected) = 0;
    virtual OutstandingSubmission onSubmitToGpu() = 0;

    bool setupCommandBuffer(ResourceProvider*, Protected);

    std::vector<std::unique_ptr<CommandBuffer>>* getAvailableCommandBufferList(Protected);

    SkDeque fOutstandingSubmissions;

    std::vector<std::unique_ptr<CommandBuffer>> fAvailableCommandBuffers;
    std::vector<std::unique_ptr<CommandBuffer>> fAvailableProtectedCommandBuffers;

    skia_private::THashMap<uint32_t, uint32_t> fLastAddedRecordingIDs;
};

} // namespace skgpu::graphite

#endif // skgpu_graphite_QueueManager_DEFINED