/*
 * Copyright 2019 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrRenderTask_DEFINED
#define GrRenderTask_DEFINED

#include "include/core/SkRefCnt.h"
#include "include/gpu/GpuTypes.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkSpan_impl.h"
#include "include/private/base/SkTArray.h"
#include "include/private/base/SkTo.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/base/SkTInternalLList.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrSurfaceProxyView.h"

#include <cstdint>

class GrDrawingManager;
class GrOpFlushState;
class GrRecordingContext;
class GrResourceAllocator;
class GrSurfaceProxy;
class GrTextureProxy;
class GrTextureResolveManager;
class GrTextureResolveRenderTask;
class SkString;
struct SkIRect;
namespace skgpu::ganesh {
class OpsTask;
}

// This class abstracts a task that targets a single GrSurfaceProxy, participates in the
// GrDrawingManager's DAG, and implements the onExecute method to modify its target proxy's
// contents. (e.g., an opsTask that executes a command buffer, a task to regenerate mipmaps, etc.)
class GrRenderTask : public SkRefCnt {
public:
    GrRenderTask();
    SkDEBUGCODE(~GrRenderTask() override;)

    void makeClosed(GrRecordingContext*);

    void prePrepare(GrRecordingContext* context) { this->onPrePrepare(context); }

    // These two methods are only invoked at flush time
    void prepare(GrOpFlushState* flushState);
    bool execute(GrOpFlushState* flushState) { return this->onExecute(flushState); }
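
    // A minimal sketch of the order in which the drawing manager drives these entry points at
    // flush time (illustrative only; 'task', 'rContext', and 'flushState' are hypothetical
    // locals, not part of this interface):
    //
    //   task->makeClosed(rContext);   // seal the task; it can accept no more dependencies
    //   task->prepare(flushState);    // onPrepare(): set up any data needed for execution
    //   task->execute(flushState);    // onExecute(): modify the target proxy's contents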

    virtual bool requiresExplicitCleanup() const { return false; }

    // Called when this class will survive a flush and needs to truncate its ops and start over.
    // TODO: ultimately it should be invalid for an op list to survive a flush.
    // https://bugs.chromium.org/p/skia/issues/detail?id=7111
    virtual void endFlush(GrDrawingManager*) {}

    // This method "disowns" all the GrSurfaceProxies this RenderTask modifies. In
    // practice this just means telling the drawingManager to forget the relevant
    // mappings from surface proxy to last modifying rendertask.
    virtual void disown(GrDrawingManager*);

    bool isClosed() const { return this->isSetFlag(kClosed_Flag); }

    /**
     * Make this task skippable. This must be used purely for optimization purposes at this point,
     * as not all tasks will actually skip their work. It would be better if we could detect tasks
     * that can be skipped automatically. We'd need to support minimal flushes (i.e., only flush
     * that which is required for SkSurfaces/SkImages) and the ability to detect "orphaned tasks"
     * and clean them out from the DAG so they don't indefinitely accumulate. Finally, we'd
     * probably have to track whether a proxy's backing store was imported or ever exported to the
     * client in case the client is doing direct reads outside of Skia and thus may require tasks
     * targeting the proxy to execute even if our DAG contains no reads.
     */
    void makeSkippable();

    bool isSkippable() const { return this->isSetFlag(kSkippable_Flag); }

    /** If true no other task should be reordered relative to this task. */
    bool blocksReordering() const { return this->isSetFlag(kBlocksReordering_Flag); }

    /*
     * Notify this GrRenderTask that it relies on the contents of 'dependedOn'
     */
    void addDependency(GrDrawingManager*,
                       GrSurfaceProxy* dependedOn,
                       skgpu::Mipmapped,
                       GrTextureResolveManager,
                       const GrCaps& caps);

    /*
     * Notify this GrRenderTask that it relies on the contents of all GrRenderTasks which otherTask
     * depends on.
     */
    void addDependenciesFromOtherTask(GrRenderTask* otherTask);

    SkSpan<GrRenderTask*> dependencies() { return SkSpan(fDependencies); }
    SkSpan<GrRenderTask*> dependents() { return SkSpan(fDependents); }

    void replaceDependency(const GrRenderTask* toReplace, GrRenderTask* replaceWith);
    void replaceDependent(const GrRenderTask* toReplace, GrRenderTask* replaceWith);

    /*
     * Does this renderTask depend on 'dependedOn'?
     */
    bool dependsOn(const GrRenderTask* dependedOn) const;

    uint32_t uniqueID() const { return fUniqueID; }
    int numTargets() const { return fTargets.size(); }
    GrSurfaceProxy* target(int i) const { return fTargets[i].get(); }

    /*
     * Safely cast this GrRenderTask to an OpsTask (if possible).
     */
    virtual skgpu::ganesh::OpsTask* asOpsTask() { return nullptr; }

#if defined(GPU_TEST_UTILS)
    /*
     * Dump out the GrRenderTask dependency DAG
     */
    virtual void dump(const SkString& label,
                      SkString indent,
                      bool printDependencies,
                      bool close) const;
    virtual const char* name() const = 0;
#endif

#ifdef SK_DEBUG
    virtual int numClips() const { return 0; }

    virtual void visitProxies_debugOnly(const GrVisitProxyFunc&) const = 0;

    void visitTargetAndSrcProxies_debugOnly(const GrVisitProxyFunc& func) const {
        this->visitProxies_debugOnly(func);
        for (const sk_sp<GrSurfaceProxy>& target : fTargets) {
            func(target.get(), skgpu::Mipmapped::kNo);
        }
    }
#endif

    bool isUsed(GrSurfaceProxy* proxy) const {
        for (const sk_sp<GrSurfaceProxy>& target : fTargets) {
            if (target.get() == proxy) {
                return true;
            }
        }

        return this->onIsUsed(proxy);
    }

    // Feed proxy usage intervals to the GrResourceAllocator class
    virtual void gatherProxyIntervals(GrResourceAllocator*) const = 0;

    // In addition to just the GrSurface being allocated, has the stencil buffer been allocated (if
    // it is required)?
    bool isInstantiated() const;

    // Used by GrRenderTaskCluster.
    SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrRenderTask);

#if defined(GPU_TEST_UTILS)
    const GrTextureResolveRenderTask* resolveTask() const { return fTextureResolveTask; }
#endif

protected:
    SkDEBUGCODE(bool deferredProxiesAreInstantiated() const;)

    // Add a target surface proxy to the list of targets for this task.
    // This also informs the drawing manager to update the lastRenderTask association.
    void addTarget(GrDrawingManager*, sk_sp<GrSurfaceProxy>);

    // Helper that adds the proxy owned by a view.
    void addTarget(GrDrawingManager* dm, const GrSurfaceProxyView& view) {
        this->addTarget(dm, view.refProxy());
    }
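
    // A rough sketch of how a derived task might register the proxies it touches (the names
    // 'drawingMgr', 'dstView', 'srcProxy', 'resolveManager', and 'caps' are illustrative, not
    // members of this class):
    //
    //   this->addTarget(drawingMgr, dstView);           // the proxy this task writes
    //   this->addDependency(drawingMgr, srcProxy,
    //                       skgpu::Mipmapped::kNo,
    //                       resolveManager, caps);      // a proxy this task reads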

    enum class ExpectedOutcome : bool {
        kTargetUnchanged,
        kTargetDirty,
    };

    // Performs any work to finalize this renderTask prior to execution. If returning
    // ExpectedOutcome::kTargetDirty, the caller is also responsible for filling out the area it
    // will modify in targetUpdateBounds.
    //
    // targetUpdateBounds must not extend beyond the proxy bounds.
    virtual ExpectedOutcome onMakeClosed(GrRecordingContext*, SkIRect* targetUpdateBounds) = 0;

    skia_private::STArray<1, sk_sp<GrSurfaceProxy>> fTargets;

    // List of texture proxies whose contents are being prepared on a worker thread
    // TODO: this list exists so we can fire off the proper upload when a renderTask begins
    // executing. Can this be replaced?
    skia_private::TArray<GrTextureProxy*, true> fDeferredProxies;

    enum Flags {
        kClosed_Flag           = 0x01,  //!< This task can't accept any more dependencies.
        kDisowned_Flag         = 0x02,  //!< This task is disowned by its GrDrawingManager.
        kSkippable_Flag        = 0x04,  //!< This task is skippable.
        kAtlas_Flag            = 0x08,  //!< This task is an atlas task.
        kBlocksReordering_Flag = 0x10,  //!< No task can be reordered with respect to this task.

        kWasOutput_Flag        = 0x20,  //!< Flag for topological sorting
        kTempMark_Flag         = 0x40,  //!< Flag for topological sorting
    };

    void setFlag(uint32_t flag) {
        fFlags |= flag;
    }

    void resetFlag(uint32_t flag) {
        fFlags &= ~flag;
    }

    bool isSetFlag(uint32_t flag) const {
        return SkToBool(fFlags & flag);
    }

    void setIndex(uint32_t index) {
        SkASSERT(!this->isSetFlag(kWasOutput_Flag));
        SkASSERT(index < (1 << 25));
        fFlags |= index << 7;
    }

    uint32_t getIndex() const {
        SkASSERT(this->isSetFlag(kWasOutput_Flag));
        return fFlags >> 7;
    }
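
    // Note on the encoding used above: the low 7 bits of fFlags hold the Flags values, and once
    // kWasOutput_Flag is set the topological-sort index is packed into the remaining upper bits,
    // which is why setIndex() asserts index < (1 << 25).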

private:
    // for TopoSortTraits, fTextureResolveTask, addDependency
    friend class GrDrawingManager;
    friend class GrMockRenderTask;

    // Derived classes can override to indicate usage of proxies _other than target proxies_.
    // GrRenderTask itself will handle checking the target proxies.
    virtual bool onIsUsed(GrSurfaceProxy*) const = 0;

    void addDependency(GrRenderTask* dependedOn);
    void addDependent(GrRenderTask* dependent);
    SkDEBUGCODE(bool isDependent(const GrRenderTask* dependent) const;)
    SkDEBUGCODE(void validate() const;)

    static uint32_t CreateUniqueID();

    // Hooks used by the GrDrawingManager when topologically sorting the task DAG.
    struct TopoSortTraits {
        static uint32_t GetIndex(GrRenderTask* renderTask) {
            return renderTask->getIndex();
        }
        static void Output(GrRenderTask* renderTask, uint32_t index) {
            renderTask->setIndex(index);
            renderTask->setFlag(kWasOutput_Flag);
        }
        static bool WasOutput(const GrRenderTask* renderTask) {
            return renderTask->isSetFlag(kWasOutput_Flag);
        }
        static void SetTempMark(GrRenderTask* renderTask) {
            renderTask->setFlag(kTempMark_Flag);
        }
        static void ResetTempMark(GrRenderTask* renderTask) {
            renderTask->resetFlag(kTempMark_Flag);
        }
        static bool IsTempMarked(const GrRenderTask* renderTask) {
            return renderTask->isSetFlag(kTempMark_Flag);
        }
        static int NumDependencies(const GrRenderTask* renderTask) {
            return renderTask->fDependencies.size();
        }
        static GrRenderTask* Dependency(GrRenderTask* renderTask, int index) {
            return renderTask->fDependencies[index];
        }
    };

    virtual void onMakeSkippable() {}
    virtual void onPrePrepare(GrRecordingContext*) {}  // Only OpsTask currently overrides this
    virtual void onPrepare(GrOpFlushState*) {}  // OpsTask and GrDDLTask override this
    virtual bool onExecute(GrOpFlushState* flushState) = 0;

    const uint32_t fUniqueID;
    uint32_t       fFlags;

    // 'this' GrRenderTask relies on the output of the GrRenderTasks in 'fDependencies'
    skia_private::STArray<1, GrRenderTask*, true> fDependencies;
    // 'this' GrRenderTask's output is relied on by the GrRenderTasks in 'fDependents'
    skia_private::STArray<1, GrRenderTask*, true> fDependents;

    // For performance reasons, we should perform texture resolves back-to-back as much as
    // possible (http://skbug.com/9406). To accomplish this, we make and reuse one single resolve
    // task for each render task, then add it as a dependency during makeClosed().
    GrTextureResolveRenderTask* fTextureResolveTask = nullptr;

    SkDEBUGCODE(GrDrawingManager *fDrawingMgr = nullptr;)
};

#endif