/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/graphite/Context.h"

#include "include/core/SkColorSpace.h"
#include "include/core/SkPathTypes.h"
#include "include/core/SkTraceMemoryDump.h"
#include "include/effects/SkRuntimeEffect.h"
#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/PrecompileContext.h"
#include "include/gpu/graphite/Recorder.h"
#include "include/gpu/graphite/Recording.h"
#include "include/gpu/graphite/Surface.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "include/private/base/SkOnce.h"
#include "src/base/SkRectMemcpy.h"
#include "src/core/SkAutoPixmapStorage.h"
#include "src/core/SkColorFilterPriv.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkYUVMath.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/graphite/AtlasProvider.h"
#include "src/gpu/graphite/BufferManager.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/ClientMappedBufferManager.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/DrawAtlas.h"
#include "src/gpu/graphite/GlobalCache.h"
#include "src/gpu/graphite/GraphicsPipeline.h"
#include "src/gpu/graphite/GraphicsPipelineDesc.h"
#include "src/gpu/graphite/Image_Base_Graphite.h"
#include "src/gpu/graphite/Image_Graphite.h"
#include "src/gpu/graphite/KeyContext.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/QueueManager.h"
#include "src/gpu/graphite/RecorderPriv.h"
#include "src/gpu/graphite/RecordingPriv.h"
#include "src/gpu/graphite/Renderer.h"
#include "src/gpu/graphite/RendererProvider.h"
#include "src/gpu/graphite/ResourceProvider.h"
#include "src/gpu/graphite/RuntimeEffectDictionary.h"
#include "src/gpu/graphite/ShaderCodeDictionary.h"
#include "src/gpu/graphite/SharedContext.h"
#include "src/gpu/graphite/Surface_Graphite.h"
#include "src/gpu/graphite/TextureProxyView.h"
#include "src/gpu/graphite/TextureUtils.h"
#include "src/gpu/graphite/task/CopyTask.h"
#include "src/gpu/graphite/task/SynchronizeToCpuTask.h"
#include "src/gpu/graphite/task/UploadTask.h"
#include "src/image/SkSurface_Base.h"
#include "src/sksl/SkSLGraphiteModules.h"

#if defined(GPU_TEST_UTILS)
#include "src/gpu/graphite/ContextOptionsPriv.h"
#endif

namespace skgpu::graphite {

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())

Context::ContextID Context::ContextID::Next() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return ContextID(id);
}

//--------------------------------------------------------------------------------------------------
Context::Context(sk_sp<SharedContext> sharedContext,
                 std::unique_ptr<QueueManager> queueManager,
                 const ContextOptions& options)
        : fSharedContext(std::move(sharedContext))
        , fQueueManager(std::move(queueManager))
        , fContextID(ContextID::Next()) {
    // We need to move the Graphite SkSL code into the central SkSL data loader at least once
    // (but preferably only once) before we try to use it. We assume that there's no way to
    // use the SkSL code without making a context, so we initialize it here.
    static SkOnce once;
    once([] { SkSL::Loader::SetGraphiteModuleData(SkSL::Loader::GetGraphiteModules()); });

    // We have to create this outside the initializer list because we need to pass in the Context's
    // SingleOwner object, and it is declared last.
    fResourceProvider = fSharedContext->makeResourceProvider(&fSingleOwner,
                                                             SK_InvalidGenID,
                                                             options.fGpuBudgetInBytes,
                                                             /* avoidBufferAlloc= */ false);
    fMappedBufferManager = std::make_unique<ClientMappedBufferManager>(this->contextID());
#if defined(GPU_TEST_UTILS)
    if (options.fOptionsPriv) {
        fStoreContextRefInRecorder = options.fOptionsPriv->fStoreContextRefInRecorder;
    }
#endif
}

Context::~Context() {
#if defined(GPU_TEST_UTILS)
    SkAutoMutexExclusive lock(fTestingLock);
    for (auto& recorder : fTrackedRecorders) {
        recorder->priv().setContext(nullptr);
    }
#endif
}

bool Context::finishInitialization() {
    SkASSERT(!fSharedContext->rendererProvider()); // Can only initialize once

    StaticBufferManager bufferManager{fResourceProvider.get(), fSharedContext->caps()};
    std::unique_ptr<RendererProvider> renderers{
            new RendererProvider(fSharedContext->caps(), &bufferManager)};

    auto result = bufferManager.finalize(this, fQueueManager.get(), fSharedContext->globalCache());
    if (result == StaticBufferManager::FinishResult::kFailure) {
        // If something went wrong filling out the static vertex buffers, any Renderer that would
        // use them will draw incorrectly, so it's better to fail the Context creation.
        return false;
    }
    if (result == StaticBufferManager::FinishResult::kSuccess &&
        !fQueueManager->submitToGpu()) {
        SKGPU_LOG_W("Failed to submit initial command buffer for Context creation.\n");
        return false;
    } // else result was kNoWork so skip submitting to the GPU
    fSharedContext->setRendererProvider(std::move(renderers));
    return true;
}

BackendApi Context::backend() const { return fSharedContext->backend(); }

std::unique_ptr<Recorder> Context::makeRecorder(const RecorderOptions& options) {
    ASSERT_SINGLE_OWNER

    // This is a client-owned Recorder, so pass a null Context so that it creates its own
    // ResourceProvider.
    auto recorder = std::unique_ptr<Recorder>(new Recorder(fSharedContext, options, nullptr));
#if defined(GPU_TEST_UTILS)
    if (fStoreContextRefInRecorder) {
        recorder->priv().setContext(this);
    }
#endif
    return recorder;
}
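
// A minimal usage sketch of how a client typically pairs a Context with a Recorder. This is not
// taken from this file; it assumes the Graphite surface factory SkSurfaces::RenderTarget() and
// caller-provided imageInfo/paint values, and the exact surface-creation call may differ:
//
//   std::unique_ptr<Recorder> recorder = context->makeRecorder();
//   sk_sp<SkSurface> surface = SkSurfaces::RenderTarget(recorder.get(), imageInfo);
//   surface->getCanvas()->drawPaint(paint);                    // record draws on the Recorder
//   std::unique_ptr<Recording> recording = recorder->snap();   // capture the recorded work
//   InsertRecordingInfo info;
//   info.fRecording = recording.get();
//   context->insertRecording(info);                            // schedule it on the Context
//   context->submit();                                         // send the work to the GPU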

std::unique_ptr<PrecompileContext> Context::makePrecompileContext() {
    ASSERT_SINGLE_OWNER

    return std::unique_ptr<PrecompileContext>(new PrecompileContext(fSharedContext));
}

std::unique_ptr<Recorder> Context::makeInternalRecorder() const {
    ASSERT_SINGLE_OWNER

    // Unlike makeRecorder(), this Recorder is meant to be short-lived and go
    // away before a Context public API function returns to the caller. As such
    // it shares the Context's resource provider (no separate budget) and does
    // not get tracked. The internal drawing performed with an internal recorder
    // should not require a client image provider.
    return std::unique_ptr<Recorder>(new Recorder(fSharedContext, {}, this));
}

bool Context::insertRecording(const InsertRecordingInfo& info) {
    ASSERT_SINGLE_OWNER

    return fQueueManager->addRecording(info, this);
}

bool Context::submit(SyncToCpu syncToCpu) {
    ASSERT_SINGLE_OWNER

    if (syncToCpu == SyncToCpu::kYes && !fSharedContext->caps()->allowCpuSync()) {
        SKGPU_LOG_E("SyncToCpu::kYes not supported with ContextOptions::fNeverYieldToWebGPU. "
                    "The parameter is ignored and no synchronization will occur.");
        syncToCpu = SyncToCpu::kNo;
    }
    bool success = fQueueManager->submitToGpu();
    this->checkForFinishedWork(syncToCpu);
    return success;
}

bool Context::hasUnfinishedGpuWork() const { return fQueueManager->hasUnfinishedGpuWork(); }

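// AsyncParams groups the state shared by the async readback entry points below: the source (an
// SkImage or TextureProxy), the area to read, the destination SkImageInfo, and the client
// callback. withNewSource() re-targets the same request after a rescale or copy-as-draw step.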
template <typename SrcPixels>
struct Context::AsyncParams {
    const SrcPixels* fSrcImage;
    SkIRect          fSrcRect;
    SkImageInfo      fDstImageInfo;

    SkImage::ReadPixelsCallback* fCallback;
    SkImage::ReadPixelsContext   fCallbackContext;

    template <typename S>
    AsyncParams<S> withNewSource(const S* newPixels, const SkIRect& newSrcRect) const {
        return AsyncParams<S>{newPixels, newSrcRect,
                              fDstImageInfo, fCallback, fCallbackContext};
    }

    void fail() const {
        (*fCallback)(fCallbackContext, nullptr);
    }

    bool validate() const {
        if (!fSrcImage) {
            return false;
        }
        if (fSrcImage->isProtected()) {
            return false;
        }
        if (!SkIRect::MakeSize(fSrcImage->dimensions()).contains(fSrcRect)) {
            return false;
        }
        if (!SkImageInfoIsValid(fDstImageInfo)) {
            return false;
        }
        return true;
    }
};

template <typename ReadFn, typename... ExtraArgs>
void Context::asyncRescaleAndReadImpl(ReadFn Context::* asyncRead,
                                      SkImage::RescaleGamma rescaleGamma,
                                      SkImage::RescaleMode rescaleMode,
                                      const AsyncParams<SkImage>& params,
                                      ExtraArgs... extraParams) {
    if (!params.validate()) {
        return params.fail();
    }

    if (params.fSrcRect.size() == params.fDstImageInfo.dimensions()) {
        // No need to rescale so do a direct readback
        return (this->*asyncRead)(/*recorder=*/nullptr, params, extraParams...);
    }

    // Make a recorder to collect the rescale drawing commands and the copy commands
    std::unique_ptr<Recorder> recorder = this->makeInternalRecorder();
    sk_sp<SkImage> scaledImage = RescaleImage(recorder.get(),
                                              params.fSrcImage,
                                              params.fSrcRect,
                                              params.fDstImageInfo,
                                              rescaleGamma,
                                              rescaleMode);
    if (!scaledImage) {
        SKGPU_LOG_W("AsyncRead failed because rescaling failed");
        return params.fail();
    }
    (this->*asyncRead)(std::move(recorder),
                       params.withNewSource(scaledImage.get(), params.fDstImageInfo.bounds()),
                       extraParams...);
}

void Context::asyncRescaleAndReadPixels(const SkImage* src,
                                        const SkImageInfo& dstImageInfo,
                                        const SkIRect& srcRect,
                                        SkImage::RescaleGamma rescaleGamma,
                                        SkImage::RescaleMode rescaleMode,
                                        SkImage::ReadPixelsCallback callback,
                                        SkImage::ReadPixelsContext callbackContext) {
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixels,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext});
}
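
// A caller-side sketch of the async readback contract. The callback names and MyReadState type
// below are hypothetical; the callback signature and the AsyncReadResult accessors are the real
// API, and the returned data stays valid for the lifetime of the AsyncReadResult:
//
//   static void onReadDone(void* ctx, std::unique_ptr<const SkImage::AsyncReadResult> result) {
//       auto* state = static_cast<MyReadState*>(ctx);          // MyReadState is hypothetical
//       if (result) {
//           state->copyPixels(result->data(0), result->rowBytes(0));
//       }
//   }
//   context->asyncRescaleAndReadPixels(image, dstInfo, srcRect,
//                                      SkImage::RescaleGamma::kSrc,
//                                      SkImage::RescaleMode::kRepeatedLinear,
//                                      onReadDone, &state);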

void Context::asyncRescaleAndReadPixels(const SkSurface* src,
                                        const SkImageInfo& dstImageInfo,
                                        const SkIRect& srcRect,
                                        SkImage::RescaleGamma rescaleGamma,
                                        SkImage::RescaleMode rescaleMode,
                                        SkImage::ReadPixelsCallback callback,
                                        SkImage::ReadPixelsContext callbackContext) {
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    if (!surfaceImage) {
        // The source surface is not texturable, so the only supported readback is if there's
        // no rescaling
        if (src && asConstSB(src)->isGraphiteBacked() &&
            srcRect.size() == dstImageInfo.dimensions()) {
            TextureProxy* proxy = static_cast<const Surface*>(src)->backingTextureProxy();
            return this->asyncReadTexture(/*recorder=*/nullptr,
                                          {proxy, srcRect, dstImageInfo, callback, callbackContext},
                                          src->imageInfo().colorInfo());
        }
        // else fall through and let asyncRescaleAndReadPixels() invoke the callback when it detects
        // the null image.
    }
    this->asyncRescaleAndReadPixels(surfaceImage.get(),
                                    dstImageInfo,
                                    srcRect,
                                    rescaleGamma,
                                    rescaleMode,
                                    callback,
                                    callbackContext);
}

void Context::asyncReadPixels(std::unique_ptr<Recorder> recorder,
                              const AsyncParams<SkImage>& params) {
    TRACE_EVENT2("skia.gpu", TRACE_FUNC,
                 "width", params.fSrcRect.width(),
                 "height", params.fSrcRect.height());
    SkASSERT(params.validate());    // all paths to here are already validated
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    const Caps* caps = fSharedContext->caps();
    TextureProxyView view = AsView(params.fSrcImage);
    if (!view || !caps->supportsReadPixels(view.proxy()->textureInfo())) {
        // This is either a YUVA image (null view) or the texture can't be read directly, so
        // perform a draw into a compatible texture format and/or flatten any YUVA planes to RGBA.
        if (!recorder) {
            recorder = this->makeInternalRecorder();
        }
        sk_sp<SkImage> flattened = CopyAsDraw(recorder.get(),
                                              params.fSrcImage,
                                              params.fSrcRect,
                                              params.fDstImageInfo.colorInfo(),
                                              Budgeted::kYes,
                                              Mipmapped::kNo,
                                              SkBackingFit::kApprox,
                                              "AsyncReadPixelsFallbackTexture");
        if (!flattened) {
            SKGPU_LOG_W("AsyncRead failed because copy-as-drawing into a readable format failed");
            return params.fail();
        }
        // Use the original fSrcRect and not flattened's size since it's approx-fit.
        return this->asyncReadPixels(std::move(recorder),
                                     params.withNewSource(flattened.get(),
                                     SkIRect::MakeSize(params.fSrcRect.size())));
    }

    // Can copy directly from the image's texture
    this->asyncReadTexture(std::move(recorder), params.withNewSource(view.proxy(), params.fSrcRect),
                           params.fSrcImage->imageInfo().colorInfo());
}

void Context::asyncReadTexture(std::unique_ptr<Recorder> recorder,
                               const AsyncParams<TextureProxy>& params,
                               const SkColorInfo& srcColorInfo) {
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    // We can get here directly from surface or testing-only read pixels, so re-validate
    if (!params.validate()) {
        return params.fail();
    }
    PixelTransferResult transferResult = this->transferPixels(recorder.get(),
                                                              params.fSrcImage,
                                                              srcColorInfo,
                                                              params.fDstImageInfo.colorInfo(),
                                                              params.fSrcRect);

    if (!transferResult.fTransferBuffer) {
        // TODO: try to do a synchronous readPixels instead
        return params.fail();
    }

    this->finalizeAsyncReadPixels(std::move(recorder),
                                  {&transferResult, 1},
                                  params.fCallback,
                                  params.fCallbackContext);
}

void Context::asyncRescaleAndReadPixelsYUV420(const SkImage* src,
                                              SkYUVColorSpace yuvColorSpace,
                                              sk_sp<SkColorSpace> dstColorSpace,
                                              const SkIRect& srcRect,
                                              const SkISize& dstSize,
                                              SkImage::RescaleGamma rescaleGamma,
                                              SkImage::RescaleMode rescaleMode,
                                              SkImage::ReadPixelsCallback callback,
                                              SkImage::ReadPixelsContext callbackContext) {
    // Use kOpaque alpha type to signal that we don't read back the alpha channel
    SkImageInfo dstImageInfo = SkImageInfo::Make(dstSize,
                                                 kRGBA_8888_SkColorType,
                                                 kOpaque_SkAlphaType,
                                                 std::move(dstColorSpace));
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixelsYUV420,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext},
                                  yuvColorSpace);
}

void Context::asyncRescaleAndReadPixelsYUV420(const SkSurface* src,
                                              SkYUVColorSpace yuvColorSpace,
                                              sk_sp<SkColorSpace> dstColorSpace,
                                              const SkIRect& srcRect,
                                              const SkISize& dstSize,
                                              SkImage::RescaleGamma rescaleGamma,
                                              SkImage::RescaleMode rescaleMode,
                                              SkImage::ReadPixelsCallback callback,
                                              SkImage::ReadPixelsContext callbackContext) {
    // YUV[A] readback requires the surface to be texturable since the plane conversion is performed
    // by draws. If AsImage() returns null, the image version of asyncRescaleAndReadPixelsYUV420()
    // will automatically fail.
    // TODO: Is it worth performing an extra copy from 'surface' into a texture in order to succeed?
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    this->asyncRescaleAndReadPixelsYUV420(surfaceImage.get(),
                                          yuvColorSpace,
                                          dstColorSpace,
                                          srcRect,
                                          dstSize,
                                          rescaleGamma,
                                          rescaleMode,
                                          callback,
                                          callbackContext);
}

void Context::asyncRescaleAndReadPixelsYUVA420(const SkImage* src,
                                               SkYUVColorSpace yuvColorSpace,
                                               sk_sp<SkColorSpace> dstColorSpace,
                                               const SkIRect& srcRect,
                                               const SkISize& dstSize,
                                               SkImage::RescaleGamma rescaleGamma,
                                               SkImage::RescaleMode rescaleMode,
                                               SkImage::ReadPixelsCallback callback,
                                               SkImage::ReadPixelsContext callbackContext) {
    SkImageInfo dstImageInfo = SkImageInfo::Make(dstSize,
                                                 kRGBA_8888_SkColorType,
                                                 kPremul_SkAlphaType,
                                                 std::move(dstColorSpace));
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixelsYUV420,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext},
                                  yuvColorSpace);
}

void Context::asyncRescaleAndReadPixelsYUVA420(const SkSurface* src,
                                               SkYUVColorSpace yuvColorSpace,
                                               sk_sp<SkColorSpace> dstColorSpace,
                                               const SkIRect& srcRect,
                                               const SkISize& dstSize,
                                               SkImage::RescaleGamma rescaleGamma,
                                               SkImage::RescaleMode rescaleMode,
                                               SkImage::ReadPixelsCallback callback,
                                               SkImage::ReadPixelsContext callbackContext) {
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    this->asyncRescaleAndReadPixelsYUVA420(surfaceImage.get(),
                                           yuvColorSpace,
                                           dstColorSpace,
                                           srcRect,
                                           dstSize,
                                           rescaleGamma,
                                           rescaleMode,
                                           callback,
                                           callbackContext);
}

void Context::asyncReadPixelsYUV420(std::unique_ptr<Recorder> recorder,
                                    const AsyncParams<SkImage>& params,
                                    SkYUVColorSpace yuvColorSpace) {
    TRACE_EVENT2("skia.gpu", TRACE_FUNC,
                 "width", params.fSrcRect.width(),
                 "height", params.fSrcRect.height());
    // This is only called by asyncRescaleAndReadImpl which already validates its parameters
    SkASSERT(params.validate());
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    // The planes are always extracted via drawing, so create the Recorder if there isn't one yet.
    if (!recorder) {
        recorder = this->makeInternalRecorder();
    }

    // copyPlane renders the source image into an A8 image and sets up a transfer stored in 'result'
    auto copyPlane = [&](SkImageInfo planeInfo,
                         std::string_view label,
                         float rgb2yuv[20],
                         const SkMatrix& texMatrix,
                         PixelTransferResult* result) {
        sk_sp<Surface> dstSurface = Surface::MakeScratch(recorder.get(),
                                                         planeInfo,
                                                         std::move(label),
                                                         Budgeted::kYes,
                                                         Mipmapped::kNo,
                                                         SkBackingFit::kApprox);
        if (!dstSurface) {
            return false;
        }

        // Render the plane defined by rgb2yuv from srcImage into dstSurface
        SkPaint paint;
        const SkSamplingOptions sampling(SkFilterMode::kLinear, SkMipmapMode::kNone);
        sk_sp<SkShader> imgShader = params.fSrcImage->makeShader(
                SkTileMode::kClamp, SkTileMode::kClamp, sampling, texMatrix);
        paint.setShader(std::move(imgShader));
        paint.setBlendMode(SkBlendMode::kSrc);

        if (rgb2yuv) {
            // NOTE: The dstSurface's color space is set to the requested RGB dstColorSpace, so
            // the rendered image is automatically converted to that RGB color space before the
            // RGB->YUV color filter is evaluated, putting the plane data into the alpha channel.
            paint.setColorFilter(SkColorFilters::Matrix(rgb2yuv));
        }

        SkCanvas* canvas = dstSurface->getCanvas();
        canvas->drawPaint(paint);

        // Manually flush the surface before transferPixels() is called to ensure the rendering
        // operations run before the CopyTextureToBuffer task.
        Flush(dstSurface);
        // Must use planeInfo.bounds() for srcRect since dstSurface is kApprox-fit.
        *result = this->transferPixels(recorder.get(),
                                       dstSurface->backingTextureProxy(),
                                       dstSurface->imageInfo().colorInfo(),
                                       planeInfo.colorInfo(),
                                       planeInfo.bounds());
        return SkToBool(result->fTransferBuffer);
    };

    // Set up draws and transfers. This interleaves the drawing to a plane and the copy to the
    // transfer buffer, which will allow the scratch A8 surface to be reused for each plane.
    // TODO: Use one transfer buffer for all three planes to reduce map/unmap cost?
    const bool readAlpha = params.fDstImageInfo.colorInfo().alphaType() != kOpaque_SkAlphaType;
    SkImageInfo yaInfo = params.fDstImageInfo.makeColorType(kAlpha_8_SkColorType)
                                             .makeAlphaType(kPremul_SkAlphaType);
    SkImageInfo uvInfo = yaInfo.makeWH(yaInfo.width()/2, yaInfo.height()/2);
    PixelTransferResult transfers[4];

    float baseM[20];
    SkColorMatrix_RGB2YUV(yuvColorSpace, baseM);
    SkMatrix texMatrix = SkMatrix::Translate(-params.fSrcRect.fLeft, -params.fSrcRect.fTop);

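    // baseM is the row-major 4x5 color matrix produced by SkColorMatrix_RGB2YUV: entries [0..4],
    // [5..9], [10..14], and [15..19] compute the Y, U, V, and A outputs from (R,G,B,A,1). Each
    // plane matrix below copies one of those rows into its alpha row so the plane value lands in
    // the scratch A8 surface's alpha channel.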
    // This matrix generates (r,g,b,a) = (0, 0, 0, y)
    float yM[20];
    std::fill_n(yM, 15, 0.f);
    std::copy_n(baseM + 0, 5, yM + 15);
    if (!copyPlane(yaInfo, "AsyncReadPixelsYPlane", yM, texMatrix, &transfers[0])) {
        return params.fail();
    }

    // No matrix, straight copy of alpha channel
    SkASSERT(baseM[15] == 0 &&
             baseM[16] == 0 &&
             baseM[17] == 0 &&
             baseM[18] == 1 &&
             baseM[19] == 0);
    if (readAlpha &&
        !copyPlane(yaInfo, "AsyncReadPixelsAPlane", nullptr, texMatrix, &transfers[3])) {
        return params.fail();
    }

    // The UV planes are at half resolution compared to Y and A in 4:2:0
    texMatrix.postScale(0.5f, 0.5f);

    // This matrix generates (r,g,b,a) = (0, 0, 0, u)
    float uM[20];
    std::fill_n(uM, 15, 0.f);
    std::copy_n(baseM + 5, 5, uM + 15);
    if (!copyPlane(uvInfo, "AsyncReadPixelsUPlane", uM, texMatrix, &transfers[1])) {
        return params.fail();
    }

    // This matrix generates (r,g,b,a) = (0, 0, 0, v)
    float vM[20];
    std::fill_n(vM, 15, 0.f);
    std::copy_n(baseM + 10, 5, vM + 15);
    if (!copyPlane(uvInfo, "AsyncReadPixelsVPlane", vM, texMatrix, &transfers[2])) {
        return params.fail();
    }

    this->finalizeAsyncReadPixels(std::move(recorder),
                                  {transfers, readAlpha ? 4 : 3},
                                  params.fCallback,
                                  params.fCallbackContext);
}

void Context::finalizeAsyncReadPixels(std::unique_ptr<Recorder> recorder,
                                      SkSpan<PixelTransferResult> transferResults,
                                      SkImage::ReadPixelsCallback callback,
                                      SkImage::ReadPixelsContext callbackContext) {
    // If the async readback work required a Recorder, insert the recording with all of the
    // accumulated work (which includes any copies). Otherwise, for pure copy readbacks,
    // transferPixels() already added the tasks directly to the QueueManager.
    if (recorder) {
        std::unique_ptr<Recording> recording = recorder->snap();
        if (!recording) {
            callback(callbackContext, nullptr);
            return;
        }
        InsertRecordingInfo recordingInfo;
        recordingInfo.fRecording = recording.get();
        if (!this->insertRecording(recordingInfo)) {
            callback(callbackContext, nullptr);
            return;
        }
    }

    // Set up FinishContext and add transfer commands to queue
    struct AsyncReadFinishContext {
        SkImage::ReadPixelsCallback* fClientCallback;
        SkImage::ReadPixelsContext fClientContext;
        ClientMappedBufferManager* fMappedBufferManager;
        std::array<PixelTransferResult, 4> fTransferResults;
    };

    auto finishContext = std::make_unique<AsyncReadFinishContext>();
    finishContext->fClientCallback      = callback;
    finishContext->fClientContext       = callbackContext;
    finishContext->fMappedBufferManager = fMappedBufferManager.get();

    SkASSERT(transferResults.size() <= std::size(finishContext->fTransferResults));
    skia_private::STArray<4, sk_sp<Buffer>> buffersToAsyncMap;
    for (size_t i = 0; i < transferResults.size(); ++i) {
        finishContext->fTransferResults[i] = std::move(transferResults[i]);
        if (fSharedContext->caps()->bufferMapsAreAsync()) {
            buffersToAsyncMap.push_back(finishContext->fTransferResults[i].fTransferBuffer);
        }
    }

    InsertFinishInfo info;
    info.fFinishedContext = finishContext.release();
    info.fFinishedProc = [](GpuFinishedContext c, CallbackResult status) {
        std::unique_ptr<const AsyncReadFinishContext> context(
                reinterpret_cast<const AsyncReadFinishContext*>(c));
        using AsyncReadResult = skgpu::TAsyncReadResult<Buffer, ContextID, PixelTransferResult>;

        ClientMappedBufferManager* manager = context->fMappedBufferManager;
        std::unique_ptr<AsyncReadResult> result;
        if (status == CallbackResult::kSuccess) {
            result = std::make_unique<AsyncReadResult>(manager->ownerID());
        }
        for (const auto& r : context->fTransferResults) {
            if (!r.fTransferBuffer) {
                break;
            }
            if (result && !result->addTransferResult(r, r.fSize, r.fRowBytes, manager)) {
                result.reset();
            }
            // If we didn't get this buffer into the mapped buffer manager then make sure it gets
            // unmapped if it has a pending or completed async map.
            if (!result && r.fTransferBuffer->isUnmappable()) {
                r.fTransferBuffer->unmap();
            }
        }
        (*context->fClientCallback)(context->fClientContext, std::move(result));
    };

    // If addFinishInfo() fails, it invokes the finish callback automatically, which handles all of
    // the required cleanup for us, so we just log an error message. The buffers will never be
    // mapped and thus don't need an unmap.
    if (!fQueueManager->addFinishInfo(info, fResourceProvider.get(), buffersToAsyncMap)) {
        SKGPU_LOG_E("Failed to register finish callbacks for asyncReadPixels.");
        return;
    }
}

Context::PixelTransferResult Context::transferPixels(Recorder* recorder,
                                                     const TextureProxy* srcProxy,
                                                     const SkColorInfo& srcColorInfo,
                                                     const SkColorInfo& dstColorInfo,
                                                     const SkIRect& srcRect) {
    SkASSERT(SkIRect::MakeSize(srcProxy->dimensions()).contains(srcRect));
    SkASSERT(SkColorInfoIsValid(dstColorInfo));

    const Caps* caps = fSharedContext->caps();
    if (!srcProxy || !caps->supportsReadPixels(srcProxy->textureInfo())) {
        return {};
    }

    const SkColorType srcColorType = srcColorInfo.colorType();
    SkColorType supportedColorType;
    bool isRGB888Format;
    std::tie(supportedColorType, isRGB888Format) =
            caps->supportedReadPixelsColorType(srcColorType,
                                               srcProxy->textureInfo(),
                                               dstColorInfo.colorType());
    if (supportedColorType == kUnknown_SkColorType) {
        return {};
    }

    // Fail if read color type does not have all of dstCT's color channels and those missing color
    // channels are in the src.
    uint32_t dstChannels = SkColorTypeChannelFlags(dstColorInfo.colorType());
    uint32_t legalReadChannels = SkColorTypeChannelFlags(supportedColorType);
    uint32_t srcChannels = SkColorTypeChannelFlags(srcColorType);
    if ((~legalReadChannels & dstChannels) & srcChannels) {
        return {};
    }

    int bpp = isRGB888Format ? 3 : SkColorTypeBytesPerPixel(supportedColorType);
    size_t rowBytes = caps->getAlignedTextureDataRowBytes(bpp * srcRect.width());
    size_t size = SkAlignTo(rowBytes * srcRect.height(), caps->requiredTransferBufferAlignment());
    sk_sp<Buffer> buffer = fResourceProvider->findOrCreateBuffer(
            size, BufferType::kXferGpuToCpu, AccessPattern::kHostVisible, "TransferToCpu");
    if (!buffer) {
        return {};
    }

    // Set up copy task. Since we always use a new buffer the offset can be 0 and we don't need to
    // worry about aligning it to the required transfer buffer alignment.
    sk_sp<CopyTextureToBufferTask> copyTask = CopyTextureToBufferTask::Make(sk_ref_sp(srcProxy),
                                                                            srcRect,
                                                                            buffer,
                                                                            /*bufferOffset=*/0,
                                                                            rowBytes);
    const bool addTasksDirectly = !SkToBool(recorder);
    Protected contextIsProtected = fSharedContext->isProtected();
    if (!copyTask || (addTasksDirectly && !fQueueManager->addTask(copyTask.get(),
                                                                  this,
                                                                  contextIsProtected))) {
        return {};
    } else if (!addTasksDirectly) {
        // When a Recorder was provided, add the task to it instead of directly to the QueueManager
        // so it is collected with the rest of the tasks that prepare the copied textures.
        recorder->priv().add(std::move(copyTask));
    }
    sk_sp<SynchronizeToCpuTask> syncTask = SynchronizeToCpuTask::Make(buffer);
    if (!syncTask || (addTasksDirectly && !fQueueManager->addTask(syncTask.get(),
                                                                  this,
                                                                  contextIsProtected))) {
        return {};
    } else if (!addTasksDirectly) {
        recorder->priv().add(std::move(syncTask));
    }

    PixelTransferResult result;
    result.fTransferBuffer = std::move(buffer);
    result.fSize = srcRect.size();
    // srcColorInfo describes the texture; readColorInfo describes the result of the copy-to-buffer,
    // which may be different; dstColorInfo is what we have to transform it into when invoking the
    // async callbacks.
    SkColorInfo readColorInfo = srcColorInfo.makeColorType(supportedColorType);
    if (readColorInfo != dstColorInfo || isRGB888Format) {
        SkISize dims = srcRect.size();
        SkImageInfo srcInfo = SkImageInfo::Make(dims, readColorInfo);
        SkImageInfo dstInfo = SkImageInfo::Make(dims, dstColorInfo);
        result.fRowBytes = dstInfo.minRowBytes();
        result.fPixelConverter = [dstInfo, srcInfo, rowBytes, isRGB888Format](
                void* dst, const void* src) {
            SkAutoPixmapStorage temp;
            size_t srcRowBytes = rowBytes;
            if (isRGB888Format) {
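                // The transfer buffer holds tightly packed 3-byte pixels; expand each row into a
                // temporary 4-byte-per-pixel pixmap (filling the fourth byte with 0xFF) so
                // SkConvertPixels() below can consume it.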
                temp.alloc(srcInfo);
                size_t tRowBytes = temp.rowBytes();
                auto* sRow = reinterpret_cast<const char*>(src);
                auto* tRow = reinterpret_cast<char*>(temp.writable_addr());
                for (int y = 0; y < srcInfo.height(); ++y, sRow += srcRowBytes, tRow += tRowBytes) {
                    for (int x = 0; x < srcInfo.width(); ++x) {
                        auto s = sRow + x*3;
                        auto t = tRow + x*sizeof(uint32_t);
                        memcpy(t, s, 3);
                        t[3] = static_cast<char>(0xFF);
                    }
                }
                src = temp.addr();
                srcRowBytes = tRowBytes;
            }
            SkAssertResult(SkConvertPixels(dstInfo, dst, dstInfo.minRowBytes(),
                                           srcInfo, src, srcRowBytes));
        };
    } else {
        result.fRowBytes = rowBytes;
    }

    return result;
}

void Context::checkForFinishedWork(SyncToCpu syncToCpu) {
    ASSERT_SINGLE_OWNER

    fQueueManager->checkForFinishedWork(syncToCpu);
    fMappedBufferManager->process();
}

void Context::checkAsyncWorkCompletion() {
    this->checkForFinishedWork(SyncToCpu::kNo);
}

void Context::deleteBackendTexture(const BackendTexture& texture) {
    ASSERT_SINGLE_OWNER

    if (!texture.isValid() || texture.backend() != this->backend()) {
        return;
    }
    fResourceProvider->deleteBackendTexture(texture);
}

void Context::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    this->checkAsyncWorkCompletion();

    fResourceProvider->freeGpuResources();
}

void Context::performDeferredCleanup(std::chrono::milliseconds msNotUsed) {
    ASSERT_SINGLE_OWNER

    this->checkAsyncWorkCompletion();

    auto purgeTime = skgpu::StdSteadyClock::now() - msNotUsed;
    fResourceProvider->purgeResourcesNotUsedSince(purgeTime);
}

size_t Context::currentBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentBudgetedBytes();
}

size_t Context::currentPurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentPurgeableBytes();
}

size_t Context::maxBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheLimit();
}

void Context::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceProvider->dumpMemoryStatistics(traceMemoryDump);
    // TODO: What is the Graphite equivalent of the text blob cache, and how do we print out its
    // used bytes here? (See the Ganesh implementation.)
}

bool Context::isDeviceLost() const {
    return fSharedContext->isDeviceLost();
}

int Context::maxTextureSize() const {
    return fSharedContext->caps()->maxTextureSize();
}

bool Context::supportsProtectedContent() const {
    return fSharedContext->isProtected() == Protected::kYes;
}

GpuStatsFlags Context::supportedGpuStats() const {
    return fSharedContext->caps()->supportedGpuStats();
}

///////////////////////////////////////////////////////////////////////////////////

#if defined(GPU_TEST_UTILS)
void Context::deregisterRecorder(const Recorder* recorder) {
    SkAutoMutexExclusive lock(fTestingLock);
    for (auto it = fTrackedRecorders.begin();
         it != fTrackedRecorders.end();
         it++) {
        if (*it == recorder) {
            fTrackedRecorders.erase(it);
            return;
        }
    }
}

bool ContextPriv::readPixels(const SkPixmap& pm,
                             const TextureProxy* textureProxy,
                             const SkImageInfo& srcImageInfo,
                             int srcX, int srcY) {
    auto rect = SkIRect::MakeXYWH(srcX, srcY, pm.width(), pm.height());
    struct AsyncContext {
        bool fCalled = false;
        std::unique_ptr<const SkImage::AsyncReadResult> fResult;
    } asyncContext;

    auto asyncCallback = [](void* c, std::unique_ptr<const SkImage::AsyncReadResult> out) {
        auto context = static_cast<AsyncContext*>(c);
        context->fResult = std::move(out);
        context->fCalled = true;
    };

    const SkColorInfo& srcColorInfo = srcImageInfo.colorInfo();

    // This is roughly equivalent to the logic in asyncRescaleAndReadPixels(SkSurface): either try
    // the image-based readback (with copy-as-draw fallbacks) or read the texture directly if it
    // supports reading.
    if (!fContext->fSharedContext->caps()->supportsReadPixels(textureProxy->textureInfo())) {
        // Since this is a synchronous testing-only API, callers should have flushed any pending
        // work that modifies this texture proxy already. This means we don't have to worry about
        // re-wrapping the proxy in a new Image (that wouldn't be connected to any Device, etc.).
        sk_sp<SkImage> image{new Image(TextureProxyView(sk_ref_sp(textureProxy)), srcColorInfo)};
        Context::AsyncParams<SkImage> params {image.get(), rect, pm.info(),
                                              asyncCallback, &asyncContext};
        if (!params.validate()) {
            params.fail();
        } else {
            fContext->asyncReadPixels(/*recorder=*/nullptr, params);
        }
    } else {
        fContext->asyncReadTexture(/*recorder=*/nullptr,
                                   {textureProxy, rect, pm.info(), asyncCallback, &asyncContext},
                                   srcImageInfo.colorInfo());
    }

    if (fContext->fSharedContext->caps()->allowCpuSync()) {
        fContext->submit(SyncToCpu::kYes);
    } else {
        fContext->submit(SyncToCpu::kNo);
        if (fContext->fSharedContext->backend() == BackendApi::kDawn) {
            while (!asyncContext.fCalled) {
                fContext->fSharedContext->deviceTick(fContext);
            }
        } else {
            SK_ABORT("Only Dawn supports non-syncing contexts.");
        }
    }
    SkASSERT(asyncContext.fCalled);
    if (!asyncContext.fResult) {
        return false;
    }
    SkRectMemcpy(pm.writable_addr(), pm.rowBytes(), asyncContext.fResult->data(0),
                 asyncContext.fResult->rowBytes(0), pm.info().minRowBytes(),
                 pm.height());
    return true;
}

bool ContextPriv::supportsPathRendererStrategy(PathRendererStrategy strategy) {
    AtlasProvider::PathAtlasFlagsBitMask pathAtlasFlags =
            AtlasProvider::QueryPathAtlasSupport(this->caps());
    switch (strategy) {
        case PathRendererStrategy::kDefault:
            return true;
        case PathRendererStrategy::kComputeAnalyticAA:
        case PathRendererStrategy::kComputeMSAA16:
        case PathRendererStrategy::kComputeMSAA8:
            return SkToBool(pathAtlasFlags & AtlasProvider::PathAtlasFlags::kCompute);
        case PathRendererStrategy::kRasterAA:
            return SkToBool(pathAtlasFlags & AtlasProvider::PathAtlasFlags::kRaster);
        case PathRendererStrategy::kTessellation:
            return true;
    }

    return false;
}

#endif // GPU_TEST_UTILS

///////////////////////////////////////////////////////////////////////////////////

std::unique_ptr<Context> ContextCtorAccessor::MakeContext(
        sk_sp<SharedContext> sharedContext,
        std::unique_ptr<QueueManager> queueManager,
        const ContextOptions& options) {
    auto context = std::unique_ptr<Context>(new Context(std::move(sharedContext),
                                                        std::move(queueManager),
                                                        options));
    if (context && context->finishInitialization()) {
        return context;
    } else {
        return nullptr;
    }
}

} // namespace skgpu::graphite