1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7 #include "include/gpu/ganesh/GrDirectContext.h"
8
9 #include "include/core/SkImageInfo.h"
10 #include "include/core/SkPixmap.h"
11 #include "include/core/SkSize.h"
12 #include "include/core/SkSurface.h"
13 #include "include/core/SkTextureCompressionType.h"
14 #include "include/core/SkTraceMemoryDump.h"
15 #include "include/gpu/GpuTypes.h"
16 #include "include/gpu/ganesh/GrBackendSemaphore.h"
17 #include "include/gpu/ganesh/GrBackendSurface.h"
18 #include "include/gpu/ganesh/GrContextThreadSafeProxy.h"
19 #include "include/private/base/SingleOwner.h"
20 #include "include/private/base/SkTArray.h"
21 #include "include/private/base/SkTemplates.h"
22 #include "include/private/gpu/ganesh/GrTypesPriv.h"
23 #include "src/base/SkAutoMalloc.h"
24 #include "src/core/SkCompressedDataUtils.h"
25 #include "src/core/SkMipmap.h"
26 #include "src/core/SkTaskGroup.h"
27 #include "src/core/SkTraceEvent.h"
28 #include "src/gpu/DataUtils.h"
29 #include "src/gpu/GpuTypesPriv.h"
30 #include "src/gpu/RefCntedCallback.h"
31 #include "src/gpu/Swizzle.h"
32 #include "src/gpu/ganesh/Device.h"
33 #include "src/gpu/ganesh/GrBackendUtils.h"
34 #include "src/gpu/ganesh/GrCaps.h"
35 #include "src/gpu/ganesh/GrClientMappedBufferManager.h"
36 #include "src/gpu/ganesh/GrColorInfo.h"
37 #include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
38 #include "src/gpu/ganesh/GrDirectContextPriv.h"
39 #include "src/gpu/ganesh/GrDrawOpAtlas.h"
40 #include "src/gpu/ganesh/GrDrawingManager.h"
41 #include "src/gpu/ganesh/GrGpu.h"
42 #include "src/gpu/ganesh/GrPixmap.h"
43 #include "src/gpu/ganesh/GrProxyProvider.h"
44 #include "src/gpu/ganesh/GrRenderTargetProxy.h"
45 #include "src/gpu/ganesh/GrResourceCache.h"
46 #include "src/gpu/ganesh/GrResourceProvider.h"
47 #include "src/gpu/ganesh/GrSemaphore.h" // IWYU pragma: keep
48 #include "src/gpu/ganesh/GrShaderCaps.h"
49 #include "src/gpu/ganesh/GrSurfaceProxy.h"
50 #include "src/gpu/ganesh/GrSurfaceProxyView.h"
51 #include "src/gpu/ganesh/GrThreadSafePipelineBuilder.h" // IWYU pragma: keep
52 #include "src/gpu/ganesh/SurfaceContext.h"
53 #include "src/gpu/ganesh/image/SkImage_GaneshBase.h"
54 #include "src/gpu/ganesh/mock/GrMockGpu.h"
55 #include "src/gpu/ganesh/ops/SmallPathAtlasMgr.h"
56 #include "src/gpu/ganesh/surface/SkSurface_Ganesh.h"
57 #include "src/gpu/ganesh/text/GrAtlasManager.h"
58 #include "src/image/SkImage_Base.h"
59 #include "src/image/SkSurface_Base.h"
60 #include "src/text/gpu/StrikeCache.h"
61 #include "src/text/gpu/TextBlobRedrawCoordinator.h"
62
63 #include <array>
64 #include <atomic>
65 #include <forward_list>
66 #include <memory>
67 #include <utility>
68
69 #ifdef SK_DIRECT3D
70 #include "src/gpu/ganesh/d3d/GrD3DGpu.h"
71 #endif
72
73 using namespace skia_private;
74
75 #define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())
76
77 using StrikeCache = sktext::gpu::StrikeCache;
78
Next()79 GrDirectContext::DirectContextID GrDirectContext::DirectContextID::Next() {
80 static std::atomic<uint32_t> nextID{1};
81 uint32_t id;
82 do {
83 id = nextID.fetch_add(1, std::memory_order_relaxed);
84 } while (id == SK_InvalidUniqueID);
85 return DirectContextID(id);
86 }
87
// Binds this direct context to its thread-safe proxy, records the client's
// context-deletion callback, and assigns a process-unique DirectContextID.
GrDirectContext::GrDirectContext(GrBackendApi backend,
                                 const GrContextOptions& options,
                                 sk_sp<GrContextThreadSafeProxy> proxy)
        : GrRecordingContext(std::move(proxy), false)
        , fDeleteCallbackHelper(new DeleteCallbackHelper(options.fContextDeleteContext,
                                                         options.fContextDeleteProc))
        , fDirectContextID(DirectContextID::Next()) {}
95
// Teardown must happen in a strict order: flush pending work, wait for the GPU,
// destroy the drawing manager, release cached resources, then drop the mapped
// buffer manager.
GrDirectContext::~GrDirectContext() {
    ASSERT_SINGLE_OWNER
    // this if-test protects against the case where the context is being destroyed
    // before having been fully created
    if (fGpu) {
        this->flushAndSubmit();
    }

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/false);

    this->destroyDrawingManager();

    // Ideally we could just let the ptr drop, but resource cache queries this ptr in releaseAll.
    if (fResourceCache) {
        fResourceCache->releaseAll();
    }
    // This has to be after GrResourceCache::releaseAll so that other threads that are holding
    // async pixel result don't try to destroy buffers off thread.
    fMappedBufferManager.reset();
}
117
// Exposes the base class's thread-safe proxy; kept as an explicit forward so it
// is part of GrDirectContext's public surface.
sk_sp<GrContextThreadSafeProxy> GrDirectContext::threadSafeProxy() {
    return GrRecordingContext::threadSafeProxy();
}
121
resetGLTextureBindings()122 void GrDirectContext::resetGLTextureBindings() {
123 if (this->abandoned() || this->backend() != GrBackendApi::kOpenGL) {
124 return;
125 }
126 fGpu->resetTextureBindings();
127 }
128
// Marks backend state categories (bitmask in |state|) as dirty so the GPU layer
// re-sends them before the next draw.
void GrDirectContext::resetContext(uint32_t state) {
    ASSERT_SINGLE_OWNER
    fGpu->markContextDirty(state);
}
133
// Abandons the context: all outstanding GPU resources are orphaned without
// returning them to the backend API. The teardown order below is deliberate —
// sub-systems must be abandoned before the GrGpu is disconnected.
void GrDirectContext::abandonContext() {
    if (GrRecordingContext::abandoned()) {
        return;
    }

    // Re-entrant abandonment from inside a resource ReleaseProc would corrupt
    // the teardown sequence, so it is explicitly disallowed.
    if (fInsideReleaseProcCnt) {
        SkDEBUGFAIL("Calling GrDirectContext::abandonContext() while inside a ReleaseProc is not "
                    "allowed");
        return;
    }

    GrRecordingContext::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(this->caps()->mustSyncGpuDuringAbandon());

    fStrikeCache->freeAll();

    fMappedBufferManager->abandon();

    fResourceProvider->abandon();

    // abandon first so destructors don't try to free the resources in the API.
    fResourceCache->abandonAll();

    fGpu->disconnect(GrGpu::DisconnectType::kAbandon);

#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();
}
168
abandoned()169 bool GrDirectContext::abandoned() {
170 if (GrRecordingContext::abandoned()) {
171 return true;
172 }
173
174 if (fGpu && fGpu->isDeviceLost()) {
175 this->abandonContext();
176 return true;
177 }
178 return false;
179 }
180
isDeviceLost()181 bool GrDirectContext::isDeviceLost() {
182 if (fGpu && fGpu->isDeviceLost()) {
183 if (!GrRecordingContext::abandoned()) {
184 this->abandonContext();
185 }
186 return true;
187 }
188 return false;
189 }
190
oomed()191 bool GrDirectContext::oomed() { return fGpu ? fGpu->checkAndResetOOMed() : false; }
192
// Like abandonContext(), but first returns all resources to the backend 3D API
// (kCleanup disconnect) instead of orphaning them. Ordering mirrors the
// destructor: sync GPU, abandon provider, release cache, then disconnect.
void GrDirectContext::releaseResourcesAndAbandonContext() {
    if (GrRecordingContext::abandoned()) {
        return;
    }

    GrRecordingContext::abandonContext();

    // We need to make sure all work is finished on the gpu before we start releasing resources.
    this->syncAllOutstandingGpuWork(/*shouldExecuteWhileAbandoned=*/true);

    fResourceProvider->abandon();

    // Release all resources in the backend 3D API.
    fResourceCache->releaseAll();

    // Must be after GrResourceCache::releaseAll().
    fMappedBufferManager.reset();

    fGpu->disconnect(GrGpu::DisconnectType::kCleanup);
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();
}
219
// Frees all GPU resources that are not currently in use; the context remains
// fully usable afterwards. Pending work is flushed first so nothing freed here
// is still referenced by unsubmitted commands.
void GrDirectContext::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->flushAndSubmit();
#if !defined(SK_ENABLE_OPTIMIZE_SIZE)
    if (fSmallPathAtlasMgr) {
        fSmallPathAtlasMgr->reset();
    }
#endif
    fAtlasManager->freeAll();

    // TODO: the glyph cache doesn't hold any GpuResources so this call should not be needed here.
    // Some slack in the GrTextBlob's implementation requires it though. That could be fixed.
    fStrikeCache->freeAll();

    this->drawingManager()->freeGpuResources();

    fResourceCache->purgeUnlockedResources(GrPurgeResourceOptions::kAllResources);
}
243
// Second-phase initialization: wires up the caches, providers, and atlas
// managers once a GrGpu exists. Returns false (leaving the context unusable)
// if there is no GPU or the base class fails to initialize. The construction
// order matters: the thread-safe proxy and recording context come first, then
// the resource cache/provider, then the atlas manager (which registers itself
// as an on-flush callback).
bool GrDirectContext::init() {
    ASSERT_SINGLE_OWNER
    if (!fGpu) {
        return false;
    }

    fThreadSafeProxy->priv().init(fGpu->refCaps(), fGpu->refPipelineBuilder());
    if (!GrRecordingContext::init()) {
        return false;
    }

    SkASSERT(this->getTextBlobRedrawCoordinator());
    SkASSERT(this->threadSafeCache());

    fStrikeCache = std::make_unique<StrikeCache>();
    fResourceCache = std::make_unique<GrResourceCache>(this->singleOwner(),
                                                       this->directContextID(),
                                                       this->contextID());
    fResourceCache->setProxyProvider(this->proxyProvider());
    fResourceCache->setThreadSafeCache(this->threadSafeCache());
#if defined(GPU_TEST_UTILS)
    // Tests may override the cache budget; -1 means "no override".
    if (this->options().fResourceCacheLimitOverride != -1) {
        this->setResourceCacheLimit(this->options().fResourceCacheLimitOverride);
    }
#endif
    fResourceProvider = std::make_unique<GrResourceProvider>(fGpu.get(), fResourceCache.get(),
                                                             this->singleOwner());
    fMappedBufferManager = std::make_unique<GrClientMappedBufferManager>(this->directContextID());

    fDidTestPMConversions = false;

    // DDL TODO: we need to think through how the task group & persistent cache
    // get passed on to/shared between all the DDLRecorders created with this context.
    if (this->options().fExecutor) {
        fTaskGroup = std::make_unique<SkTaskGroup>(*this->options().fExecutor);
    }

    fPersistentCache = this->options().fPersistentCache;

    GrDrawOpAtlas::AllowMultitexturing allowMultitexturing;
    if (GrContextOptions::Enable::kNo == this->options().fAllowMultipleGlyphCacheTextures ||
        // multitexturing supported only if range can represent the index + texcoords fully
        !(this->caps()->shaderCaps()->fFloatIs32Bits ||
          this->caps()->shaderCaps()->fIntegerSupport)) {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kNo;
    } else {
        allowMultitexturing = GrDrawOpAtlas::AllowMultitexturing::kYes;
    }

    GrProxyProvider* proxyProvider = this->priv().proxyProvider();

    fAtlasManager = std::make_unique<GrAtlasManager>(proxyProvider,
                                                     this->options().fGlyphCacheTextureMaximumBytes,
                                                     allowMultitexturing,
                                                     this->options().fSupportBilerpFromGlyphAtlas);
    this->priv().addOnFlushCallbackObject(fAtlasManager.get());

    return true;
}
303
getResourceCacheUsage(int * resourceCount,size_t * resourceBytes) const304 void GrDirectContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
305 ASSERT_SINGLE_OWNER
306
307 if (resourceCount) {
308 *resourceCount = fResourceCache->getBudgetedResourceCount();
309 }
310 if (resourceBytes) {
311 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
312 }
313 }
314
// Returns the number of bytes held by cache resources that could be purged
// right now (unlocked, budgeted resources).
size_t GrDirectContext::getResourceCachePurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getPurgeableBytes();
}
319
getResourceCacheLimits(int * maxResources,size_t * maxResourceBytes) const320 void GrDirectContext::getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const {
321 ASSERT_SINGLE_OWNER
322 if (maxResources) {
323 *maxResources = -1;
324 }
325 if (maxResourceBytes) {
326 *maxResourceBytes = this->getResourceCacheLimit();
327 }
328 }
329
// Returns the byte budget of the GPU resource cache.
size_t GrDirectContext::getResourceCacheLimit() const {
    ASSERT_SINGLE_OWNER
    return fResourceCache->getMaxResourceBytes();
}
334
// Legacy API: the resource-count limit (|unused|) is ignored; only the byte
// budget is applied.
void GrDirectContext::setResourceCacheLimits(int unused, size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    this->setResourceCacheLimit(maxResourceBytes);
}
339
// Sets the byte budget of the GPU resource cache.
void GrDirectContext::setResourceCacheLimit(size_t maxResourceBytes) {
    ASSERT_SINGLE_OWNER
    fResourceCache->setLimit(maxResourceBytes);
}
344
// Purges unlocked cache resources per |opts|, trims the cache back under
// budget, drops stale text blobs, and lets the backend release any unlocked
// objects it holds.
void GrDirectContext::purgeUnlockedResources(GrPurgeResourceOptions opts) {
    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    fResourceCache->purgeUnlockedResources(opts);
    fResourceCache->purgeAsNeeded();

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();

    fGpu->releaseUnlockedBackendObjects();
}
361
// Periodic housekeeping: completes finished async work, returns mapped buffers
// to the client, and purges resources untouched for at least |msNotUsed|.
void GrDirectContext::performDeferredCleanup(std::chrono::milliseconds msNotUsed,
                                             GrPurgeResourceOptions opts) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    ASSERT_SINGLE_OWNER

    if (this->abandoned()) {
        return;
    }

    this->checkAsyncWorkCompletion();
    fMappedBufferManager->process();
    // Anything last used at or before this timestamp is eligible for purging.
    auto purgeTime = skgpu::StdSteadyClock::now() - msNotUsed;

    fResourceCache->purgeAsNeeded();
    fResourceCache->purgeResourcesNotUsedSince(purgeTime, opts);

    // The textBlob Cache doesn't actually hold any GPU resource but this is a convenient
    // place to purge stale blobs
    this->getTextBlobRedrawCoordinator()->purgeStaleBlobs();
}
383
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)384 void GrDirectContext::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
385 ASSERT_SINGLE_OWNER
386
387 if (this->abandoned()) {
388 return;
389 }
390
391 fResourceCache->purgeUnlockedResources(bytesToPurge, preferScratchResources);
392 }
393
394 ////////////////////////////////////////////////////////////////////////////////
wait(int numSemaphores,const GrBackendSemaphore waitSemaphores[],bool deleteSemaphoresAfterWait)395 bool GrDirectContext::wait(int numSemaphores, const GrBackendSemaphore waitSemaphores[],
396 bool deleteSemaphoresAfterWait) {
397 if (!fGpu || !fGpu->caps()->backendSemaphoreSupport()) {
398 return false;
399 }
400 GrWrapOwnership ownership =
401 deleteSemaphoresAfterWait ? kAdopt_GrWrapOwnership : kBorrow_GrWrapOwnership;
402 for (int i = 0; i < numSemaphores; ++i) {
403 std::unique_ptr<GrSemaphore> sema = fResourceProvider->wrapBackendSemaphore(
404 waitSemaphores[i], GrSemaphoreWrapType::kWillWait, ownership);
405 // If we failed to wrap the semaphore it means the client didn't give us a valid semaphore
406 // to begin with. Therefore, it is fine to not wait on it.
407 if (sema) {
408 fGpu->waitSemaphore(sema.get());
409 }
410 }
411 return true;
412 }
413
414 #if !defined(SK_ENABLE_OPTIMIZE_SIZE)
onGetSmallPathAtlasMgr()415 skgpu::ganesh::SmallPathAtlasMgr* GrDirectContext::onGetSmallPathAtlasMgr() {
416 if (!fSmallPathAtlasMgr) {
417 fSmallPathAtlasMgr = std::make_unique<skgpu::ganesh::SmallPathAtlasMgr>();
418
419 this->priv().addOnFlushCallbackObject(fSmallPathAtlasMgr.get());
420 }
421
422 if (!fSmallPathAtlasMgr->initAtlas(this->proxyProvider(), this->caps())) {
423 return nullptr;
424 }
425
426 return fSmallPathAtlasMgr.get();
427 }
428 #endif
429
430 ////////////////////////////////////////////////////////////////////////////////
431
// Reports which GPU statistics the backend can collect, as advertised by caps.
skgpu::GpuStatsFlags GrDirectContext::supportedGpuStats() const {
    return this->caps()->supportedGpuStats();
}
435
// Flushes all pending work to the drawing manager. Even on an abandoned
// context the client's finished/submitted callbacks must still be invoked so
// the client can free its callback contexts; kNo is reported in that case.
GrSemaphoresSubmitted GrDirectContext::flush(const GrFlushInfo& info) {
    ASSERT_SINGLE_OWNER
    if (this->abandoned()) {
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        return GrSemaphoresSubmitted::kNo;
    }

    return this->drawingManager()->flushSurfaces(
            {}, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, nullptr);
}
451
submit(const GrSubmitInfo & info)452 bool GrDirectContext::submit(const GrSubmitInfo& info) {
453 ASSERT_SINGLE_OWNER
454 if (this->abandoned()) {
455 return false;
456 }
457
458 if (!fGpu) {
459 return false;
460 }
461
462 return fGpu->submitToGpu(info);
463 }
464
flush(const sk_sp<const SkImage> & image,const GrFlushInfo & flushInfo)465 GrSemaphoresSubmitted GrDirectContext::flush(const sk_sp<const SkImage>& image,
466 const GrFlushInfo& flushInfo) {
467 if (!image) {
468 return GrSemaphoresSubmitted::kNo;
469 }
470 auto ib = as_IB(image);
471 if (!ib->isGaneshBacked()) {
472 return GrSemaphoresSubmitted::kNo;
473 }
474 auto igb = static_cast<const SkImage_GaneshBase*>(image.get());
475 return igb->flush(this, flushInfo);
476 }
477
// Convenience overload: flush an image's pending work with default flush info.
void GrDirectContext::flush(const sk_sp<const SkImage>& image) {
    this->flush(image, {});
}
481
// Flushes an image's pending work and immediately submits it to the GPU.
void GrDirectContext::flushAndSubmit(const sk_sp<const SkImage>& image) {
    this->flush(image, {});
    this->submit();
}
486
// Flushes a surface's pending work, informing the backend how the surface will
// be accessed afterwards (|access|). Non-Ganesh surfaces have nothing to flush.
GrSemaphoresSubmitted GrDirectContext::flush(SkSurface* surface,
                                             SkSurfaces::BackendSurfaceAccess access,
                                             const GrFlushInfo& info) {
    if (!surface) {
        return GrSemaphoresSubmitted::kNo;
    }
    auto sb = asSB(surface);
    if (!sb->isGaneshBacked()) {
        return GrSemaphoresSubmitted::kNo;
    }

    auto gs = static_cast<SkSurface_Ganesh*>(surface);
    // The surface must belong to this direct context.
    SkASSERT(this->priv().matches(gs->getDevice()->recordingContext()->asDirectContext()));
    GrRenderTargetProxy* rtp = gs->getDevice()->targetProxy();

    return this->priv().flushSurface(rtp, access, info, nullptr);
}
504
flush(SkSurface * surface,const GrFlushInfo & info,const skgpu::MutableTextureState * newState)505 GrSemaphoresSubmitted GrDirectContext::flush(SkSurface* surface,
506 const GrFlushInfo& info,
507 const skgpu::MutableTextureState* newState) {
508 if (!surface) {
509 return GrSemaphoresSubmitted::kNo;
510 }
511 auto sb = asSB(surface);
512 if (!sb->isGaneshBacked()) {
513 return GrSemaphoresSubmitted::kNo;
514 }
515
516 auto gs = static_cast<SkSurface_Ganesh*>(surface);
517 SkASSERT(this->priv().matches(gs->getDevice()->recordingContext()->asDirectContext()));
518 GrRenderTargetProxy* rtp = gs->getDevice()->targetProxy();
519
520 return this->priv().flushSurface(
521 rtp, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, newState);
522 }
523
// Flushes a surface's pending work and submits it, optionally blocking the CPU
// until the GPU has finished (|sync|).
void GrDirectContext::flushAndSubmit(SkSurface* surface, GrSyncCpu sync) {
    this->flush(surface, SkSurfaces::BackendSurfaceAccess::kNoAccess, GrFlushInfo());
    this->submit(sync);
}
528
// Convenience overload: flush a surface with default flush info and no
// texture-state transition.
void GrDirectContext::flush(SkSurface* surface) {
    this->flush(surface, GrFlushInfo(), nullptr);
}
532
533 ////////////////////////////////////////////////////////////////////////////////
534
checkAsyncWorkCompletion()535 void GrDirectContext::checkAsyncWorkCompletion() {
536 if (fGpu) {
537 fGpu->checkFinishedCallbacks();
538 }
539 }
540
syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned)541 void GrDirectContext::syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned) {
542 if (fGpu && (!this->abandoned() || shouldExecuteWhileAbandoned)) {
543 fGpu->finishOutstandingGpuWork();
544 this->checkAsyncWorkCompletion();
545 }
546 }
547
548 ////////////////////////////////////////////////////////////////////////////////
549
// Asks the backend to persist its Vulkan pipeline cache (no-op on other
// backends or without a GPU).
void GrDirectContext::storeVkPipelineCacheData() {
    if (fGpu) {
        fGpu->storeVkPipelineCacheData();
    }
}
555
556 ////////////////////////////////////////////////////////////////////////////////
557
// Whether the shader caps allow distance-field text rendering.
bool GrDirectContext::supportsDistanceFieldText() const {
    return this->caps()->shaderCaps()->supportsDistanceFieldText();
}
561
562 //////////////////////////////////////////////////////////////////////////////
563
// Dumps GPU-memory usage (resource cache plus the text-blob cache's byte count)
// into the client-provided trace dump.
void GrDirectContext::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceCache->dumpMemoryStatistics(traceMemoryDump);
    traceMemoryDump->dumpNumericValue("skia/gr_text_blob_cache", "size", "bytes",
                                      this->getTextBlobRedrawCoordinator()->usedBytes());
}
570
createBackendTexture(int width,int height,const GrBackendFormat & backendFormat,skgpu::Mipmapped mipmapped,GrRenderable renderable,GrProtected isProtected,std::string_view label)571 GrBackendTexture GrDirectContext::createBackendTexture(int width,
572 int height,
573 const GrBackendFormat& backendFormat,
574 skgpu::Mipmapped mipmapped,
575 GrRenderable renderable,
576 GrProtected isProtected,
577 std::string_view label) {
578 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
579 if (this->abandoned()) {
580 return GrBackendTexture();
581 }
582
583 return fGpu->createBackendTexture({width, height}, backendFormat, renderable,
584 mipmapped, isProtected, label);
585 }
586
// Convenience overload: single base-level pixmap with an explicit texture origin.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap& srcData,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    return this->createBackendTexture(&srcData, 1, textureOrigin, renderable, isProtected,
                                      finishedProc, finishedContext, label);
}
597
// Convenience overload: single base-level pixmap, default (top-left) origin.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap& srcData,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    return this->createBackendTexture(&srcData,
                                      1,
                                      renderable,
                                      isProtected,
                                      finishedProc,
                                      finishedContext,
                                      label);
}
612
// Convenience overload: mip-level pixmaps with the default (top-left) origin.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numLevels,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    return this->createBackendTexture(srcData,
                                      numLevels,
                                      kTopLeft_GrSurfaceOrigin,
                                      renderable,
                                      isProtected,
                                      finishedProc,
                                      finishedContext,
                                      label);
}
629
createBackendTexture(int width,int height,SkColorType skColorType,skgpu::Mipmapped mipmapped,GrRenderable renderable,GrProtected isProtected,std::string_view label)630 GrBackendTexture GrDirectContext::createBackendTexture(int width,
631 int height,
632 SkColorType skColorType,
633 skgpu::Mipmapped mipmapped,
634 GrRenderable renderable,
635 GrProtected isProtected,
636 std::string_view label) {
637 if (this->abandoned()) {
638 return GrBackendTexture();
639 }
640
641 const GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
642
643 return this->createBackendTexture(
644 width, height, format, mipmapped, renderable, isProtected, label);
645 }
646
create_and_clear_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,skgpu::Mipmapped mipmapped,GrRenderable renderable,GrProtected isProtected,sk_sp<skgpu::RefCntedCallback> finishedCallback,std::array<float,4> color,std::string_view label)647 static GrBackendTexture create_and_clear_backend_texture(
648 GrDirectContext* dContext,
649 SkISize dimensions,
650 const GrBackendFormat& backendFormat,
651 skgpu::Mipmapped mipmapped,
652 GrRenderable renderable,
653 GrProtected isProtected,
654 sk_sp<skgpu::RefCntedCallback> finishedCallback,
655 std::array<float, 4> color,
656 std::string_view label) {
657 GrGpu* gpu = dContext->priv().getGpu();
658 GrBackendTexture beTex = gpu->createBackendTexture(dimensions, backendFormat, renderable,
659 mipmapped, isProtected, label);
660 if (!beTex.isValid()) {
661 return {};
662 }
663
664 if (!dContext->priv().getGpu()->clearBackendTexture(beTex,
665 std::move(finishedCallback),
666 color)) {
667 dContext->deleteBackendTexture(beTex);
668 return {};
669 }
670 return beTex;
671 }
672
// Uploads |numLevels| mip-level pixmaps into an existing backend texture by
// wrapping it in a borrowed proxy and writing through a SurfaceContext, then
// flushing so the upload is actually recorded. Returns false if the pixmap
// color type is incompatible with the texture's format or the wrap fails.
static bool update_texture_with_pixmaps(GrDirectContext* context,
                                        const SkPixmap src[],
                                        int numLevels,
                                        const GrBackendTexture& backendTexture,
                                        GrSurfaceOrigin textureOrigin,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    GrColorType ct = SkColorTypeToGrColorType(src[0].colorType());
    const GrBackendFormat& format = backendTexture.getBackendFormat();

    if (!context->priv().caps()->areColorTypeAndFormatCompatible(ct, format)) {
        return false;
    }

    // Borrowed wrap: the client retains ownership of the backend texture; the
    // finished callback fires when the GPU is done reading the src pixels.
    auto proxy = context->priv().proxyProvider()->wrapBackendTexture(backendTexture,
                                                                     kBorrow_GrWrapOwnership,
                                                                     GrWrapCacheable::kNo,
                                                                     kRW_GrIOType,
                                                                     std::move(finishedCallback));
    if (!proxy) {
        return false;
    }

    skgpu::Swizzle swizzle = context->priv().caps()->getReadSwizzle(format, ct);
    GrSurfaceProxyView view(std::move(proxy), textureOrigin, swizzle);
    skgpu::ganesh::SurfaceContext surfaceContext(
            context, std::move(view), src[0].info().colorInfo());
    // Re-wrap the pixmaps in GrCPixmaps for the write API.
    AutoSTArray<15, GrCPixmap> tmpSrc(numLevels);
    for (int i = 0; i < numLevels; ++i) {
        tmpSrc[i] = src[i];
    }
    if (!surfaceContext.writePixels(context, tmpSrc.get(), numLevels)) {
        return false;
    }

    // Flush so the writes are recorded before the caller uses the texture.
    GrSurfaceProxy* p = surfaceContext.asSurfaceProxy();
    GrFlushInfo info;
    context->priv().drawingManager()->flushSurfaces(
            {&p, 1}, SkSurfaces::BackendSurfaceAccess::kNoAccess, info, nullptr);
    return true;
}
713
// Creates a backend texture with the given format, cleared to |color|. The
// finished callback is wrapped before the abandoned-check so it is guaranteed
// to fire (via RefCntedCallback's destructor) even on the early-out path.
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       const GrBackendFormat& backendFormat,
                                                       const SkColor4f& color,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (this->abandoned()) {
        return {};
    }

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            backendFormat,
                                            mipmapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            color.array(),
                                            label);
}
741
// Creates a backend texture for |skColorType|, cleared to |color|. The clear
// color is pre-swizzled with the format's write swizzle so the raw channel
// values land correctly in the backend texture.
GrBackendTexture GrDirectContext::createBackendTexture(int width,
                                                       int height,
                                                       SkColorType skColorType,
                                                       const SkColor4f& color,
                                                       skgpu::Mipmapped mipmapped,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    // Wrap the callback first so it still fires on every early-out below.
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    GrBackendFormat format = this->defaultBackendFormat(skColorType, renderable);
    if (!format.isValid()) {
        return {};
    }

    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);
    SkColor4f swizzledColor = this->caps()->getWriteSwizzle(format, grColorType).applyTo(color);

    return create_and_clear_backend_texture(this,
                                            {width, height},
                                            format,
                                            mipmapped,
                                            renderable,
                                            isProtected,
                                            std::move(finishedCallback),
                                            swizzledColor.array(),
                                            label);
}
776
// Creates a backend texture sized and formatted from srcData[0] and uploads
// all provided mip levels. More than one level implies a mipmapped texture.
// On upload failure the newly created texture is deleted before returning.
GrBackendTexture GrDirectContext::createBackendTexture(const SkPixmap srcData[],
                                                       int numProvidedLevels,
                                                       GrSurfaceOrigin textureOrigin,
                                                       GrRenderable renderable,
                                                       GrProtected isProtected,
                                                       GrGpuFinishedProc finishedProc,
                                                       GrGpuFinishedContext finishedContext,
                                                       std::string_view label) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    // Wrap the callback first so it still fires on every early-out below.
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return {};
    }

    if (!srcData || numProvidedLevels <= 0) {
        return {};
    }

    SkColorType colorType = srcData[0].colorType();

    skgpu::Mipmapped mipmapped = skgpu::Mipmapped::kNo;
    if (numProvidedLevels > 1) {
        mipmapped = skgpu::Mipmapped::kYes;
    }

    GrBackendFormat backendFormat = this->defaultBackendFormat(colorType, renderable);
    GrBackendTexture beTex = this->createBackendTexture(srcData[0].width(),
                                                        srcData[0].height(),
                                                        backendFormat,
                                                        mipmapped,
                                                        renderable,
                                                        isProtected,
                                                        label);
    if (!beTex.isValid()) {
        return {};
    }
    if (!update_texture_with_pixmaps(this,
                                     srcData,
                                     numProvidedLevels,
                                     beTex,
                                     textureOrigin,
                                     std::move(finishedCallback))) {
        // Don't leak the texture if the pixel upload failed.
        this->deleteBackendTexture(beTex);
        return {};
    }
    return beTex;
}
826
// Convenience overload: update with mip-level pixmaps using the default
// (top-left) origin.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& texture,
                                           const SkPixmap srcData[],
                                           int numLevels,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    return this->updateBackendTexture(texture,
                                      srcData,
                                      numLevels,
                                      kTopLeft_GrSurfaceOrigin,
                                      finishedProc,
                                      finishedContext);
}
839
updateBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)840 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
841 const SkColor4f& color,
842 GrGpuFinishedProc finishedProc,
843 GrGpuFinishedContext finishedContext) {
844 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
845
846 if (this->abandoned()) {
847 return false;
848 }
849
850 return fGpu->clearBackendTexture(backendTexture, std::move(finishedCallback), color.array());
851 }
852
// Clears an existing backend texture to |color|, interpreting the texture via
// |skColorType|. The color is pre-swizzled for the texture's format; fails if
// the color type and format are incompatible.
bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
                                           SkColorType skColorType,
                                           const SkColor4f& color,
                                           GrGpuFinishedProc finishedProc,
                                           GrGpuFinishedContext finishedContext) {
    // Wrap the callback first so it still fires on every early-out below.
    auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);

    if (this->abandoned()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();
    GrColorType grColorType = SkColorTypeToGrColorType(skColorType);

    if (!this->caps()->areColorTypeAndFormatCompatible(grColorType, format)) {
        return false;
    }

    skgpu::Swizzle swizzle = this->caps()->getWriteSwizzle(format, grColorType);
    SkColor4f swizzledColor = swizzle.applyTo(color);

    return fGpu->clearBackendTexture(backendTexture,
                                     std::move(finishedCallback),
                                     swizzledColor.array());
}
878
updateBackendTexture(const GrBackendTexture & backendTexture,const SkPixmap srcData[],int numLevels,GrSurfaceOrigin textureOrigin,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)879 bool GrDirectContext::updateBackendTexture(const GrBackendTexture& backendTexture,
880 const SkPixmap srcData[],
881 int numLevels,
882 GrSurfaceOrigin textureOrigin,
883 GrGpuFinishedProc finishedProc,
884 GrGpuFinishedContext finishedContext) {
885 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
886
887 if (this->abandoned()) {
888 return false;
889 }
890
891 if (!srcData || numLevels <= 0) {
892 return false;
893 }
894
895 // If the texture has MIP levels then we require that the full set is overwritten.
896 int numExpectedLevels = 1;
897 if (backendTexture.hasMipmaps()) {
898 numExpectedLevels = SkMipmap::ComputeLevelCount(backendTexture.width(),
899 backendTexture.height()) + 1;
900 }
901 if (numLevels != numExpectedLevels) {
902 return false;
903 }
904 return update_texture_with_pixmaps(this,
905 srcData,
906 numLevels,
907 backendTexture,
908 textureOrigin,
909 std::move(finishedCallback));
910 }
911
912 //////////////////////////////////////////////////////////////////////////////
913
create_and_update_compressed_backend_texture(GrDirectContext * dContext,SkISize dimensions,const GrBackendFormat & backendFormat,skgpu::Mipmapped mipmapped,GrProtected isProtected,sk_sp<skgpu::RefCntedCallback> finishedCallback,const void * data,size_t size)914 static GrBackendTexture create_and_update_compressed_backend_texture(
915 GrDirectContext* dContext,
916 SkISize dimensions,
917 const GrBackendFormat& backendFormat,
918 skgpu::Mipmapped mipmapped,
919 GrProtected isProtected,
920 sk_sp<skgpu::RefCntedCallback> finishedCallback,
921 const void* data,
922 size_t size) {
923 GrGpu* gpu = dContext->priv().getGpu();
924
925 GrBackendTexture beTex = gpu->createCompressedBackendTexture(dimensions, backendFormat,
926 mipmapped, isProtected);
927 if (!beTex.isValid()) {
928 return {};
929 }
930
931 if (!dContext->priv().getGpu()->updateCompressedBackendTexture(
932 beTex, std::move(finishedCallback), data, size)) {
933 dContext->deleteBackendTexture(beTex);
934 return {};
935 }
936 return beTex;
937 }
938
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const SkColor4f & color,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)939 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
940 int width,
941 int height,
942 const GrBackendFormat& backendFormat,
943 const SkColor4f& color,
944 skgpu::Mipmapped mipmapped,
945 GrProtected isProtected,
946 GrGpuFinishedProc finishedProc,
947 GrGpuFinishedContext finishedContext) {
948 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
949 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
950
951 if (this->abandoned()) {
952 return {};
953 }
954
955 SkTextureCompressionType compression = GrBackendFormatToCompressionType(backendFormat);
956 if (compression == SkTextureCompressionType::kNone) {
957 return {};
958 }
959
960 size_t size = SkCompressedDataSize(
961 compression, {width, height}, nullptr, mipmapped == skgpu::Mipmapped::kYes);
962 auto storage = std::make_unique<char[]>(size);
963 skgpu::FillInCompressedData(compression, {width, height}, mipmapped, storage.get(), color);
964 return create_and_update_compressed_backend_texture(this,
965 {width, height},
966 backendFormat,
967 mipmapped,
968 isProtected,
969 std::move(finishedCallback),
970 storage.get(),
971 size);
972 }
973
createCompressedBackendTexture(int width,int height,SkTextureCompressionType compression,const SkColor4f & color,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)974 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
975 int width,
976 int height,
977 SkTextureCompressionType compression,
978 const SkColor4f& color,
979 skgpu::Mipmapped mipmapped,
980 GrProtected isProtected,
981 GrGpuFinishedProc finishedProc,
982 GrGpuFinishedContext finishedContext) {
983 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
984 GrBackendFormat format = this->compressedBackendFormat(compression);
985 return this->createCompressedBackendTexture(width, height, format, color,
986 mipmapped, isProtected, finishedProc,
987 finishedContext);
988 }
989
createCompressedBackendTexture(int width,int height,const GrBackendFormat & backendFormat,const void * compressedData,size_t dataSize,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)990 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
991 int width,
992 int height,
993 const GrBackendFormat& backendFormat,
994 const void* compressedData,
995 size_t dataSize,
996 skgpu::Mipmapped mipmapped,
997 GrProtected isProtected,
998 GrGpuFinishedProc finishedProc,
999 GrGpuFinishedContext finishedContext) {
1000 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1001 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1002
1003 if (this->abandoned()) {
1004 return {};
1005 }
1006
1007 return create_and_update_compressed_backend_texture(this,
1008 {width, height},
1009 backendFormat,
1010 mipmapped,
1011 isProtected,
1012 std::move(finishedCallback),
1013 compressedData,
1014 dataSize);
1015 }
1016
createCompressedBackendTexture(int width,int height,SkTextureCompressionType compression,const void * data,size_t dataSize,skgpu::Mipmapped mipmapped,GrProtected isProtected,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1017 GrBackendTexture GrDirectContext::createCompressedBackendTexture(
1018 int width,
1019 int height,
1020 SkTextureCompressionType compression,
1021 const void* data,
1022 size_t dataSize,
1023 skgpu::Mipmapped mipmapped,
1024 GrProtected isProtected,
1025 GrGpuFinishedProc finishedProc,
1026 GrGpuFinishedContext finishedContext) {
1027 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1028 GrBackendFormat format = this->compressedBackendFormat(compression);
1029 return this->createCompressedBackendTexture(width, height, format, data, dataSize, mipmapped,
1030 isProtected, finishedProc, finishedContext);
1031 }
1032
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const SkColor4f & color,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1033 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1034 const SkColor4f& color,
1035 GrGpuFinishedProc finishedProc,
1036 GrGpuFinishedContext finishedContext) {
1037 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1038
1039 if (this->abandoned()) {
1040 return false;
1041 }
1042
1043 SkTextureCompressionType compression =
1044 GrBackendFormatToCompressionType(backendTexture.getBackendFormat());
1045 if (compression == SkTextureCompressionType::kNone) {
1046 return {};
1047 }
1048 size_t size = SkCompressedDataSize(compression,
1049 backendTexture.dimensions(),
1050 nullptr,
1051 backendTexture.hasMipmaps());
1052 SkAutoMalloc storage(size);
1053 skgpu::FillInCompressedData(compression,
1054 backendTexture.dimensions(),
1055 backendTexture.mipmapped(),
1056 static_cast<char*>(storage.get()),
1057 color);
1058 return fGpu->updateCompressedBackendTexture(backendTexture,
1059 std::move(finishedCallback),
1060 storage.get(),
1061 size);
1062 }
1063
updateCompressedBackendTexture(const GrBackendTexture & backendTexture,const void * compressedData,size_t dataSize,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1064 bool GrDirectContext::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1065 const void* compressedData,
1066 size_t dataSize,
1067 GrGpuFinishedProc finishedProc,
1068 GrGpuFinishedContext finishedContext) {
1069 auto finishedCallback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1070
1071 if (this->abandoned()) {
1072 return false;
1073 }
1074
1075 if (!compressedData) {
1076 return false;
1077 }
1078
1079 return fGpu->updateCompressedBackendTexture(backendTexture,
1080 std::move(finishedCallback),
1081 compressedData,
1082 dataSize);
1083 }
1084
1085 //////////////////////////////////////////////////////////////////////////////
1086
setBackendTextureState(const GrBackendTexture & backendTexture,const skgpu::MutableTextureState & state,skgpu::MutableTextureState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1087 bool GrDirectContext::setBackendTextureState(const GrBackendTexture& backendTexture,
1088 const skgpu::MutableTextureState& state,
1089 skgpu::MutableTextureState* previousState,
1090 GrGpuFinishedProc finishedProc,
1091 GrGpuFinishedContext finishedContext) {
1092 auto callback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1093
1094 if (this->abandoned()) {
1095 return false;
1096 }
1097
1098 return fGpu->setBackendTextureState(backendTexture, state, previousState, std::move(callback));
1099 }
1100
1101
setBackendRenderTargetState(const GrBackendRenderTarget & backendRenderTarget,const skgpu::MutableTextureState & state,skgpu::MutableTextureState * previousState,GrGpuFinishedProc finishedProc,GrGpuFinishedContext finishedContext)1102 bool GrDirectContext::setBackendRenderTargetState(const GrBackendRenderTarget& backendRenderTarget,
1103 const skgpu::MutableTextureState& state,
1104 skgpu::MutableTextureState* previousState,
1105 GrGpuFinishedProc finishedProc,
1106 GrGpuFinishedContext finishedContext) {
1107 auto callback = skgpu::RefCntedCallback::Make(finishedProc, finishedContext);
1108
1109 if (this->abandoned()) {
1110 return false;
1111 }
1112
1113 return fGpu->setBackendRenderTargetState(backendRenderTarget, state, previousState,
1114 std::move(callback));
1115 }
1116
deleteBackendTexture(const GrBackendTexture & backendTex)1117 void GrDirectContext::deleteBackendTexture(const GrBackendTexture& backendTex) {
1118 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1119 // For the Vulkan backend we still must destroy the backend texture when the context is
1120 // abandoned.
1121 if ((this->abandoned() && this->backend() != GrBackendApi::kVulkan) || !backendTex.isValid()) {
1122 return;
1123 }
1124
1125 fGpu->deleteBackendTexture(backendTex);
1126 }
1127
1128 //////////////////////////////////////////////////////////////////////////////
1129
precompileShader(const SkData & key,const SkData & data)1130 bool GrDirectContext::precompileShader(const SkData& key, const SkData& data) {
1131 return fGpu->precompileShader(key, data);
1132 }
1133
1134 #if defined(SK_ENABLE_DUMP_GPU)
1135 #include "include/core/SkString.h"
1136 #include "src/gpu/ganesh/GrUtil.h"
1137 #include "src/utils/SkJSONWriter.h"
1138
dump() const1139 SkString GrDirectContext::dump() const {
1140 SkDynamicMemoryWStream stream;
1141 SkJSONWriter writer(&stream, SkJSONWriter::Mode::kPretty);
1142 writer.beginObject();
1143
1144 writer.appendCString("backend", GrBackendApiToStr(this->backend()));
1145
1146 writer.appendName("caps");
1147 this->caps()->dumpJSON(&writer);
1148
1149 writer.appendName("gpu");
1150 this->fGpu->dumpJSON(&writer);
1151
1152 writer.appendName("context");
1153 this->dumpJSON(&writer);
1154
1155 // Flush JSON to the memory stream
1156 writer.endObject();
1157 writer.flush();
1158
1159 // Null terminate the JSON data in the memory stream
1160 stream.write8(0);
1161
1162 // Allocate a string big enough to hold all the data, then copy out of the stream
1163 SkString result(stream.bytesWritten());
1164 stream.copyToAndReset(result.data());
1165 return result;
1166 }
1167 #endif
1168
1169 /*************************************************************************************************/
MakeMock(const GrMockOptions * mockOptions)1170 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions) {
1171 GrContextOptions defaultOptions;
1172 return MakeMock(mockOptions, defaultOptions);
1173 }
1174
MakeMock(const GrMockOptions * mockOptions,const GrContextOptions & options)1175 sk_sp<GrDirectContext> GrDirectContext::MakeMock(const GrMockOptions* mockOptions,
1176 const GrContextOptions& options) {
1177 sk_sp<GrDirectContext> direct(
1178 new GrDirectContext(GrBackendApi::kMock,
1179 options,
1180 GrContextThreadSafeProxyPriv::Make(GrBackendApi::kMock, options)));
1181
1182 direct->fGpu = GrMockGpu::Make(mockOptions, options, direct.get());
1183 if (!direct->init()) {
1184 return nullptr;
1185 }
1186
1187 return direct;
1188 }
1189
1190 #ifdef SK_DIRECT3D
1191 /*************************************************************************************************/
MakeDirect3D(const GrD3DBackendContext & backendContext)1192 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext) {
1193 GrContextOptions defaultOptions;
1194 return MakeDirect3D(backendContext, defaultOptions);
1195 }
1196
MakeDirect3D(const GrD3DBackendContext & backendContext,const GrContextOptions & options)1197 sk_sp<GrDirectContext> GrDirectContext::MakeDirect3D(const GrD3DBackendContext& backendContext,
1198 const GrContextOptions& options) {
1199 sk_sp<GrDirectContext> direct(new GrDirectContext(
1200 GrBackendApi::kDirect3D,
1201 options,
1202 GrContextThreadSafeProxyPriv::Make(GrBackendApi::kDirect3D, options)));
1203
1204 direct->fGpu = GrD3DGpu::Make(backendContext, options, direct.get());
1205 if (!direct->init()) {
1206 return nullptr;
1207 }
1208
1209 return direct;
1210 }
1211 #endif
1212