/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/gl/GrGLGpu.h"

#include "include/core/SkAlphaType.h"
#include "include/core/SkColor.h"
#include "include/core/SkColorSpace.h"
#include "include/core/SkData.h"
#include "include/core/SkRect.h"
#include "include/core/SkSize.h"
#include "include/core/SkString.h"
#include "include/core/SkTextureCompressionType.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrBackendSurface.h"
#include "include/gpu/ganesh/GrContextOptions.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/gpu/ganesh/GrDriverBugWorkarounds.h"
#include "include/gpu/ganesh/GrTypes.h"
#include "include/gpu/ganesh/gl/GrGLConfig.h"
#include "include/private/base/SkFloatingPoint.h"
#include "include/private/base/SkMath.h"
#include "include/private/base/SkPoint_impl.h"
#include "include/private/base/SkTemplates.h"
#include "include/private/base/SkTo.h"
#include "src/base/SkScopeExit.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkLRUCache.h"
#include "src/core/SkMipmap.h"
#include "src/core/SkSLTypeShared.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/SkRenderEngineAbortf.h"
#include "src/gpu/Swizzle.h"
#include "src/gpu/ganesh/GrAttachment.h"
#include "src/gpu/ganesh/GrBackendSurfacePriv.h"
#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrBuffer.h"
#include "src/gpu/ganesh/GrDataUtils.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"
#include "src/gpu/ganesh/GrImageInfo.h"
#include "src/gpu/ganesh/GrPipeline.h"
#include "src/gpu/ganesh/GrProgramInfo.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/GrSemaphore.h"
#include "src/gpu/ganesh/GrShaderCaps.h"
#include "src/gpu/ganesh/GrShaderVar.h"
#include "src/gpu/ganesh/GrStagingBufferManager.h"
#include "src/gpu/ganesh/GrSurface.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrUtil.h"
#include "src/gpu/ganesh/GrWindowRectangles.h"
#include "src/gpu/ganesh/gl/GrGLAttachment.h"
#include "src/gpu/ganesh/gl/GrGLBackendSurfacePriv.h"
#include "src/gpu/ganesh/gl/GrGLBuffer.h"
#include "src/gpu/ganesh/gl/GrGLOpsRenderPass.h"
#include "src/gpu/ganesh/gl/GrGLProgram.h"
#include "src/gpu/ganesh/gl/GrGLSemaphore.h"
#include "src/gpu/ganesh/gl/GrGLTextureRenderTarget.h"
#include "src/gpu/ganesh/gl/builders/GrGLShaderStringBuilder.h"
#include "src/sksl/SkSLProgramKind.h"
#include "src/sksl/SkSLProgramSettings.h"
#include "src/sksl/ir/SkSLProgram.h"

#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <string>
#include <utility>

using namespace skia_private;

#define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)

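// Wraps GL calls that allocate GPU memory. When error checks are being skipped it issues the call
// and reports GR_GL_NO_ERROR unconditionally; otherwise it clears any pending errors, issues the
// call without the usual error-check assert, and returns the resulting error so callers can detect
// out-of-memory conditions.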
#define GL_ALLOC_CALL(call)                                   \
    [&] {                                                     \
        if (this->glCaps().skipErrorChecks()) {               \
            GR_GL_CALL(this->glInterface(), call);            \
            return static_cast<GrGLenum>(GR_GL_NO_ERROR);     \
        } else {                                              \
            this->clearErrorsAndCheckForOOM();                \
            GR_GL_CALL_NOERRCHECK(this->glInterface(), call); \
            return this->getErrorAndCheckForOOM();            \
        }                                                     \
    }()

//#define USE_NSIGHT

///////////////////////////////////////////////////////////////////////////////

static const GrGLenum gXfermodeEquation2Blend[] = {
    // Basic OpenGL blend equations.
    GR_GL_FUNC_ADD,
    GR_GL_FUNC_SUBTRACT,
    GR_GL_FUNC_REVERSE_SUBTRACT,

    // GL_KHR_blend_equation_advanced.
    GR_GL_SCREEN,
    GR_GL_OVERLAY,
    GR_GL_DARKEN,
    GR_GL_LIGHTEN,
    GR_GL_COLORDODGE,
    GR_GL_COLORBURN,
    GR_GL_HARDLIGHT,
    GR_GL_SOFTLIGHT,
    GR_GL_DIFFERENCE,
    GR_GL_EXCLUSION,
    GR_GL_MULTIPLY,
    GR_GL_HSL_HUE,
    GR_GL_HSL_SATURATION,
    GR_GL_HSL_COLOR,
    GR_GL_HSL_LUMINOSITY,

    // Illegal... needs to map to something.
    GR_GL_FUNC_ADD,
};
static_assert(0 == (int)skgpu::BlendEquation::kAdd);
static_assert(1 == (int)skgpu::BlendEquation::kSubtract);
static_assert(2 == (int)skgpu::BlendEquation::kReverseSubtract);
static_assert(3 == (int)skgpu::BlendEquation::kScreen);
static_assert(4 == (int)skgpu::BlendEquation::kOverlay);
static_assert(5 == (int)skgpu::BlendEquation::kDarken);
static_assert(6 == (int)skgpu::BlendEquation::kLighten);
static_assert(7 == (int)skgpu::BlendEquation::kColorDodge);
static_assert(8 == (int)skgpu::BlendEquation::kColorBurn);
static_assert(9 == (int)skgpu::BlendEquation::kHardLight);
static_assert(10 == (int)skgpu::BlendEquation::kSoftLight);
static_assert(11 == (int)skgpu::BlendEquation::kDifference);
static_assert(12 == (int)skgpu::BlendEquation::kExclusion);
static_assert(13 == (int)skgpu::BlendEquation::kMultiply);
static_assert(14 == (int)skgpu::BlendEquation::kHSLHue);
static_assert(15 == (int)skgpu::BlendEquation::kHSLSaturation);
static_assert(16 == (int)skgpu::BlendEquation::kHSLColor);
static_assert(17 == (int)skgpu::BlendEquation::kHSLLuminosity);
static_assert(std::size(gXfermodeEquation2Blend) == skgpu::kBlendEquationCnt);

static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,

    // extended blend coeffs
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,

    // Illegal... needs to map to something.
    GR_GL_ZERO,
};

//////////////////////////////////////////////////////////////////////////////

static int gl_target_to_binding_index(GrGLenum target) {
    switch (target) {
        case GR_GL_TEXTURE_2D:
            return 0;
        case GR_GL_TEXTURE_RECTANGLE:
            return 1;
        case GR_GL_TEXTURE_EXTERNAL:
            return 2;
    }
    SK_ABORT("Unexpected GL texture target.");
}

GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const {
    return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID;
}

bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const {
    return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified;
}

void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) {
    int targetIndex = gl_target_to_binding_index(target);
    fTargetBindings[targetIndex].fBoundResourceID = resourceID;
    fTargetBindings[targetIndex].fHasBeenModified = true;
}

void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) {
    this->setBoundID(target, GrGpuResource::UniqueID());
}

void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) {
    for (auto& targetBinding : fTargetBindings) {
        targetBinding.fBoundResourceID.makeInvalid();
        if (markUnmodified) {
            targetBinding.fHasBeenModified = false;
        }
    }
}

//////////////////////////////////////////////////////////////////////////////

static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) {
    switch (filter) {
        case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST;
        case GrSamplerState::Filter::kLinear:  return GR_GL_LINEAR;
    }
    SkUNREACHABLE;
}

static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter,
                                        GrSamplerState::MipmapMode mm) {
    switch (mm) {
        case GrSamplerState::MipmapMode::kNone:
            return filter_to_gl_mag_filter(filter);
        case GrSamplerState::MipmapMode::kNearest:
            switch (filter) {
                case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_NEAREST;
                case GrSamplerState::Filter::kLinear:  return GR_GL_LINEAR_MIPMAP_NEAREST;
            }
            SkUNREACHABLE;
        case GrSamplerState::MipmapMode::kLinear:
            switch (filter) {
                case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_LINEAR;
                case GrSamplerState::Filter::kLinear:  return GR_GL_LINEAR_MIPMAP_LINEAR;
            }
            SkUNREACHABLE;
    }
    SkUNREACHABLE;
}

static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode,
                                            const GrCaps& caps) {
    switch (wrapMode) {
        case GrSamplerState::WrapMode::kClamp:        return GR_GL_CLAMP_TO_EDGE;
        case GrSamplerState::WrapMode::kRepeat:       return GR_GL_REPEAT;
        case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT;
        case GrSamplerState::WrapMode::kClampToBorder:
            // May not be supported but should have been caught earlier
            SkASSERT(caps.clampToBorderSupport());
            return GR_GL_CLAMP_TO_BORDER;
    }
    SkUNREACHABLE;
}

///////////////////////////////////////////////////////////////////////////////

static void cleanup_program(GrGLGpu* gpu,
                            GrGLuint* programID,
                            GrGLuint* vshader,
                            GrGLuint* fshader) {
    const GrGLInterface* gli = gpu->glInterface();
    if (programID) {
        GR_GL_CALL(gli, DeleteProgram(*programID));
        *programID = 0;
    }
    if (vshader) {
        GR_GL_CALL(gli, DeleteShader(*vshader));
        *vshader = 0;
    }
    if (fshader) {
        GR_GL_CALL(gli, DeleteShader(*fshader));
        *fshader = 0;
    }
}

///////////////////////////////////////////////////////////////////////////////
class GrGLGpu::SamplerObjectCache {
public:
    SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) {
        fNumTextureUnits = fGpu->glCaps().shaderCaps()->fMaxFragmentSamplers;
        fTextureUnitStates = std::make_unique<UnitState[]>(fNumTextureUnits);
    }

    ~SamplerObjectCache() {
        if (!fNumTextureUnits) {
            // We've already been abandoned.
            return;
        }
    }

    void bindSampler(int unitIdx, GrSamplerState state) {
        if (unitIdx >= fNumTextureUnits) {
            return;
        }
        // In GL the max aniso value is specified in addition to min/mag filters and the driver
        // is encouraged to consider the other filter settings when doing aniso.
        uint32_t key = state.asKey(/*anisoIsOrthogonal=*/true);
        const Sampler* sampler = fSamplers.find(key);
        if (!sampler) {
            GrGLuint s;
            GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s));
            if (!s) {
                return;
            }
            sampler = fSamplers.insert(key, Sampler(s, fGpu->glInterface()));
            GrGLenum minFilter = filter_to_gl_min_filter(state.filter(), state.mipmapMode());
            GrGLenum magFilter = filter_to_gl_mag_filter(state.filter());
            GrGLenum wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps());
            GrGLenum wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps());
            GR_GL_CALL(fGpu->glInterface(),
                       SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter));
            GR_GL_CALL(fGpu->glInterface(),
                       SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter));
            GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX));
            GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY));
            SkASSERT(fGpu->glCaps().anisoSupport() || !state.isAniso());
            if (fGpu->glCaps().anisoSupport()) {
                float maxAniso = std::min(static_cast<GrGLfloat>(state.maxAniso()),
                                          fGpu->glCaps().maxTextureMaxAnisotropy());
                GR_GL_CALL(fGpu->glInterface(), SamplerParameterf(s,
                                                                  GR_GL_TEXTURE_MAX_ANISOTROPY,
                                                                  maxAniso));
            }
        }
        SkASSERT(sampler && sampler->id());
        if (!fTextureUnitStates[unitIdx].fKnown ||
            fTextureUnitStates[unitIdx].fSamplerIDIfKnown != sampler->id()) {
            GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, sampler->id()));
            fTextureUnitStates[unitIdx].fSamplerIDIfKnown = sampler->id();
            fTextureUnitStates[unitIdx].fKnown = true;
        }
    }

    void unbindSampler(int unitIdx) {
        if (!fTextureUnitStates[unitIdx].fKnown ||
            fTextureUnitStates[unitIdx].fSamplerIDIfKnown != 0) {
            GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, 0));
            fTextureUnitStates[unitIdx].fSamplerIDIfKnown = 0;
            fTextureUnitStates[unitIdx].fKnown = true;
        }
    }

    void invalidateBindings() {
        std::fill_n(fTextureUnitStates.get(), fNumTextureUnits, UnitState{});
    }

    void abandon() {
        fSamplers.foreach([](uint32_t* key, Sampler* sampler) { sampler->abandon(); });
        fTextureUnitStates.reset();
        fNumTextureUnits = 0;
    }

    void release() {
        if (!fNumTextureUnits) {
            // We've already been abandoned.
            return;
        }
        fSamplers.reset();
        // Deleting a bound sampler implicitly binds sampler 0. We just invalidate all of our
        // knowledge.
        std::fill_n(fTextureUnitStates.get(), fNumTextureUnits, UnitState{});
    }

private:
    class Sampler {
    public:
        Sampler() = default;
        Sampler(const Sampler&) = delete;

        Sampler(Sampler&& that) {
            fID = that.fID;
            fInterface = that.fInterface;
            that.fID = 0;
        }

        Sampler(GrGLuint id, const GrGLInterface* interface) : fID(id), fInterface(interface) {}

        ~Sampler() {
            if (fID) {
                GR_GL_CALL(fInterface, DeleteSamplers(1, &fID));
            }
        }

        GrGLuint id() const { return fID; }

        void abandon() { fID = 0; }

    private:
        GrGLuint fID = 0;
        const GrGLInterface* fInterface = nullptr;
    };

    struct UnitState {
        bool fKnown = false;
        GrGLuint fSamplerIDIfKnown = 0;
    };

    static constexpr int kMaxSamplers = 32;

    SkLRUCache<uint32_t, Sampler> fSamplers{kMaxSamplers};
    std::unique_ptr<UnitState[]>  fTextureUnitStates;
    GrGLGpu*                      fGpu;
    int                           fNumTextureUnits;
};

///////////////////////////////////////////////////////////////////////////////

std::unique_ptr<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface,
                                     const GrContextOptions& options,
                                     GrDirectContext* direct) {
#if !defined(SK_DISABLE_LEGACY_GL_MAKE_NATIVE_INTERFACE)
    if (!interface) {
        interface = GrGLMakeNativeInterface();
        if (!interface) {
            return nullptr;
        }
    }
#else
    if (!interface) {
        return nullptr;
    }
#endif
#ifdef USE_NSIGHT
    const_cast<GrContextOptions&>(options).fSuppressPathRendering = true;
#endif
    auto glContext = GrGLContext::Make(std::move(interface), options);
    if (!glContext) {
        return nullptr;
    }
    return std::unique_ptr<GrGpu>(new GrGLGpu(std::move(glContext), direct));
}

GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrDirectContext* dContext)
        : GrGpu(dContext)
        , fGLContext(std::move(ctx))
        , fProgramCache(new ProgramCache(dContext->priv().options().fRuntimeProgramCacheSize))
        , fHWProgramID(0)
        , fTempSrcFBOID(0)
        , fTempDstFBOID(0)
        , fStencilClearFBOID(0)
        , fFinishCallbacks(this) {
    SkASSERT(fGLContext);
    // Clear errors so we don't get confused whether we caused an error.
    this->clearErrorsAndCheckForOOM();
    // Toss out any pre-existing OOM that was hanging around before we got started.
    this->checkAndResetOOMed();

    this->initCaps(sk_ref_sp(fGLContext->caps()));

    fHWTextureUnitBindings.reset(this->numTextureUnits());

    this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER;
    this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
    this->hwBufferState(GrGpuBufferType::kDrawIndirect)->fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
    if (GrGLCaps::TransferBufferType::kChromium == this->glCaps().transferBufferType()) {
        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget =
                GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget =
                GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
    } else {
        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
    }
    for (int i = 0; i < kGrGpuBufferTypeCount; ++i) {
        fHWBufferState[i].invalidate();
    }

    if (this->glCaps().useSamplerObjects()) {
        fSamplerObjectCache = std::make_unique<SamplerObjectCache>(this);
    }
}

GrGLGpu::~GrGLGpu() {
    // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
    // to release the resources held by the objects themselves.
    fCopyProgramArrayBuffer.reset();
    fMipmapProgramArrayBuffer.reset();
    if (fProgramCache) {
        fProgramCache->reset();
    }

    fHWProgram.reset();
    if (fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        GL_CALL(UseProgram(0));
    }

    if (fTempSrcFBOID) {
        this->deleteFramebuffer(fTempSrcFBOID);
    }
    if (fTempDstFBOID) {
        this->deleteFramebuffer(fTempDstFBOID);
    }
    if (fStencilClearFBOID) {
        this->deleteFramebuffer(fStencilClearFBOID);
    }

    for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
        if (0 != fCopyPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
        }
    }

    for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
        if (0 != fMipmapPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
        }
    }

    fSamplerObjectCache.reset();

    fFinishCallbacks.callAll(true);
}

void GrGLGpu::disconnect(DisconnectType type) {
    INHERITED::disconnect(type);
    if (DisconnectType::kCleanup == type) {
        if (fHWProgramID) {
            GL_CALL(UseProgram(0));
        }
        if (fTempSrcFBOID) {
            this->deleteFramebuffer(fTempSrcFBOID);
        }
        if (fTempDstFBOID) {
            this->deleteFramebuffer(fTempDstFBOID);
        }
        if (fStencilClearFBOID) {
            this->deleteFramebuffer(fStencilClearFBOID);
        }
        for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
            if (fCopyPrograms[i].fProgram) {
                GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
            }
        }
        for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
            if (fMipmapPrograms[i].fProgram) {
                GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
            }
        }

        if (fSamplerObjectCache) {
            fSamplerObjectCache->release();
        }
    } else {
        if (fProgramCache) {
            fProgramCache->abandon();
        }
        if (fSamplerObjectCache) {
            fSamplerObjectCache->abandon();
        }
    }

    fHWProgram.reset();
    fProgramCache->reset();
    fProgramCache.reset();

    fHWProgramID = 0;
    fTempSrcFBOID = 0;
    fTempDstFBOID = 0;
    fStencilClearFBOID = 0;
    fCopyProgramArrayBuffer.reset();
    for (size_t i = 0; i < std::size(fCopyPrograms); ++i) {
        fCopyPrograms[i].fProgram = 0;
    }
    fMipmapProgramArrayBuffer.reset();
    for (size_t i = 0; i < std::size(fMipmapPrograms); ++i) {
        fMipmapPrograms[i].fProgram = 0;
    }

    fFinishCallbacks.callAll(/* doDelete */ DisconnectType::kCleanup == type);
}

GrThreadSafePipelineBuilder* GrGLGpu::pipelineBuilder() {
    return fProgramCache.get();
}

sk_sp<GrThreadSafePipelineBuilder> GrGLGpu::refPipelineBuilder() {
    return fProgramCache;
}

///////////////////////////////////////////////////////////////////////////////

void GrGLGpu::onResetContext(uint32_t resetBits) {
    if (resetBits & kMisc_GrGLBackendState) {
        // we don't use the zb at all
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        // We don't use face culling.
        GL_CALL(Disable(GR_GL_CULL_FACE));
        // We do use separate stencil. Our algorithms don't care which face is front vs. back so
        // just set this to the default for self-consistency.
        GL_CALL(FrontFace(GR_GL_CCW));

        this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate();
        this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate();

        if (GR_IS_GR_GL(this->glStandard())) {
#ifndef USE_NSIGHT
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));

            fHWWireframeEnabled = kUnknown_TriState;
#endif
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
        }

        if (GR_IS_GR_GL_ES(this->glStandard()) &&
            this->glCaps().fbFetchRequiresEnablePerSample()) {
            // The arm extension requires specifically enabling MSAA fetching per sample.
            // On some devices this may have a perf hit. Also multiple render targets are disabled
            GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE));
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
        GL_CALL(Disable(GR_GL_DITHER));

        fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN;
    }

    if (resetBits & kMSAAEnable_GrGLBackendState) {
        if (this->glCaps().clientCanDisableMultisample()) {
            // Restore GL_MULTISAMPLE to its initial state. It being enabled has no effect on draws
            // to non-MSAA targets.
            GL_CALL(Enable(GR_GL_MULTISAMPLE));
        }
        fHWConservativeRasterEnabled = kUnknown_TriState;
    }

    fHWActiveTextureUnitIdx = -1; // invalid
    fLastPrimitiveType = static_cast<GrPrimitiveType>(-1);

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < this->numTextureUnits(); ++s) {
            fHWTextureUnitBindings[s].invalidateAllTargets(false);
        }
        if (fSamplerObjectCache) {
            fSamplerObjectCache->invalidateBindings();
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWWindowRectsState.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWVertexArrayState.invalidate();
        this->hwBufferState(GrGpuBufferType::kVertex)->invalidate();
        this->hwBufferState(GrGpuBufferType::kIndex)->invalidate();
        this->hwBufferState(GrGpuBufferType::kDrawIndirect)->invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTargetUniqueID.makeInvalid();
        fHWSRGBFramebuffer = kUnknown_TriState;
        fBoundDrawFramebuffer = 0;
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->caps()->writePixelsRowBytesSupport() ||
            this->caps()->transferPixelsToRowBytesSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().readPixelsRowBytesSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
        fHWProgram.reset();
    }
    ++fResetTimestampForTextureParameters;
}

static bool check_backend_texture(const GrBackendTexture& backendTex,
                                  const GrGLCaps& caps,
                                  GrGLTexture::Desc* desc,
                                  bool skipRectTexSupportCheck = false) {
    GrGLTextureInfo info;
    if (!GrBackendTextures::GetGLTextureInfo(backendTex, &info) || !info.fID || !info.fFormat) {
        return false;
    }

    if (info.fProtected == skgpu::Protected::kYes && !caps.supportsProtectedContent()) {
        return false;
    }

    desc->fSize = {backendTex.width(), backendTex.height()};
    desc->fTarget = info.fTarget;
    desc->fID = info.fID;
    desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
    desc->fIsProtected = skgpu::Protected(info.fProtected == skgpu::Protected::kYes ||
                                          caps.strictProtectedness());

    if (desc->fFormat == GrGLFormat::kUnknown) {
        return false;
    }
    if (GR_GL_TEXTURE_EXTERNAL == desc->fTarget) {
        if (!caps.shaderCaps()->fExternalTextureSupport) {
            return false;
        }
    } else if (GR_GL_TEXTURE_RECTANGLE == desc->fTarget) {
        if (!caps.rectangleTextureSupport() && !skipRectTexSupportCheck) {
            return false;
        }
    } else if (GR_GL_TEXTURE_2D != desc->fTarget) {
        return false;
    }

    return true;
}

static sk_sp<GrGLTextureParameters> get_gl_texture_params(const GrBackendTexture& backendTex) {
    const GrBackendTextureData* btd = GrBackendSurfacePriv::GetBackendData(backendTex);
    auto glTextureData = static_cast<const GrGLBackendTextureData*>(btd);
    SkASSERT(glTextureData);
    return glTextureData->info().refParameters();
}

sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex,
                                               GrWrapOwnership ownership,
                                               GrWrapCacheable cacheable,
                                               GrIOType ioType) {
    GrGLTexture::Desc desc;
    if (!check_backend_texture(backendTex, this->glCaps(), &desc)) {
        return nullptr;
    }

    if (kBorrow_GrWrapOwnership == ownership) {
        desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
    } else {
        desc.fOwnership = GrBackendObjectOwnership::kOwned;
    }

    GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid
                                                          : GrMipmapStatus::kNotAllocated;

    auto texture = GrGLTexture::MakeWrapped(this,
                                            mipmapStatus,
                                            desc,
                                            get_gl_texture_params(backendTex),
                                            cacheable,
                                            ioType,
                                            backendTex.getLabel());
    if (this->glCaps().isFormatRenderable(backendTex.getBackendFormat(), 1)) {
        // Pessimistically assume this external texture may have been bound to a FBO.
        texture->baseLevelWasBoundToFBO();
    }
    return texture;
}

static bool check_compressed_backend_texture(const GrBackendTexture& backendTex,
                                             const GrGLCaps& caps, GrGLTexture::Desc* desc,
                                             bool skipRectTexSupportCheck = false) {
    GrGLTextureInfo info;
    if (!GrBackendTextures::GetGLTextureInfo(backendTex, &info) || !info.fID || !info.fFormat) {
        return false;
    }
    if (info.fProtected == skgpu::Protected::kYes && !caps.supportsProtectedContent()) {
        return false;
    }

    desc->fSize = {backendTex.width(), backendTex.height()};
    desc->fTarget = info.fTarget;
    desc->fID = info.fID;
    desc->fFormat = GrGLFormatFromGLEnum(info.fFormat);
    desc->fIsProtected = skgpu::Protected(info.fProtected == skgpu::Protected::kYes ||
                                          caps.strictProtectedness());

    if (desc->fFormat == GrGLFormat::kUnknown) {
        return false;
    }

    if (GR_GL_TEXTURE_2D != desc->fTarget) {
        return false;
    }

    return true;
}

sk_sp<GrTexture> GrGLGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    GrGLTexture::Desc desc;
    if (!check_compressed_backend_texture(backendTex, this->glCaps(), &desc)) {
        return nullptr;
    }

    if (kBorrow_GrWrapOwnership == ownership) {
        desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
    } else {
        desc.fOwnership = GrBackendObjectOwnership::kOwned;
    }

    GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid
                                                          : GrMipmapStatus::kNotAllocated;

    return GrGLTexture::MakeWrapped(this,
                                    mipmapStatus,
                                    desc,
                                    get_gl_texture_params(backendTex),
                                    cacheable,
                                    kRead_GrIOType,
                                    backendTex.getLabel());
}

sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                         int sampleCnt,
                                                         GrWrapOwnership ownership,
                                                         GrWrapCacheable cacheable) {
    const GrGLCaps& caps = this->glCaps();

    GrGLTexture::Desc desc;
    if (!check_backend_texture(backendTex, this->glCaps(), &desc)) {
        return nullptr;
    }
    SkASSERT(caps.isFormatRenderable(desc.fFormat, sampleCnt));
    SkASSERT(caps.isFormatTexturable(desc.fFormat));

    // We don't support rendering to an EXTERNAL texture.
    if (GR_GL_TEXTURE_EXTERNAL == desc.fTarget) {
        return nullptr;
    }

    if (kBorrow_GrWrapOwnership == ownership) {
        desc.fOwnership = GrBackendObjectOwnership::kBorrowed;
    } else {
        desc.fOwnership = GrBackendObjectOwnership::kOwned;
    }

    sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, desc.fFormat);
    SkASSERT(sampleCnt);

    GrGLRenderTarget::IDs rtIDs;
    if (!this->createRenderTargetObjects(desc, sampleCnt, &rtIDs)) {
        return nullptr;
    }

    GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kDirty
                                                          : GrMipmapStatus::kNotAllocated;

    sk_sp<GrGLTextureRenderTarget> texRT(
            GrGLTextureRenderTarget::MakeWrapped(this,
                                                 sampleCnt,
                                                 desc,
                                                 get_gl_texture_params(backendTex),
                                                 rtIDs,
                                                 cacheable,
                                                 mipmapStatus,
                                                 backendTex.getLabel()));
    texRT->baseLevelWasBoundToFBO();
    return texRT;
}

sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    GrGLFramebufferInfo info;
    if (!GrBackendRenderTargets::GetGLFramebufferInfo(backendRT, &info)) {
        return nullptr;
    }

    if (backendRT.isProtected() && !this->glCaps().supportsProtectedContent()) {
        return nullptr;
    }

    const auto format = GrBackendFormats::AsGLFormat(backendRT.getBackendFormat());
    if (!this->glCaps().isFormatRenderable(format, backendRT.sampleCnt())) {
        return nullptr;
    }

    int sampleCount = this->glCaps().getRenderTargetSampleCount(backendRT.sampleCnt(), format);

    GrGLRenderTarget::IDs rtIDs;
    if (sampleCount <= 1) {
        rtIDs.fSingleSampleFBOID = info.fFBOID;
        rtIDs.fMultisampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
    } else {
        rtIDs.fSingleSampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
        rtIDs.fMultisampleFBOID = info.fFBOID;
    }
    rtIDs.fMSColorRenderbufferID = 0;
    rtIDs.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
    rtIDs.fTotalMemorySamplesPerPixel = sampleCount;

    return GrGLRenderTarget::MakeWrapped(this,
                                         backendRT.dimensions(),
                                         format,
                                         sampleCount,
                                         rtIDs,
                                         backendRT.stencilBits(),
                                         skgpu::Protected(backendRT.isProtected()),
                                         /*label=*/"GLGpu_WrapBackendRenderTarget");
}

static bool check_write_and_transfer_input(GrGLTexture* glTex) {
    if (!glTex) {
        return false;
    }

    // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
    if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
        return false;
    }

    return true;
}

bool GrGLGpu::onWritePixels(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType srcColorType,
                            const GrMipLevel texels[],
                            int mipLevelCount,
                            bool prepForTexSampling) {
    auto glTex = static_cast<GrGLTexture*>(surface->asTexture());

    if (!check_write_and_transfer_input(glTex)) {
        return false;
    }

    this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());

    // If we have mips make sure the base/max levels cover the full range so that the uploads go to
    // the right levels. We've found some Radeons require this.
    if (mipLevelCount && this->glCaps().mipmapLevelControlSupport()) {
        auto params = glTex->parameters();
        GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
        int maxLevel = glTex->maxMipmapLevel();
        if (params->nonsamplerState().fBaseMipMapLevel != 0) {
            GL_CALL(TexParameteri(glTex->target(), GR_GL_TEXTURE_BASE_LEVEL, 0));
            nonsamplerState.fBaseMipMapLevel = 0;
        }
        if (params->nonsamplerState().fMaxMipmapLevel != maxLevel) {
            GL_CALL(TexParameteri(glTex->target(), GR_GL_TEXTURE_MAX_LEVEL, maxLevel));
            nonsamplerState.fMaxMipmapLevel = maxLevel;
        }
        params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
    }

    if (this->glCaps().flushBeforeWritePixels()) {
        GL_CALL(Flush());
    }

    SkASSERT(!GrGLFormatIsCompressed(glTex->format()));
    return this->uploadColorTypeTexData(glTex->format(),
                                        surfaceColorType,
                                        glTex->dimensions(),
                                        glTex->target(),
                                        rect,
                                        srcColorType,
                                        texels,
                                        mipLevelCount);
}

bool GrGLGpu::onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                           size_t srcOffset,
                                           sk_sp<GrGpuBuffer> dst,
                                           size_t dstOffset,
                                           size_t size) {
    SkASSERT(!src->isMapped());
    SkASSERT(!dst->isMapped());

    auto glSrc = static_cast<const GrGLBuffer*>(src.get());
    auto glDst = static_cast<const GrGLBuffer*>(dst.get());

    // If we refactored bindBuffer() to use something other than GrGpuBufferType to indicate the
    // binding target then we could use the COPY_READ and COPY_WRITE targets here. But
    // CopyBufferSubData is documented to work with all the targets so it's not clear it's worth it.
    this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glSrc);
    this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glDst);

    GL_CALL(CopyBufferSubData(GR_GL_PIXEL_UNPACK_BUFFER,
                              GR_GL_PIXEL_PACK_BUFFER,
                              srcOffset,
                              dstOffset,
                              size));
    return true;
}

bool GrGLGpu::onTransferPixelsTo(GrTexture* texture,
                                 SkIRect rect,
                                 GrColorType textureColorType,
                                 GrColorType bufferColorType,
                                 sk_sp<GrGpuBuffer> transferBuffer,
                                 size_t offset,
                                 size_t rowBytes) {
    GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);

    // Can't transfer compressed data
    SkASSERT(!GrGLFormatIsCompressed(glTex->format()));

    if (!check_write_and_transfer_input(glTex)) {
        return false;
    }

    static_assert(sizeof(int) == sizeof(int32_t), "");

    this->bindTextureToScratchUnit(glTex->target(), glTex->textureID());

    SkASSERT(!transferBuffer->isMapped());
    SkASSERT(!transferBuffer->isCpuBuffer());
    const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer.get());
    this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);

    SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    const size_t trimRowBytes = rect.width() * bpp;
    const void* pixels = (void*)offset;

    SkASSERT(glBuffer->size() >= offset + rowBytes*(rect.height() - 1) + trimRowBytes);

    bool restoreGLRowLength = false;
    if (trimRowBytes != rowBytes) {
        // we should have checked for this support already
        SkASSERT(this->glCaps().transferPixelsToRowBytesSupport());
        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp));
        restoreGLRowLength = true;
    }

    GrGLFormat textureFormat = glTex->format();
    // External format and type come from the upload data.
    GrGLenum externalFormat = 0;
    GrGLenum externalType = 0;
    this->glCaps().getTexSubImageExternalFormatAndType(
            textureFormat, textureColorType, bufferColorType, &externalFormat, &externalType);
    if (!externalFormat || !externalType) {
        return false;
    }

    GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
    GL_CALL(TexSubImage2D(glTex->target(),
                          0,
                          rect.left(),
                          rect.top(),
                          rect.width(),
                          rect.height(),
                          externalFormat,
                          externalType,
                          pixels));

    if (restoreGLRowLength) {
        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    }

    return true;
}

bool GrGLGpu::onTransferPixelsFrom(GrSurface* surface,
                                   SkIRect rect,
                                   GrColorType surfaceColorType,
                                   GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> transferBuffer,
                                   size_t offset) {
    auto* glBuffer = static_cast<GrGLBuffer*>(transferBuffer.get());
    SkASSERT(glBuffer->size() >= offset + (rect.width() *
                                           rect.height() *
                                           GrColorTypeBytesPerPixel(dstColorType)));

    this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glBuffer);

    auto offsetAsPtr = reinterpret_cast<void*>(offset);
    return this->readOrTransferPixelsFrom(surface,
                                          rect,
                                          surfaceColorType,
                                          dstColorType,
                                          offsetAsPtr,
                                          rect.width());
}

void GrGLGpu::unbindXferBuffer(GrGpuBufferType type) {
    if (this->glCaps().transferBufferType() != GrGLCaps::TransferBufferType::kARB_PBO &&
        this->glCaps().transferBufferType() != GrGLCaps::TransferBufferType::kNV_PBO) {
        return;
    }
    SkASSERT(type == GrGpuBufferType::kXferCpuToGpu || type == GrGpuBufferType::kXferGpuToCpu);
    auto* xferBufferState = this->hwBufferState(type);
    if (!xferBufferState->fBufferZeroKnownBound) {
        GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0));
        xferBufferState->fBoundBufferUniqueID.makeInvalid();
        xferBufferState->fBufferZeroKnownBound = true;
    }
}

bool GrGLGpu::uploadColorTypeTexData(GrGLFormat textureFormat,
                                     GrColorType textureColorType,
                                     SkISize texDims,
                                     GrGLenum target,
                                     SkIRect dstRect,
                                     GrColorType srcColorType,
                                     const GrMipLevel texels[],
                                     int mipLevelCount) {
    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrGLFormatIsCompressed(textureFormat));

    SkASSERT(this->glCaps().isFormatTexturable(textureFormat));

    size_t bpp = GrColorTypeBytesPerPixel(srcColorType);

    // External format and type come from the upload data.
    GrGLenum externalFormat;
    GrGLenum externalType;
    this->glCaps().getTexSubImageExternalFormatAndType(
            textureFormat, textureColorType, srcColorType, &externalFormat, &externalType);
    if (!externalFormat || !externalType) {
        return false;
    }
    this->uploadTexData(texDims, target, dstRect, externalFormat, externalType, bpp, texels,
                        mipLevelCount);
    return true;
}

bool GrGLGpu::uploadColorToTex(GrGLFormat textureFormat,
                               SkISize texDims,
                               GrGLenum target,
                               std::array<float, 4> color,
                               uint32_t levelMask) {
    GrColorType colorType;
    GrGLenum externalFormat, externalType;
    this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(textureFormat, &externalFormat,
                                                               &externalType, &colorType);
    if (colorType == GrColorType::kUnknown) {
        return false;
    }

    std::unique_ptr<char[]> pixelStorage;
    size_t bpp = 0;
    int numLevels = SkMipmap::ComputeLevelCount(texDims) + 1;
    STArray<16, GrMipLevel> levels;
    levels.resize(numLevels);
    SkISize levelDims = texDims;
    for (int i = 0; i < numLevels; ++i, levelDims = {std::max(levelDims.width() >> 1, 1),
                                                     std::max(levelDims.height() >> 1, 1)}) {
        if (levelMask & (1 << i)) {
            if (!pixelStorage) {
                // Make one tight image at the first size and reuse it for smaller levels.
                GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, levelDims);
                size_t rb = ii.minRowBytes();
                pixelStorage.reset(new char[rb * levelDims.height()]);
                if (!GrClearImage(ii, pixelStorage.get(), ii.minRowBytes(), color)) {
                    return false;
                }
                bpp = ii.bpp();
            }
            levels[i] = {pixelStorage.get(), levelDims.width()*bpp, nullptr};
        }
    }
    this->uploadTexData(texDims, target, SkIRect::MakeSize(texDims), externalFormat, externalType,
                        bpp, levels.begin(), levels.size());
    return true;
}

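// Uploads the provided mip levels to the currently bound texture via TexSubImage2D, adjusting
// GR_GL_UNPACK_ROW_LENGTH when a level's row bytes are not tightly packed and restoring it
// afterwards.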
void GrGLGpu::uploadTexData(SkISize texDims,
                            GrGLenum target,
                            SkIRect dstRect,
                            GrGLenum externalFormat,
                            GrGLenum externalType,
                            size_t bpp,
                            const GrMipLevel texels[],
                            int mipLevelCount) {
    SkASSERT(!texDims.isEmpty());
    SkASSERT(!dstRect.isEmpty());
    SkASSERT(SkIRect::MakeSize(texDims).contains(dstRect));
    SkASSERT(mipLevelCount > 0 && mipLevelCount <= SkMipmap::ComputeLevelCount(texDims) + 1);
    SkASSERT(mipLevelCount == 1 || dstRect == SkIRect::MakeSize(texDims));

    const GrGLCaps& caps = this->glCaps();

    bool restoreGLRowLength = false;

    this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
    GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));

    SkISize dims = dstRect.size();
    for (int level = 0; level < mipLevelCount; ++level, dims = {std::max(dims.width() >> 1, 1),
                                                                std::max(dims.height() >> 1, 1)}) {
        if (!texels[level].fPixels) {
            continue;
        }
        const size_t trimRowBytes = dims.width() * bpp;
        const size_t rowBytes = texels[level].fRowBytes;

        if (caps.writePixelsRowBytesSupport() && (rowBytes != trimRowBytes || restoreGLRowLength)) {
            GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
            restoreGLRowLength = true;
        } else {
            SkASSERT(rowBytes == trimRowBytes);
        }

        GL_CALL(TexSubImage2D(target, level, dstRect.x(), dstRect.y(), dims.width(), dims.height(),
                              externalFormat, externalType, texels[level].fPixels));
    }
    if (restoreGLRowLength) {
        SkASSERT(caps.writePixelsRowBytesSupport());
        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    }
}

bool GrGLGpu::uploadCompressedTexData(SkTextureCompressionType compressionType,
                                      GrGLFormat format,
                                      SkISize dimensions,
                                      skgpu::Mipmapped mipmapped,
                                      GrGLenum target,
                                      const void* data,
                                      size_t dataSize) {
    SkASSERT(format != GrGLFormat::kUnknown);
    const GrGLCaps& caps = this->glCaps();

    // We only need the internal format for compressed 2D textures.
    GrGLenum internalFormat = caps.getTexImageOrStorageInternalFormat(format);
    if (!internalFormat) {
        return false;
    }

    SkASSERT(compressionType != SkTextureCompressionType::kNone);

    bool useTexStorage = caps.formatSupportsTexStorage(format);

    int numMipLevels = 1;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);

    // TODO: Make sure that the width and height that we pass to OpenGL
    // is a multiple of the block size.

    if (useTexStorage) {
        // We never resize or change formats of textures.
        GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, numMipLevels, internalFormat,
                                                    dimensions.width(), dimensions.height()));
        if (error != GR_GL_NO_ERROR) {
            return false;
        }

        size_t offset = 0;
        for (int level = 0; level < numMipLevels; ++level) {
            size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
                                                        nullptr, false);

            error = GL_ALLOC_CALL(CompressedTexSubImage2D(target,
                                                          level,
                                                          0,  // left
                                                          0,  // top
                                                          dimensions.width(),
                                                          dimensions.height(),
                                                          internalFormat,
                                                          SkToInt(levelDataSize),
                                                          &((const char*)data)[offset]));

            if (error != GR_GL_NO_ERROR) {
                return false;
            }

            offset += levelDataSize;
            dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
        }
    } else {
        size_t offset = 0;

        for (int level = 0; level < numMipLevels; ++level) {
            size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
                                                        nullptr, false);

            const char* rawLevelData = &((const char*)data)[offset];
            GrGLenum error = GL_ALLOC_CALL(CompressedTexImage2D(target,
                                                                level,
                                                                internalFormat,
                                                                dimensions.width(),
                                                                dimensions.height(),
                                                                0,  // border
                                                                SkToInt(levelDataSize),
                                                                rawLevelData));

            if (error != GR_GL_NO_ERROR) {
                return false;
            }

            offset += levelDataSize;
            dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
        }
    }
    return true;
}

bool GrGLGpu::renderbufferStorageMSAA(const GrGLContext& ctx, int sampleCount, GrGLenum format,
                                      int width, int height) {
    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    GrGLenum error;
    switch (ctx.caps()->msFBOType()) {
        case GrGLCaps::kStandard_MSFBOType:
            error = GL_ALLOC_CALL(RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, sampleCount,
                                                                 format, width, height));
            break;
        case GrGLCaps::kES_Apple_MSFBOType:
            error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2APPLE(
                    GR_GL_RENDERBUFFER, sampleCount, format, width, height));
            break;
        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
            error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2EXT(
                    GR_GL_RENDERBUFFER, sampleCount, format, width, height));
            break;
        case GrGLCaps::kNone_MSFBOType:
            SkUNREACHABLE;
    }
    return error == GR_GL_NO_ERROR;
}

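// Creates the FBO(s), and the MSAA color renderbuffer when needed, that wrap the texture described
// by 'desc' so it can be used as a render target. On failure any partially created GL objects are
// cleaned up via the scope exit below.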
bool GrGLGpu::createRenderTargetObjects(const GrGLTexture::Desc& desc,
                                        int sampleCount,
                                        GrGLRenderTarget::IDs* rtIDs) {
    rtIDs->fMSColorRenderbufferID = 0;
    rtIDs->fMultisampleFBOID = 0;
    rtIDs->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
    rtIDs->fSingleSampleFBOID = 0;
    rtIDs->fTotalMemorySamplesPerPixel = 0;

    SkScopeExit cleanupOnFail([&] {
        if (rtIDs->fMSColorRenderbufferID) {
            GL_CALL(DeleteRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
        }
        if (rtIDs->fMultisampleFBOID != rtIDs->fSingleSampleFBOID) {
            this->deleteFramebuffer(rtIDs->fMultisampleFBOID);
        }
        if (rtIDs->fSingleSampleFBOID) {
            this->deleteFramebuffer(rtIDs->fSingleSampleFBOID);
        }
    });

    GrGLenum colorRenderbufferFormat = 0; // suppress warning

    if (desc.fFormat == GrGLFormat::kUnknown) {
        return false;
    }

    if (sampleCount > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
        return false;
    }

    GL_CALL(GenFramebuffers(1, &rtIDs->fSingleSampleFBOID));
    if (!rtIDs->fSingleSampleFBOID) {
        RENDERENGINE_ABORTF("%s failed to GenFramebuffers!", __func__);
        return false;
    }

    // If we are using multisampling we will create two FBOS. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves it when it is
    // rendered from.
    if (sampleCount <= 1) {
        rtIDs->fMultisampleFBOID = GrGLRenderTarget::kUnresolvableFBOID;
    } else if (this->glCaps().usesImplicitMSAAResolve()) {
        // The GrGLRenderTarget will configure the FBO as multisample or not based on need.
1396 rtIDs->fMultisampleFBOID = rtIDs->fSingleSampleFBOID;
1397 } else {
1398 GL_CALL(GenFramebuffers(1, &rtIDs->fMultisampleFBOID));
1399 if (!rtIDs->fMultisampleFBOID) {
1400 return false;
1401 }
1402 GL_CALL(GenRenderbuffers(1, &rtIDs->fMSColorRenderbufferID));
1403 if (!rtIDs->fMSColorRenderbufferID) {
1404 return false;
1405 }
1406 colorRenderbufferFormat = this->glCaps().getRenderbufferInternalFormat(desc.fFormat);
1407 }
1408
1409 #if defined(__has_feature)
1410 #define IS_TSAN __has_feature(thread_sanitizer)
1411 #else
1412 #define IS_TSAN 0
1413 #endif
1414
1415 // below here we may bind the FBO
1416 fHWBoundRenderTargetUniqueID.makeInvalid();
1417 if (rtIDs->fMSColorRenderbufferID) {
1418 SkASSERT(sampleCount > 1);
1419 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, rtIDs->fMSColorRenderbufferID));
1420 if (!this->renderbufferStorageMSAA(*fGLContext, sampleCount, colorRenderbufferFormat,
1421 desc.fSize.width(), desc.fSize.height())) {
1422 return false;
1423 }
1424 this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fMultisampleFBOID);
1425 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1426 GR_GL_COLOR_ATTACHMENT0,
1427 GR_GL_RENDERBUFFER,
1428 rtIDs->fMSColorRenderbufferID));
1429 // See skbug.com/12644
1430 #if !IS_TSAN
1431 if (!this->glCaps().skipErrorChecks()) {
1432 GrGLenum status;
1433 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1434 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1435 return false;
1436 }
1437 if (this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
1438 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1439 GR_GL_COLOR_ATTACHMENT0,
1440 GR_GL_RENDERBUFFER,
1441 0));
1442 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1443 GR_GL_COLOR_ATTACHMENT0,
1444 GR_GL_RENDERBUFFER,
1445 rtIDs->fMSColorRenderbufferID));
1446 }
1447 }
1448 #endif
1449 rtIDs->fTotalMemorySamplesPerPixel += sampleCount;
1450 }
1451 this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fSingleSampleFBOID);
1452 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1453 GR_GL_COLOR_ATTACHMENT0,
1454 desc.fTarget,
1455 desc.fID,
1456 0));
1457 // See skbug.com/12644
1458 #if !IS_TSAN
1459 if (!this->glCaps().skipErrorChecks()) {
1460 GrGLenum status;
1461 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1462 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1463 return false;
1464 }
1465 if (this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
1466 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1467 GR_GL_COLOR_ATTACHMENT0,
1468 desc.fTarget,
1469 0,
1470 0));
1471 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1472 GR_GL_COLOR_ATTACHMENT0,
1473 desc.fTarget,
1474 desc.fID,
1475 0));
1476 }
1477 }
1478 #endif
1479
1480 #undef IS_TSAN
1481 ++rtIDs->fTotalMemorySamplesPerPixel;
1482
1483 // We did it!
1484 cleanupOnFail.clear();
1485 return true;
1486 }
1487
1488 // good to set a break-point here to know when createTexture fails
return_null_texture()1489 static sk_sp<GrTexture> return_null_texture() {
1490 // SkDEBUGFAIL("null texture");
1491 return nullptr;
1492 }
1493
set_initial_texture_params(const GrGLInterface * interface,const GrGLCaps & caps,GrGLenum target)1494 static GrGLTextureParameters::SamplerOverriddenState set_initial_texture_params(
1495 const GrGLInterface* interface,
1496 const GrGLCaps& caps,
1497 GrGLenum target) {
1498 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1499 // drivers have a bug where an FBO won't be complete if it includes a
1500 // texture that is not mipmap complete (considering the filter in use).
1501 GrGLTextureParameters::SamplerOverriddenState state;
1502 state.fMinFilter = GR_GL_NEAREST;
1503 state.fMagFilter = GR_GL_NEAREST;
1504 state.fWrapS = GR_GL_CLAMP_TO_EDGE;
1505 state.fWrapT = GR_GL_CLAMP_TO_EDGE;
1506 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, state.fMagFilter));
1507 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, state.fMinFilter));
1508 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_S, state.fWrapS));
1509 GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_T, state.fWrapT));
1510 return state;
1511 }
1512
onCreateTexture(SkISize dimensions,const GrBackendFormat & format,GrRenderable renderable,int renderTargetSampleCnt,skgpu::Budgeted budgeted,GrProtected isProtected,int mipLevelCount,uint32_t levelClearMask,std::string_view label)1513 sk_sp<GrTexture> GrGLGpu::onCreateTexture(SkISize dimensions,
1514 const GrBackendFormat& format,
1515 GrRenderable renderable,
1516 int renderTargetSampleCnt,
1517 skgpu::Budgeted budgeted,
1518 GrProtected isProtected,
1519 int mipLevelCount,
1520 uint32_t levelClearMask,
1521 std::string_view label) {
1522 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1523 return nullptr;
1524 }
1525 SkASSERT(GrGLCaps::kNone_MSFBOType != this->glCaps().msFBOType() || renderTargetSampleCnt == 1);
1526
1527 SkASSERT(mipLevelCount > 0);
1528 GrMipmapStatus mipmapStatus =
1529 mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated;
1530 GrGLTextureParameters::SamplerOverriddenState initialState;
1531 GrGLTexture::Desc texDesc;
1532 texDesc.fSize = dimensions;
1533 switch (format.textureType()) {
1534 case GrTextureType::kExternal:
1535 case GrTextureType::kNone:
1536 return nullptr;
1537 case GrTextureType::k2D:
1538 texDesc.fTarget = GR_GL_TEXTURE_2D;
1539 break;
1540 case GrTextureType::kRectangle:
1541 if (mipLevelCount > 1 || !this->glCaps().rectangleTextureSupport()) {
1542 return nullptr;
1543 }
1544 texDesc.fTarget = GR_GL_TEXTURE_RECTANGLE;
1545 break;
1546 }
1547 texDesc.fFormat = GrBackendFormats::AsGLFormat(format);
1548 texDesc.fOwnership = GrBackendObjectOwnership::kOwned;
1549 SkASSERT(texDesc.fFormat != GrGLFormat::kUnknown);
1550 SkASSERT(!GrGLFormatIsCompressed(texDesc.fFormat));
1551 texDesc.fIsProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
1552 this->glCaps().strictProtectedness());
1553
1554 texDesc.fID = this->createTexture(dimensions, texDesc.fFormat, texDesc.fTarget, renderable,
1555 &initialState, mipLevelCount, texDesc.fIsProtected, label);
1556 if (!texDesc.fID) {
1557 return return_null_texture();
1558 }
1559
1560 sk_sp<GrGLTexture> tex;
1561 if (renderable == GrRenderable::kYes) {
1562 // unbind the texture from the texture unit before binding it to the frame buffer
1563 GL_CALL(BindTexture(texDesc.fTarget, 0));
1564 GrGLRenderTarget::IDs rtIDDesc;
1565
1566 if (!this->createRenderTargetObjects(texDesc, renderTargetSampleCnt, &rtIDDesc)) {
1567 GL_CALL(DeleteTextures(1, &texDesc.fID));
1568 return return_null_texture();
1569 }
1570 tex = sk_make_sp<GrGLTextureRenderTarget>(this,
1571 budgeted,
1572 renderTargetSampleCnt,
1573 texDesc,
1574 rtIDDesc,
1575 mipmapStatus,
1576 label);
1577 tex->baseLevelWasBoundToFBO();
1578 } else {
1579 tex = sk_make_sp<GrGLTexture>(this, budgeted, texDesc, mipmapStatus, label);
1580 }
1581 // The non-sampler params are still at their default values.
1582 tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1583 fResetTimestampForTextureParameters);
1584 if (levelClearMask) {
1585 if (this->glCaps().clearTextureSupport()) {
1586 GrGLenum externalFormat, externalType;
1587 GrColorType colorType;
1588 this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(
1589 texDesc.fFormat, &externalFormat, &externalType, &colorType);
1590 for (int i = 0; i < mipLevelCount; ++i) {
1591 if (levelClearMask & (1U << i)) {
1592 GL_CALL(ClearTexImage(tex->textureID(), i, externalFormat, externalType,
1593 nullptr));
1594 }
1595 }
1596 } else if (this->glCaps().canFormatBeFBOColorAttachment(
1597 GrBackendFormats::AsGLFormat(format)) &&
1598 !this->glCaps().performColorClearsAsDraws()) {
1599 this->flushScissorTest(GrScissorTest::kDisabled);
1600 this->disableWindowRectangles();
1601 this->flushColorWrite(true);
1602 this->flushClearColor({0, 0, 0, 0});
1603 for (int i = 0; i < mipLevelCount; ++i) {
1604 if (levelClearMask & (1U << i)) {
1605 this->bindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER,
1606 kDst_TempFBOTarget);
1607 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1608 this->unbindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER);
1609 }
1610 }
1611 fHWBoundRenderTargetUniqueID.makeInvalid();
1612 } else {
1613 this->bindTextureToScratchUnit(texDesc.fTarget, tex->textureID());
1614 std::array<float, 4> zeros = {};
1615 this->uploadColorToTex(texDesc.fFormat,
1616 texDesc.fSize,
1617 texDesc.fTarget,
1618 zeros,
1619 levelClearMask);
1620 }
1621 }
1622 return tex;
1623 }
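
// Example of the levelClearMask contract used above (illustrative only): bit i
// requests a clear of mip level i, so a caller wanting every level of a
// 4-level texture zeroed would pass
//
//   uint32_t levelClearMask = (1u << 4) - 1;   // 0b1111 -> levels 0..3
//
// The three branches above are fallbacks in preference order: glClearTexImage,
// a per-level FBO color clear, and finally a CPU-side upload of zeros.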
1624
1625 sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(SkISize dimensions,
1626 const GrBackendFormat& format,
1627 skgpu::Budgeted budgeted,
1628 skgpu::Mipmapped mipmapped,
1629 GrProtected isProtected,
1630 const void* data,
1631 size_t dataSize) {
1632 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1633 return nullptr;
1634 }
1635 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1636
1637 GrGLTextureParameters::SamplerOverriddenState initialState;
1638 GrGLTexture::Desc desc;
1639 desc.fSize = dimensions;
1640 desc.fTarget = GR_GL_TEXTURE_2D;
1641 desc.fOwnership = GrBackendObjectOwnership::kOwned;
1642 desc.fFormat = GrBackendFormats::AsGLFormat(format);
1643 desc.fIsProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
1644 this->glCaps().strictProtectedness());
1645 desc.fID = this->createCompressedTexture2D(desc.fSize, compression, desc.fFormat,
1646 mipmapped, desc.fIsProtected, &initialState);
1647 if (!desc.fID) {
1648 return nullptr;
1649 }
1650
1651 if (data) {
1652 if (!this->uploadCompressedTexData(compression, desc.fFormat, dimensions, mipmapped,
1653 GR_GL_TEXTURE_2D, data, dataSize)) {
1654 GL_CALL(DeleteTextures(1, &desc.fID));
1655 return nullptr;
1656 }
1657 }
1658
1659 // Unbind this texture from the scratch texture unit.
1660 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0);
1661
1662 GrMipmapStatus mipmapStatus = mipmapped == skgpu::Mipmapped::kYes
1663 ? GrMipmapStatus::kValid
1664 : GrMipmapStatus::kNotAllocated;
1665
1666 auto tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, mipmapStatus,
1667 /*label=*/"GLGpuCreateCompressedTexture");
1668 // The non-sampler params are still at their default values.
1669 tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1670 fResetTimestampForTextureParameters);
1671 return tex;
1672 }
1673
1674 GrBackendTexture GrGLGpu::onCreateCompressedBackendTexture(SkISize dimensions,
1675 const GrBackendFormat& format,
1676 skgpu::Mipmapped mipmapped,
1677 GrProtected isProtected) {
1678 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
1679 return {};
1680 }
1681
1682 this->handleDirtyContext();
1683
1684 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
1685 if (glFormat == GrGLFormat::kUnknown) {
1686 return {};
1687 }
1688
1689 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1690
1691 GrGLTextureInfo info;
1692 GrGLTextureParameters::SamplerOverriddenState initialState;
1693
1694 info.fTarget = GR_GL_TEXTURE_2D;
1695 info.fFormat = GrGLFormatToEnum(glFormat);
1696 info.fProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
1697 this->glCaps().strictProtectedness());
1698 info.fID = this->createCompressedTexture2D(dimensions, compression, glFormat,
1699 mipmapped, info.fProtected, &initialState);
1700 if (!info.fID) {
1701 return {};
1702 }
1703
1704 // Unbind this texture from the scratch texture unit.
1705 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0);
1706
1707 auto parameters = sk_make_sp<GrGLTextureParameters>();
1708 // The non-sampler params are still at their default values.
1709 parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
1710 fResetTimestampForTextureParameters);
1711
1712 return GrBackendTextures::MakeGL(
1713 dimensions.width(), dimensions.height(), mipmapped, info, std::move(parameters));
1714 }
1715
1716 bool GrGLGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture,
1717 sk_sp<skgpu::RefCntedCallback> finishedCallback,
1718 const void* data,
1719 size_t length) {
1720 GrGLTextureInfo info;
1721 SkAssertResult(GrBackendTextures::GetGLTextureInfo(backendTexture, &info));
1722
1723 GrBackendFormat format = backendTexture.getBackendFormat();
1724 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
1725 if (glFormat == GrGLFormat::kUnknown) {
1726 return false;
1727 }
1728 SkTextureCompressionType compression = GrBackendFormatToCompressionType(format);
1729
1730 skgpu::Mipmapped mipmapped =
1731 backendTexture.hasMipmaps() ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
1732
1733 this->bindTextureToScratchUnit(info.fTarget, info.fID);
1734
1735 // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
1736 // so that the uploads go to the right levels.
1737 if (backendTexture.hasMipmaps() && this->glCaps().mipmapLevelControlSupport()) {
1738 auto params = get_gl_texture_params(backendTexture);
1739 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
1740 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
1741 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0));
1742 nonsamplerState.fBaseMipMapLevel = 0;
1743 }
1744 int numMipLevels =
1745 SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1;
1746 if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) {
1747 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1));
1748 nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
1749 }
1750 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
1751 }
1752
1753 bool result = this->uploadCompressedTexData(compression,
1754 glFormat,
1755 backendTexture.dimensions(),
1756 mipmapped,
1757 GR_GL_TEXTURE_2D,
1758 data,
1759 length);
1760
1761 // Unbind this texture from the scratch texture unit.
1762 this->bindTextureToScratchUnit(info.fTarget, 0);
1763
1764 return result;
1765 }
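
// Worked example for the mip-level bookkeeping above (illustrative only): for
// a 256x256 backend texture, SkMipmap::ComputeLevelCount(256, 256) == 8, so
// numMipLevels == 9 and GR_GL_TEXTURE_MAX_LEVEL is set to 8, covering levels
// 256x256 down to 1x1. The base level is forced back to 0 first so the
// per-level uploads in uploadCompressedTexData() land on the expected levels.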
1766
1767 int GrGLGpu::getCompatibleStencilIndex(GrGLFormat format) {
1768 if (this->glCaps().avoidStencilBuffers()) {
1769 return -1;
1770 }
1771
1772 static const int kSize = 16;
1773 SkASSERT(this->glCaps().canFormatBeFBOColorAttachment(format));
1774
1775 if (!this->glCaps().hasStencilFormatBeenDeterminedForFormat(format)) {
1776 // Default to unsupported, set this if we find a stencil format that works.
1777 int firstWorkingStencilFormatIndex = -1;
1778
1779 // In the following we're not actually creating the StencilBuffer that will be used but,
1780 // rather, are just determining the correct format to use. We assume that the
1781 // acceptable format will not change between Protected and unProtected stencil buffers and
1782 // that using Protected::kNo here will not cause any issues with strictProtectedness mode
1783 // (since no work is actually submitted to a queue).
1784 const GrProtected kNotProtected = skgpu::Protected::kNo;
1785
1786 GrGLuint colorID = this->createTexture({kSize, kSize}, format, GR_GL_TEXTURE_2D,
1787 GrRenderable::kYes,
1788 nullptr,
1789 1,
1790 kNotProtected,
1791 /*label=*/"Skia");
1792 if (!colorID) {
1793 return -1;
1794 }
1795 // unbind the texture from the texture unit before binding it to the frame buffer
1796 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
1797
1798 // Create Framebuffer
1799 GrGLuint fb = 0;
1800 GL_CALL(GenFramebuffers(1, &fb));
1801 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb);
1802 fHWBoundRenderTargetUniqueID.makeInvalid();
1803 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1804 GR_GL_COLOR_ATTACHMENT0,
1805 GR_GL_TEXTURE_2D,
1806 colorID,
1807 0));
1808 GrGLuint sbRBID = 0;
1809 GL_CALL(GenRenderbuffers(1, &sbRBID));
1810
1811 // Iterate over the supported stencil formats until we find a compatible one.
1812 int stencilFmtCnt = this->glCaps().stencilFormats().size();
1813 if (sbRBID) {
1814 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
1815 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
1816 GrGLFormat sFmt = this->glCaps().stencilFormats()[i];
1817 GrGLenum error = GL_ALLOC_CALL(RenderbufferStorage(
1818 GR_GL_RENDERBUFFER, GrGLFormatToEnum(sFmt), kSize, kSize));
1819 if (error == GR_GL_NO_ERROR) {
1820 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1821 GR_GL_STENCIL_ATTACHMENT,
1822 GR_GL_RENDERBUFFER, sbRBID));
1823 if (GrGLFormatIsPackedDepthStencil(sFmt)) {
1824 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1825 GR_GL_DEPTH_ATTACHMENT,
1826 GR_GL_RENDERBUFFER, sbRBID));
1827 } else {
1828 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1829 GR_GL_DEPTH_ATTACHMENT,
1830 GR_GL_RENDERBUFFER, 0));
1831 }
1832 GrGLenum status;
1833 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1834 if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
1835 firstWorkingStencilFormatIndex = i;
1836 break;
1837 }
1838 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1839 GR_GL_STENCIL_ATTACHMENT,
1840 GR_GL_RENDERBUFFER, 0));
1841 if (GrGLFormatIsPackedDepthStencil(sFmt)) {
1842 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1843 GR_GL_DEPTH_ATTACHMENT,
1844 GR_GL_RENDERBUFFER, 0));
1845 }
1846 }
1847 }
1848 GL_CALL(DeleteRenderbuffers(1, &sbRBID));
1849 }
1850 GL_CALL(DeleteTextures(1, &colorID));
1851 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
1852 this->deleteFramebuffer(fb);
1853 fGLContext->caps()->setStencilFormatIndexForFormat(format, firstWorkingStencilFormatIndex);
1854 }
1855 return this->glCaps().getStencilFormatIndexForFormat(format);
1856 }
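
// Note on the probe above: the first compatible entry in
// glCaps().stencilFormats() is cached per color format via
// setStencilFormatIndexForFormat(), so the 16x16 scratch texture/FBO probe
// runs at most once per format. A cached -1 means no stencil format attached
// cleanly, and makeStencilAttachment() below treats that as "no stencil".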
1857
1858 static void set_khr_debug_label(GrGLGpu* gpu, const GrGLuint id, std::string_view label) {
1859 const std::string khr_debug_label = label.empty() ? "Skia" : std::string(label);
1860 if (gpu->glCaps().debugSupport()) {
1861 GR_GL_CALL(gpu->glInterface(), ObjectLabel(GR_GL_TEXTURE, id, -1, khr_debug_label.c_str()));
1862 }
1863 }
1864
1865 GrGLuint GrGLGpu::createCompressedTexture2D(
1866 SkISize dimensions,
1867 SkTextureCompressionType compression,
1868 GrGLFormat format,
1869 skgpu::Mipmapped mipmapped,
1870 GrProtected isProtected,
1871 GrGLTextureParameters::SamplerOverriddenState* initialState) {
1872 if (format == GrGLFormat::kUnknown) {
1873 return 0;
1874 }
1875 GrGLuint id = 0;
1876 GL_CALL(GenTextures(1, &id));
1877 if (!id) {
1878 return 0;
1879 }
1880
1881 this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id);
1882
1883 set_khr_debug_label(this, id, /*label=*/"Skia");
1884
1885 *initialState = set_initial_texture_params(this->glInterface(),
1886 this->glCaps(),
1887 GR_GL_TEXTURE_2D);
1888
1889 SkASSERT(isProtected == skgpu::Protected::kNo || this->glCaps().supportsProtectedContent());
1890 SkASSERT(!this->glCaps().strictProtectedness() || isProtected == skgpu::Protected::kYes);
1891
1892 if (GrProtected::kYes == isProtected) {
1893 if (this->glCaps().supportsProtectedContent()) {
1894 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_PROTECTED_EXT, GR_GL_TRUE));
1895 } else {
1896 GL_CALL(DeleteTextures(1, &id));
1897 return 0;
1898 }
1899 }
1900
1901 return id;
1902 }
1903
1904 GrGLuint GrGLGpu::createTexture(SkISize dimensions,
1905 GrGLFormat format,
1906 GrGLenum target,
1907 GrRenderable renderable,
1908 GrGLTextureParameters::SamplerOverriddenState* initialState,
1909 int mipLevelCount,
1910 GrProtected isProtected,
1911 std::string_view label) {
1912 SkASSERT(format != GrGLFormat::kUnknown);
1913 SkASSERT(!GrGLFormatIsCompressed(format));
1914
1915 GrGLuint id = 0;
1916 GL_CALL(GenTextures(1, &id));
1917
1918 if (!id) {
1919 return 0;
1920 }
1921
1922 this->bindTextureToScratchUnit(target, id);
1923
1924 set_khr_debug_label(this, id, label);
1925
1926 if (GrRenderable::kYes == renderable && this->glCaps().textureUsageSupport()) {
1927 // provides a hint about how this texture will be used
1928 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_USAGE, GR_GL_FRAMEBUFFER_ATTACHMENT));
1929 }
1930
1931 if (initialState) {
1932 *initialState = set_initial_texture_params(this->glInterface(), this->glCaps(), target);
1933 } else {
1934 set_initial_texture_params(this->glInterface(), this->glCaps(), target);
1935 }
1936
1937 SkASSERT(isProtected == skgpu::Protected::kNo || this->glCaps().supportsProtectedContent());
1938 SkASSERT(!this->glCaps().strictProtectedness() || isProtected == skgpu::Protected::kYes);
1939
1940 if (GrProtected::kYes == isProtected) {
1941 if (this->glCaps().supportsProtectedContent()) {
1942 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_PROTECTED_EXT, GR_GL_TRUE));
1943 } else {
1944 GL_CALL(DeleteTextures(1, &id));
1945 return 0;
1946 }
1947 }
1948
1949 GrGLenum internalFormat = this->glCaps().getTexImageOrStorageInternalFormat(format);
1950
1951 bool success = false;
1952 if (internalFormat) {
1953 if (this->glCaps().formatSupportsTexStorage(format)) {
1954 auto levelCount = std::max(mipLevelCount, 1);
1955 GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, levelCount, internalFormat,
1956 dimensions.width(), dimensions.height()));
1957 success = (error == GR_GL_NO_ERROR);
1958 } else {
1959 GrGLenum externalFormat, externalType;
1960 this->glCaps().getTexImageExternalFormatAndType(format, &externalFormat, &externalType);
1961 GrGLenum error = GR_GL_NO_ERROR;
1962 if (externalFormat && externalType) {
1963 // If we don't unbind here then nullptr is treated as a zero offset into the bound
1964 // transfer buffer rather than an indication that there is no data to copy.
1965 this->unbindXferBuffer(GrGpuBufferType::kXferCpuToGpu);
1966 for (int level = 0; level < mipLevelCount && error == GR_GL_NO_ERROR; level++) {
1967 const int twoToTheMipLevel = 1 << level;
1968 const int currentWidth = std::max(1, dimensions.width() / twoToTheMipLevel);
1969 const int currentHeight = std::max(1, dimensions.height() / twoToTheMipLevel);
1970 error = GL_ALLOC_CALL(TexImage2D(target, level, internalFormat, currentWidth,
1971 currentHeight, 0, externalFormat, externalType,
1972 nullptr));
1973 }
1974 success = (error == GR_GL_NO_ERROR);
1975 }
1976 }
1977 }
1978 if (success) {
1979 return id;
1980 }
1981 GL_CALL(DeleteTextures(1, &id));
1982 return 0;
1983 }
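
// Worked example of the per-level TexImage2D sizing above (illustrative
// only): for dimensions 100x60 with mipLevelCount == 7 the loop allocates
//
//   level 0: 100x60, 1: 50x30, 2: 25x15, 3: 12x7, 4: 6x3, 5: 3x1, 6: 1x1
//
// i.e. each dimension is floor-divided by 2^level and clamped to >= 1, which
// matches the sizes TexStorage2D would allocate for the same level count.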
1984
1985 sk_sp<GrAttachment> GrGLGpu::makeStencilAttachment(const GrBackendFormat& colorFormat,
1986 SkISize dimensions, int numStencilSamples) {
1987 int sIdx = this->getCompatibleStencilIndex(GrBackendFormats::AsGLFormat(colorFormat));
1988 if (sIdx < 0) {
1989 return nullptr;
1990 }
1991 GrGLFormat sFmt = this->glCaps().stencilFormats()[sIdx];
1992
1993 auto stencil = GrGLAttachment::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1994 if (stencil) {
1995 fStats.incStencilAttachmentCreates();
1996 }
1997 return stencil;
1998 }
1999
2000 sk_sp<GrAttachment> GrGLGpu::makeMSAAAttachment(SkISize dimensions, const GrBackendFormat& format,
2001 int numSamples, GrProtected isProtected,
2002 GrMemoryless isMemoryless) {
2003 SkASSERT(isMemoryless == GrMemoryless::kNo);
2004 return GrGLAttachment::MakeMSAA(
2005 this, dimensions, numSamples, GrBackendFormats::AsGLFormat(format));
2006 }
2007
2008 ////////////////////////////////////////////////////////////////////////////////
2009
2010 sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size,
2011 GrGpuBufferType intendedType,
2012 GrAccessPattern accessPattern) {
2013 return GrGLBuffer::Make(this, size, intendedType, accessPattern);
2014 }
2015
2016 void GrGLGpu::flushScissorTest(GrScissorTest scissorTest) {
2017 if (GrScissorTest::kEnabled == scissorTest) {
2018 if (kYes_TriState != fHWScissorSettings.fEnabled) {
2019 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2020 fHWScissorSettings.fEnabled = kYes_TriState;
2021 }
2022 } else {
2023 if (kNo_TriState != fHWScissorSettings.fEnabled) {
2024 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2025 fHWScissorSettings.fEnabled = kNo_TriState;
2026 }
2027 }
2028 }
2029
2030 void GrGLGpu::flushScissorRect(const SkIRect& scissor, int rtHeight, GrSurfaceOrigin rtOrigin) {
2031 SkASSERT(fHWScissorSettings.fEnabled == TriState::kYes_TriState);
2032 auto nativeScissor = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, scissor);
2033 if (fHWScissorSettings.fRect != nativeScissor) {
2034 GL_CALL(Scissor(nativeScissor.fX, nativeScissor.fY, nativeScissor.fWidth,
2035 nativeScissor.fHeight));
2036 fHWScissorSettings.fRect = nativeScissor;
2037 }
2038 }
2039
2040 void GrGLGpu::flushViewport(const SkIRect& viewport, int rtHeight, GrSurfaceOrigin rtOrigin) {
2041 auto nativeViewport = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, viewport);
2042 if (fHWViewport != nativeViewport) {
2043 GL_CALL(Viewport(nativeViewport.fX, nativeViewport.fY,
2044 nativeViewport.fWidth, nativeViewport.fHeight));
2045 fHWViewport = nativeViewport;
2046 }
2047 }
2048
2049 void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
2050 const GrGLRenderTarget* rt, GrSurfaceOrigin origin) {
2051 #ifndef USE_NSIGHT
2052 typedef GrWindowRectsState::Mode Mode;
2053 // Window rects can't be used on-screen.
2054 SkASSERT(!windowState.enabled() || !rt->glRTFBOIDis0());
2055 SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());
2056
2057 if (!this->caps()->maxWindowRectangles() ||
2058 fHWWindowRectsState.knownEqualTo(origin, rt->width(), rt->height(), windowState)) {
2059 return;
2060 }
2061
2062 // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
2063 // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
2064 int numWindows = std::min(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
2065 SkASSERT(windowState.numWindows() == numWindows);
2066
2067 GrNativeRect glwindows[GrWindowRectangles::kMaxWindows];
2068 const SkIRect* skwindows = windowState.windows().data();
2069 for (int i = 0; i < numWindows; ++i) {
2070 glwindows[i].setRelativeTo(origin, rt->height(), skwindows[i]);
2071 }
2072
2073 GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
2074 GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));
2075
2076 fHWWindowRectsState.set(origin, rt->width(), rt->height(), windowState);
2077 #endif
2078 }
2079
2080 void GrGLGpu::disableWindowRectangles() {
2081 #ifndef USE_NSIGHT
2082 if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
2083 return;
2084 }
2085 GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
2086 fHWWindowRectsState.setDisabled();
2087 #endif
2088 }
2089
2090 bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget, bool useMultisampleFBO,
2091 const GrProgramInfo& programInfo) {
2092 this->handleDirtyContext();
2093
2094 sk_sp<GrGLProgram> program = fProgramCache->findOrCreateProgram(this->getContext(),
2095 programInfo);
2096 if (!program) {
2097 GrCapsDebugf(this->caps(), "Failed to create program!\n");
2098 return false;
2099 }
2100
2101 this->flushProgram(std::move(program));
2102
2103 // Swizzle the blend to match what the shader will output.
2104 this->flushBlendAndColorWrite(programInfo.pipeline().getXferProcessor().getBlendInfo(),
2105 programInfo.pipeline().writeSwizzle());
2106
2107 fHWProgram->updateUniforms(renderTarget, programInfo);
2108
2109 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
2110 GrStencilSettings stencil;
2111 if (programInfo.isStencilEnabled()) {
2112 SkASSERT(glRT->getStencilAttachment(useMultisampleFBO));
2113 stencil.reset(*programInfo.userStencilSettings(),
2114 programInfo.pipeline().hasStencilClip(),
2115 glRT->numStencilBits(useMultisampleFBO));
2116 }
2117 this->flushStencil(stencil, programInfo.origin());
2118 this->flushScissorTest(GrScissorTest(programInfo.pipeline().isScissorTestEnabled()));
2119 this->flushWindowRectangles(programInfo.pipeline().getWindowRectsState(),
2120 glRT, programInfo.origin());
2121 this->flushConservativeRasterState(programInfo.pipeline().usesConservativeRaster());
2122 this->flushWireframeState(programInfo.pipeline().isWireframe());
2123
2124 // This must come after textures are flushed because a texture may need
2125 // to be msaa-resolved (which will modify bound FBO state).
2126 this->flushRenderTarget(glRT, useMultisampleFBO);
2127
2128 return true;
2129 }
2130
2131 void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) {
2132 if (!program) {
2133 fHWProgram.reset();
2134 fHWProgramID = 0;
2135 return;
2136 }
2137 SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID()));
2138 if (program == fHWProgram) {
2139 return;
2140 }
2141 auto id = program->programID();
2142 SkASSERT(id);
2143 GL_CALL(UseProgram(id));
2144 fHWProgram = std::move(program);
2145 fHWProgramID = id;
2146 }
2147
2148 void GrGLGpu::flushProgram(GrGLuint id) {
2149 SkASSERT(id);
2150 if (fHWProgramID == id) {
2151 SkASSERT(!fHWProgram);
2152 return;
2153 }
2154 fHWProgram.reset();
2155 GL_CALL(UseProgram(id));
2156 fHWProgramID = id;
2157 }
2158
2159 void GrGLGpu::didDrawTo(GrRenderTarget* rt) {
2160 SkASSERT(fHWWriteToColor != kUnknown_TriState);
2161 if (fHWWriteToColor == kYes_TriState) {
2162 // The bounds are only used to check for empty and we don't know the bounds. The origin
2163 // is irrelevant if there are no bounds.
2164 this->didWriteToSurface(rt, kTopLeft_GrSurfaceOrigin, /*bounds=*/nullptr);
2165 }
2166 }
2167
2168 GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
2169 this->handleDirtyContext();
2170
2171 // Index buffer state is tied to the vertex array.
2172 if (GrGpuBufferType::kIndex == type) {
2173 this->bindVertexArray(0);
2174 }
2175
2176 auto* bufferState = this->hwBufferState(type);
2177 if (buffer->isCpuBuffer()) {
2178 if (!bufferState->fBufferZeroKnownBound) {
2179 GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
2180 bufferState->fBufferZeroKnownBound = true;
2181 bufferState->fBoundBufferUniqueID.makeInvalid();
2182 }
2183 } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
2184 bufferState->fBoundBufferUniqueID) {
2185 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
2186 GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
2187 bufferState->fBufferZeroKnownBound = false;
2188 bufferState->fBoundBufferUniqueID = glBuffer->uniqueID();
2189 }
2190
2191 return bufferState->fGLTarget;
2192 }
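
// Background for the bindVertexArray(0) above: in OpenGL the
// GL_ELEMENT_ARRAY_BUFFER binding is stored in the currently bound vertex
// array object, not in global context state, so binding an index buffer while
// an arbitrary VAO is bound would silently rewrite that VAO's index binding.
// Roughly equivalent raw GL (sketch only):
//
//   glBindVertexArray(0);                       // detach any client VAO
//   glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, id);  // now safe to (re)bind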
2193
2194 void GrGLGpu::clear(const GrScissorState& scissor,
2195 std::array<float, 4> color,
2196 GrRenderTarget* target,
2197 bool useMultisampleFBO,
2198 GrSurfaceOrigin origin) {
2199 // parent class should never let us get here with no RT
2200 SkASSERT(target);
2201 SkASSERT(!this->caps()->performColorClearsAsDraws());
2202 SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws());
2203
2204 this->handleDirtyContext();
2205
2206 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2207
2208 this->flushRenderTarget(glRT, useMultisampleFBO);
2209 this->flushScissor(scissor, glRT->height(), origin);
2210 this->disableWindowRectangles();
2211 this->flushColorWrite(true);
2212 this->flushClearColor(color);
2213 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
2214 this->didWriteToSurface(glRT, origin, scissor.enabled() ? &scissor.rect() : nullptr);
2215 }
2216
2217 static bool use_tiled_rendering(const GrGLCaps& glCaps,
2218 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2219 // Only use the tiled rendering extension if we can explicitly clear and discard the stencil.
2220 // Otherwise it's faster to just not use it.
2221 return glCaps.tiledRenderingSupport() && GrLoadOp::kClear == stencilLoadStore.fLoadOp &&
2222 GrStoreOp::kDiscard == stencilLoadStore.fStoreOp;
2223 }
2224
2225 void GrGLGpu::beginCommandBuffer(GrGLRenderTarget* rt, bool useMultisampleFBO,
2226 const SkIRect& bounds, GrSurfaceOrigin origin,
2227 const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
2228 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2229 SkASSERT(!fIsExecutingCommandBuffer_DebugOnly);
2230
2231 this->handleDirtyContext();
2232
2233 this->flushRenderTarget(rt, useMultisampleFBO);
2234 SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = true);
2235
2236 if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
2237 auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, rt->height(), bounds);
2238 GrGLbitfield preserveMask = (GrLoadOp::kLoad == colorLoadStore.fLoadOp)
2239 ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
2240 SkASSERT(GrLoadOp::kLoad != stencilLoadStore.fLoadOp); // Handled by use_tiled_rendering().
2241 GL_CALL(StartTiling(nativeBounds.fX, nativeBounds.fY, nativeBounds.fWidth,
2242 nativeBounds.fHeight, preserveMask));
2243 }
2244
2245 GrGLbitfield clearMask = 0;
2246 if (GrLoadOp::kClear == colorLoadStore.fLoadOp) {
2247 SkASSERT(!this->caps()->performColorClearsAsDraws());
2248 this->flushClearColor(colorLoadStore.fClearColor);
2249 this->flushColorWrite(true);
2250 clearMask |= GR_GL_COLOR_BUFFER_BIT;
2251 }
2252 if (GrLoadOp::kClear == stencilLoadStore.fLoadOp) {
2253 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2254 GL_CALL(StencilMask(0xffffffff));
2255 GL_CALL(ClearStencil(0));
2256 clearMask |= GR_GL_STENCIL_BUFFER_BIT;
2257 }
2258 if (clearMask) {
2259 this->flushScissorTest(GrScissorTest::kDisabled);
2260 this->disableWindowRectangles();
2261 GL_CALL(Clear(clearMask));
2262 if (clearMask & GR_GL_COLOR_BUFFER_BIT) {
2263 this->didWriteToSurface(rt, origin, nullptr);
2264 }
2265 }
2266 }
2267
2268 void GrGLGpu::endCommandBuffer(GrGLRenderTarget* rt, bool useMultisampleFBO,
2269 const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore,
2270 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) {
2271 SkASSERT(fIsExecutingCommandBuffer_DebugOnly);
2272
2273 this->handleDirtyContext();
2274
2275 if (rt->uniqueID() != fHWBoundRenderTargetUniqueID ||
2276 useMultisampleFBO != fHWBoundFramebufferIsMSAA) {
2277 // The framebuffer binding changed in the middle of a command buffer. We should have already
2278 // printed a warning during onFBOChanged.
2279 return;
2280 }
2281
2282 if (GrGLCaps::kNone_InvalidateFBType != this->glCaps().invalidateFBType()) {
2283 STArray<2, GrGLenum> discardAttachments;
2284 if (GrStoreOp::kDiscard == colorLoadStore.fStoreOp) {
2285 discardAttachments.push_back(
2286 rt->isFBO0(useMultisampleFBO) ? GR_GL_COLOR : GR_GL_COLOR_ATTACHMENT0);
2287 }
2288 if (GrStoreOp::kDiscard == stencilLoadStore.fStoreOp) {
2289 discardAttachments.push_back(
2290 rt->isFBO0(useMultisampleFBO) ? GR_GL_STENCIL : GR_GL_STENCIL_ATTACHMENT);
2291 }
2292
2293 if (!discardAttachments.empty()) {
2294 if (GrGLCaps::kInvalidate_InvalidateFBType == this->glCaps().invalidateFBType()) {
2295 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.size(),
2296 discardAttachments.begin()));
2297 } else {
2298 SkASSERT(GrGLCaps::kDiscard_InvalidateFBType == this->glCaps().invalidateFBType());
2299 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.size(),
2300 discardAttachments.begin()));
2301 }
2302 }
2303 }
2304
2305 if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) {
2306 GrGLbitfield preserveMask = (GrStoreOp::kStore == colorLoadStore.fStoreOp)
2307 ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE;
2308 // Handled by use_tiled_rendering().
2309 SkASSERT(GrStoreOp::kStore != stencilLoadStore.fStoreOp);
2310 GL_CALL(EndTiling(preserveMask));
2311 }
2312
2313 SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = false);
2314 }
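
// Note on the attachment names chosen above: for the default framebuffer
// (FBO 0), glInvalidateFramebuffer / glDiscardFramebufferEXT expect GL_COLOR
// and GL_STENCIL, while user FBOs take GL_COLOR_ATTACHMENT0 and
// GL_STENCIL_ATTACHMENT; hence the rt->isFBO0(useMultisampleFBO) selection
// for each discarded attachment.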
2315
2316 void GrGLGpu::clearStencilClip(const GrScissorState& scissor, bool insideStencilMask,
2317 GrRenderTarget* target, bool useMultisampleFBO,
2318 GrSurfaceOrigin origin) {
2319 SkASSERT(target);
2320 SkASSERT(!this->caps()->performStencilClearsAsDraws());
2321 SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws());
2322 this->handleDirtyContext();
2323
2324 GrAttachment* sb = target->getStencilAttachment(useMultisampleFBO);
2325 if (!sb) {
2326 // We should only get here if we marked a proxy as requiring a SB. However,
2327 // the SB creation could later fail. Likely clipping is going to go awry now.
2328 return;
2329 }
2330
2331 GrGLint stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());
2332 #if 0
2333 SkASSERT(stencilBitCount > 0);
2334 GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
2335 #else
2336 // we could just clear the clip bit but when we go through
2337 // ANGLE a partial stencil mask will cause clears to be
2338 // turned into draws. Our contract on OpsTask says that
2339 // changing the clip between stencil passes may or may not
2340 // zero the client's clip bits. So we just clear the whole thing.
2341 static const GrGLint clipStencilMask = ~0;
2342 #endif
2343 GrGLint value;
2344 if (insideStencilMask) {
2345 value = (1 << (stencilBitCount - 1));
2346 } else {
2347 value = 0;
2348 }
2349 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2350 this->flushRenderTarget(glRT, useMultisampleFBO);
2351
2352 this->flushScissor(scissor, glRT->height(), origin);
2353 this->disableWindowRectangles();
2354
2355 GL_CALL(StencilMask((uint32_t) clipStencilMask));
2356 GL_CALL(ClearStencil(value));
2357 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2358 fHWStencilSettings.invalidate();
2359 }
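
// Worked example for the clear value above (illustrative only): with an 8-bit
// stencil attachment, stencilBitCount == 8, so insideStencilMask maps to
// value = 1 << 7 = 0x80 (the clip bit), while the write mask stays ~0 so that
// ANGLE does not demote the masked clear to a draw, as explained above.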
2360
2361 bool GrGLGpu::readOrTransferPixelsFrom(GrSurface* surface,
2362 SkIRect rect,
2363 GrColorType surfaceColorType,
2364 GrColorType dstColorType,
2365 void* offsetOrPtr,
2366 int rowWidthInPixels) {
2367 SkASSERT(surface);
2368
2369 auto format = GrBackendFormats::AsGLFormat(surface->backendFormat());
2370 GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2371 if (!renderTarget && !this->glCaps().isFormatRenderable(format, 1)) {
2372 return false;
2373 }
2374 GrGLenum externalFormat = 0;
2375 GrGLenum externalType = 0;
2376 this->glCaps().getReadPixelsFormat(
2377 format, surfaceColorType, dstColorType, &externalFormat, &externalType);
2378 if (!externalFormat || !externalType) {
2379 return false;
2380 }
2381
2382 if (renderTarget) {
2383 // Always bind the single sample FBO since we can't read pixels from an MSAA framebuffer.
2384 constexpr bool useMultisampleFBO = false;
2385 if (renderTarget->numSamples() > 1 && renderTarget->isFBO0(useMultisampleFBO)) {
2386 return false;
2387 }
2388 this->flushRenderTarget(renderTarget, useMultisampleFBO);
2389 } else {
2390 // Use a temporary FBO.
2391 this->bindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
2392 fHWBoundRenderTargetUniqueID.makeInvalid();
2393 }
2394
2395 // determine if GL can read using the passed rowBytes or if we need a scratch buffer.
2396 if (rowWidthInPixels != rect.width()) {
2397 SkASSERT(this->glCaps().readPixelsRowBytesSupport());
2398 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowWidthInPixels));
2399 }
2400 GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, 1));
2401
2402 GL_CALL(ReadPixels(rect.left(),
2403 rect.top(),
2404 rect.width(),
2405 rect.height(),
2406 externalFormat,
2407 externalType,
2408 offsetOrPtr));
2409
2410 if (rowWidthInPixels != rect.width()) {
2411 SkASSERT(this->glCaps().readPixelsRowBytesSupport());
2412 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
2413 }
2414
2415 if (!renderTarget) {
2416 this->unbindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER);
2417 }
2418 return true;
2419 }
2420
2421 bool GrGLGpu::onReadPixels(GrSurface* surface,
2422 SkIRect rect,
2423 GrColorType surfaceColorType,
2424 GrColorType dstColorType,
2425 void* buffer,
2426 size_t rowBytes) {
2427 SkASSERT(surface);
2428
2429 size_t bytesPerPixel = GrColorTypeBytesPerPixel(dstColorType);
2430
2431 // GL_PACK_ROW_LENGTH is in terms of pixels not bytes.
2432 int rowPixelWidth;
2433
2434 if (rowBytes == SkToSizeT(rect.width()*bytesPerPixel)) {
2435 rowPixelWidth = rect.width();
2436 } else {
2437 SkASSERT(!(rowBytes % bytesPerPixel));
2438 rowPixelWidth = rowBytes / bytesPerPixel;
2439 }
2440 this->unbindXferBuffer(GrGpuBufferType::kXferGpuToCpu);
2441 return this->readOrTransferPixelsFrom(surface,
2442 rect,
2443 surfaceColorType,
2444 dstColorType,
2445 buffer,
2446 rowPixelWidth);
2447 }
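
// Worked example for the GL_PACK_ROW_LENGTH conversion above (illustrative
// only): reading a 200-pixel-wide rect of a 4-byte-per-pixel color type into
// a buffer with rowBytes == 1024 gives rowPixelWidth == 1024 / 4 == 256, so
// readOrTransferPixelsFrom() sets GR_GL_PACK_ROW_LENGTH to 256 and GL skips
// the trailing 56 pixels of padding on each destination row.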
2448
2449 GrOpsRenderPass* GrGLGpu::onGetOpsRenderPass(
2450 GrRenderTarget* rt,
2451 bool useMultisampleFBO,
2452 GrAttachment*,
2453 GrSurfaceOrigin origin,
2454 const SkIRect& bounds,
2455 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
2456 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo,
2457 const TArray<GrSurfaceProxy*, true>& sampledProxies,
2458 GrXferBarrierFlags renderPassXferBarriers) {
2459 if (!fCachedOpsRenderPass) {
2460 fCachedOpsRenderPass = std::make_unique<GrGLOpsRenderPass>(this);
2461 }
2462 if (useMultisampleFBO && rt->numSamples() == 1) {
2463 // We will be using dynamic msaa. Ensure there is an attachment.
2464 auto glRT = static_cast<GrGLRenderTarget*>(rt);
2465 if (!glRT->ensureDynamicMSAAAttachment()) {
2466 SkDebugf("WARNING: Failed to make dmsaa attachment. Render pass will be dropped.");
2467 return nullptr;
2468 }
2469 }
2470 fCachedOpsRenderPass->set(rt, useMultisampleFBO, bounds, origin, colorInfo, stencilInfo);
2471 return fCachedOpsRenderPass.get();
2472 }
2473
2474 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, bool useMultisampleFBO) {
2475 SkASSERT(target);
2476 GrGpuResource::UniqueID rtID = target->uniqueID();
2477 if (fHWBoundRenderTargetUniqueID != rtID ||
2478 fHWBoundFramebufferIsMSAA != useMultisampleFBO ||
2479 target->mustRebind(useMultisampleFBO)) {
2480 target->bind(useMultisampleFBO);
2481 #ifdef SK_DEBUG
2482 // don't do this check in Chromium -- this is causing
2483 // lots of repeated command buffer flushes when the compositor is
2484 // rendering with Ganesh, which is really slow; even too slow for
2485 // Debug mode.
2486 // Also don't do this when we know glCheckFramebufferStatus() may have side effects.
2487 if (!this->glCaps().skipErrorChecks() &&
2488 !this->glCaps().rebindColorAttachmentAfterCheckFramebufferStatus()) {
2489 GrGLenum status;
2490 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
2491 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
2492 SkDebugf("GrGLGpu::flushRenderTargetNoColorWrites glCheckFramebufferStatus %x\n",
2493 status);
2494 }
2495 }
2496 #endif
2497 fHWBoundRenderTargetUniqueID = rtID;
2498 fHWBoundFramebufferIsMSAA = useMultisampleFBO;
2499 this->flushViewport(SkIRect::MakeSize(target->dimensions()),
2500 target->height(),
2501 kTopLeft_GrSurfaceOrigin); // the origin is irrelevant in this case
2502 }
2503 if (this->caps()->workarounds().force_update_scissor_state_when_binding_fbo0) {
2504 // The driver forgets the correct scissor state when using FBO 0.
2505 if (!fHWScissorSettings.fRect.isInvalid()) {
2506 const GrNativeRect& r = fHWScissorSettings.fRect;
2507 GL_CALL(Scissor(r.fX, r.fY, r.fWidth, r.fHeight));
2508 }
2509 if (fHWScissorSettings.fEnabled == kYes_TriState) {
2510 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2511 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2512 } else if (fHWScissorSettings.fEnabled == kNo_TriState) {
2513 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
2514 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2515 }
2516 }
2517
2518 if (this->glCaps().srgbWriteControl()) {
2519 this->flushFramebufferSRGB(this->caps()->isFormatSRGB(target->backendFormat()));
2520 }
2521
2522 if (this->glCaps().shouldQueryImplementationReadSupport(target->format())) {
2523 GrGLint format;
2524 GrGLint type;
2525 GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format);
2526 GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &type);
2527 this->glCaps().didQueryImplementationReadSupport(target->format(), format, type);
2528 }
2529 }
2530
2531 void GrGLGpu::flushFramebufferSRGB(bool enable) {
2532 if (enable && kYes_TriState != fHWSRGBFramebuffer) {
2533 GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
2534 fHWSRGBFramebuffer = kYes_TriState;
2535 } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
2536 GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
2537 fHWSRGBFramebuffer = kNo_TriState;
2538 }
2539 }
2540
2541 GrGLenum GrGLGpu::prepareToDraw(GrPrimitiveType primitiveType) {
2542 fStats.incNumDraws();
2543
2544 if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() &&
2545 GrIsPrimTypeLines(primitiveType) && !GrIsPrimTypeLines(fLastPrimitiveType)) {
2546 GL_CALL(Enable(GR_GL_CULL_FACE));
2547 GL_CALL(Disable(GR_GL_CULL_FACE));
2548 }
2549 fLastPrimitiveType = primitiveType;
2550
2551 switch (primitiveType) {
2552 case GrPrimitiveType::kTriangles:
2553 return GR_GL_TRIANGLES;
2554 case GrPrimitiveType::kTriangleStrip:
2555 return GR_GL_TRIANGLE_STRIP;
2556 case GrPrimitiveType::kPoints:
2557 return GR_GL_POINTS;
2558 case GrPrimitiveType::kLines:
2559 return GR_GL_LINES;
2560 case GrPrimitiveType::kLineStrip:
2561 return GR_GL_LINE_STRIP;
2562 }
2563 SK_ABORT("invalid GrPrimitiveType");
2564 }
2565
2566 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
2567 auto glRT = static_cast<GrGLRenderTarget*>(target);
2568 if (this->glCaps().framebufferResolvesMustBeFullSize()) {
2569 this->resolveRenderFBOs(glRT, SkIRect::MakeSize(glRT->dimensions()),
2570 ResolveDirection::kMSAAToSingle);
2571 } else {
2572 this->resolveRenderFBOs(glRT, resolveRect, ResolveDirection::kMSAAToSingle);
2573 }
2574 }
2575
2576 void GrGLGpu::resolveRenderFBOs(GrGLRenderTarget* rt, const SkIRect& resolveRect,
2577 ResolveDirection resolveDirection,
2578 bool invalidateReadBufferAfterBlit) {
2579 this->handleDirtyContext();
2580 rt->bindForResolve(resolveDirection);
2581
2582 const GrGLCaps& caps = this->glCaps();
2583
2584 // make sure we go through flushRenderTarget() since we've modified
2585 // the bound DRAW FBO ID.
2586 fHWBoundRenderTargetUniqueID.makeInvalid();
2587 if (GrGLCaps::kES_Apple_MSFBOType == caps.msFBOType()) {
2588 // The Apple extension doesn't support blitting from single to multisample.
2589 SkASSERT(resolveDirection != ResolveDirection::kSingleToMSAA);
2590 SkASSERT(resolveRect == SkIRect::MakeSize(rt->dimensions()));
2591 // Apple's extension uses the scissor as the blit bounds.
2592 // Passing in kTopLeft_GrSurfaceOrigin will make sure no transformation of the rect
2593 // happens inside flushScissor since resolveRect is already in native device coordinates.
2594 GrScissorState scissor(rt->dimensions());
2595 SkAssertResult(scissor.set(resolveRect));
2596 this->flushScissor(scissor, rt->height(), kTopLeft_GrSurfaceOrigin);
2597 this->disableWindowRectangles();
2598 GL_CALL(ResolveMultisampleFramebuffer());
2599 } else {
2600 SkASSERT(!caps.framebufferResolvesMustBeFullSize() ||
2601 resolveRect == SkIRect::MakeSize(rt->dimensions()));
2602 int l = resolveRect.x();
2603 int b = resolveRect.y();
2604 int r = resolveRect.x() + resolveRect.width();
2605 int t = resolveRect.y() + resolveRect.height();
2606
2607 // BlitFrameBuffer respects the scissor, so disable it.
2608 this->flushScissorTest(GrScissorTest::kDisabled);
2609 this->disableWindowRectangles();
2610 GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t, GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
2611 }
2612
2613 if (caps.invalidateFBType() != GrGLCaps::kNone_InvalidateFBType &&
2614 invalidateReadBufferAfterBlit) {
2615 // Invalidate the read FBO attachment after the blit, in hopes that this allows the driver
2616 // to perform tiling optimizations.
2617 bool readBufferIsMSAA = resolveDirection == ResolveDirection::kMSAAToSingle;
2618 GrGLenum colorDiscardAttachment = rt->isFBO0(readBufferIsMSAA) ? GR_GL_COLOR
2619 : GR_GL_COLOR_ATTACHMENT0;
2620 if (caps.invalidateFBType() == GrGLCaps::kInvalidate_InvalidateFBType) {
2621 GL_CALL(InvalidateFramebuffer(GR_GL_READ_FRAMEBUFFER, 1, &colorDiscardAttachment));
2622 } else {
2623 SkASSERT(caps.invalidateFBType() == GrGLCaps::kDiscard_InvalidateFBType);
2624 // glDiscardFramebuffer only accepts GL_FRAMEBUFFER.
2625 rt->bind(readBufferIsMSAA);
2626 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, 1, &colorDiscardAttachment));
2627 }
2628 }
2629 }
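
// Note on the two resolve paths above: the APPLE_framebuffer_multisample
// extension has no blit rectangle, so the scissor doubles as the resolve
// bounds and only full-size, MSAA-to-single resolves are legal; the
// BlitFramebuffer path blits exactly resolveRect with the scissor disabled,
// since framebuffer blits are clipped by the scissor test in the GL spec.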
2630
2631 namespace {
2632
2633
2634 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
2635 static const GrGLenum gTable[kGrStencilOpCount] = {
2636 GR_GL_KEEP, // kKeep
2637 GR_GL_ZERO, // kZero
2638 GR_GL_REPLACE, // kReplace
2639 GR_GL_INVERT, // kInvert
2640 GR_GL_INCR_WRAP, // kIncWrap
2641 GR_GL_DECR_WRAP, // kDecWrap
2642 GR_GL_INCR, // kIncClamp
2643 GR_GL_DECR, // kDecClamp
2644 };
2645 static_assert(0 == (int)GrStencilOp::kKeep);
2646 static_assert(1 == (int)GrStencilOp::kZero);
2647 static_assert(2 == (int)GrStencilOp::kReplace);
2648 static_assert(3 == (int)GrStencilOp::kInvert);
2649 static_assert(4 == (int)GrStencilOp::kIncWrap);
2650 static_assert(5 == (int)GrStencilOp::kDecWrap);
2651 static_assert(6 == (int)GrStencilOp::kIncClamp);
2652 static_assert(7 == (int)GrStencilOp::kDecClamp);
2653 SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
2654 return gTable[(int)op];
2655 }
2656
2657 void set_gl_stencil(const GrGLInterface* gl,
2658 const GrStencilSettings::Face& face,
2659 GrGLenum glFace) {
2660 GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
2661 GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
2662 GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);
2663
2664 GrGLint ref = face.fRef;
2665 GrGLint mask = face.fTestMask;
2666 GrGLint writeMask = face.fWriteMask;
2667
2668 if (GR_GL_FRONT_AND_BACK == glFace) {
2669 // we call the combined func just in case separate stencil is not
2670 // supported.
2671 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
2672 GR_GL_CALL(gl, StencilMask(writeMask));
2673 GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
2674 } else {
2675 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
2676 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
2677 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
2678 }
2679 }
2680 } // namespace
2681
2682 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings, GrSurfaceOrigin origin) {
2683 if (stencilSettings.isDisabled()) {
2684 this->disableStencil();
2685 } else if (fHWStencilSettings != stencilSettings ||
2686 (stencilSettings.isTwoSided() && fHWStencilOrigin != origin)) {
2687 if (kYes_TriState != fHWStencilTestEnabled) {
2688 GL_CALL(Enable(GR_GL_STENCIL_TEST));
2689
2690 fHWStencilTestEnabled = kYes_TriState;
2691 }
2692 if (!stencilSettings.isTwoSided()) {
2693 set_gl_stencil(this->glInterface(), stencilSettings.singleSidedFace(),
2694 GR_GL_FRONT_AND_BACK);
2695 } else {
2696 set_gl_stencil(this->glInterface(), stencilSettings.postOriginCWFace(origin),
2697 GR_GL_FRONT);
2698 set_gl_stencil(this->glInterface(), stencilSettings.postOriginCCWFace(origin),
2699 GR_GL_BACK);
2700 }
2701 fHWStencilSettings = stencilSettings;
2702 fHWStencilOrigin = origin;
2703 }
2704 }
2705
2706 void GrGLGpu::disableStencil() {
2707 if (kNo_TriState != fHWStencilTestEnabled) {
2708 GL_CALL(Disable(GR_GL_STENCIL_TEST));
2709
2710 fHWStencilTestEnabled = kNo_TriState;
2711 fHWStencilSettings.invalidate();
2712 }
2713 }
2714
2715 void GrGLGpu::flushConservativeRasterState(bool enabled) {
2716 if (this->caps()->conservativeRasterSupport()) {
2717 if (enabled) {
2718 if (kYes_TriState != fHWConservativeRasterEnabled) {
2719 GL_CALL(Enable(GR_GL_CONSERVATIVE_RASTERIZATION));
2720 fHWConservativeRasterEnabled = kYes_TriState;
2721 }
2722 } else {
2723 if (kNo_TriState != fHWConservativeRasterEnabled) {
2724 GL_CALL(Disable(GR_GL_CONSERVATIVE_RASTERIZATION));
2725 fHWConservativeRasterEnabled = kNo_TriState;
2726 }
2727 }
2728 }
2729 }
2730
2731 void GrGLGpu::flushWireframeState(bool enabled) {
2732 if (this->caps()->wireframeSupport()) {
2733 if (this->caps()->wireframeMode() || enabled) {
2734 if (kYes_TriState != fHWWireframeEnabled) {
2735 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE));
2736 fHWWireframeEnabled = kYes_TriState;
2737 }
2738 } else {
2739 if (kNo_TriState != fHWWireframeEnabled) {
2740 GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL));
2741 fHWWireframeEnabled = kNo_TriState;
2742 }
2743 }
2744 }
2745 }
2746
2747 void GrGLGpu::flushBlendAndColorWrite(const skgpu::BlendInfo& blendInfo,
2748 const skgpu::Swizzle& swizzle) {
2749 if (this->glCaps().neverDisableColorWrites() && !blendInfo.fWritesColor) {
2750 // We need to work around a driver bug by using a blend state that preserves the dst color,
2751 // rather than disabling color writes.
2752 skgpu::BlendInfo preserveDstBlend;
2753 preserveDstBlend.fSrcBlend = skgpu::BlendCoeff::kZero;
2754 preserveDstBlend.fDstBlend = skgpu::BlendCoeff::kOne;
2755 this->flushBlendAndColorWrite(preserveDstBlend, swizzle);
2756 return;
2757 }
2758
2759 skgpu::BlendEquation equation = blendInfo.fEquation;
2760 skgpu::BlendCoeff srcCoeff = blendInfo.fSrcBlend;
2761 skgpu::BlendCoeff dstCoeff = blendInfo.fDstBlend;
2762
2763 // Any optimization to disable blending should have already been applied and
2764 // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
2765 bool blendOff = skgpu::BlendShouldDisable(equation, srcCoeff, dstCoeff) ||
2766 !blendInfo.fWritesColor;
2767
2768 if (blendOff) {
2769 if (kNo_TriState != fHWBlendState.fEnabled) {
2770 GL_CALL(Disable(GR_GL_BLEND));
2771
2772 // Workaround for the ARM KHR_blend_equation_advanced disable flags issue
2773 // https://code.google.com/p/skia/issues/detail?id=3943
2774 if (this->ctxInfo().vendor() == GrGLVendor::kARM &&
2775 skgpu::BlendEquationIsAdvanced(fHWBlendState.fEquation)) {
2776 SkASSERT(this->caps()->advancedBlendEquationSupport());
2777 // Set to any basic blending equation.
2778 skgpu::BlendEquation blendEquation = skgpu::BlendEquation::kAdd;
2779 GL_CALL(BlendEquation(gXfermodeEquation2Blend[(int)blendEquation]));
2780 fHWBlendState.fEquation = blendEquation;
2781 }
2782
2783 // Workaround for Adreno 5xx BlendFunc bug. See crbug.com/1241134.
2784 // We must also check to see if the blend coeffs are invalid because the client may have
2785 // reset our gl state and thus we will have forgotten if the previous use was a coeff
2786 // that referenced src2.
2787 if (this->glCaps().mustResetBlendFuncBetweenDualSourceAndDisable() &&
2788 (skgpu::BlendCoeffRefsSrc2(fHWBlendState.fSrcCoeff) ||
2789 skgpu::BlendCoeffRefsSrc2(fHWBlendState.fDstCoeff) ||
2790 fHWBlendState.fSrcCoeff == skgpu::BlendCoeff::kIllegal ||
2791 fHWBlendState.fDstCoeff == skgpu::BlendCoeff::kIllegal)) {
2792 // We just reset the blend func to anything that doesn't reference src2
2793 GL_CALL(BlendFunc(GR_GL_ONE, GR_GL_ZERO));
2794 fHWBlendState.fSrcCoeff = skgpu::BlendCoeff::kOne;
2795 fHWBlendState.fDstCoeff = skgpu::BlendCoeff::kZero;
2796 }
2797
2798 fHWBlendState.fEnabled = kNo_TriState;
2799 }
2800 } else {
2801 if (kYes_TriState != fHWBlendState.fEnabled) {
2802 GL_CALL(Enable(GR_GL_BLEND));
2803
2804 fHWBlendState.fEnabled = kYes_TriState;
2805 }
2806
2807 if (fHWBlendState.fEquation != equation) {
2808 GL_CALL(BlendEquation(gXfermodeEquation2Blend[(int)equation]));
2809 fHWBlendState.fEquation = equation;
2810 }
2811
2812 if (skgpu::BlendEquationIsAdvanced(equation)) {
2813 SkASSERT(this->caps()->advancedBlendEquationSupport());
2814
2815 this->flushColorWrite(blendInfo.fWritesColor);
2816 // Advanced equations have no other blend state.
2817 return;
2818 }
2819
2820 if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
2821 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[(int)srcCoeff],
2822 gXfermodeCoeff2Blend[(int)dstCoeff]));
2823 fHWBlendState.fSrcCoeff = srcCoeff;
2824 fHWBlendState.fDstCoeff = dstCoeff;
2825 }
2826
2827 if (skgpu::BlendCoeffRefsConstant(srcCoeff) || skgpu::BlendCoeffRefsConstant(dstCoeff)) {
2828 SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);
2829 if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
2830 GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA));
2831 fHWBlendState.fConstColor = blendConst;
2832 fHWBlendState.fConstColorValid = true;
2833 }
2834 }
2835 }
2836
2837 this->flushColorWrite(blendInfo.fWritesColor);
2838 }
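
// Example of the blend-off path above (illustrative only): an xfer processor
// reporting kAdd with coeffs (kOne, kZero), i.e. "src replaces dst", makes
// skgpu::BlendShouldDisable() return true, so GR_GL_BLEND is disabled
// outright rather than programming the equivalent glBlendFunc(GL_ONE,
// GL_ZERO), and only the color-write mask is flushed.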
2839
2840 void GrGLGpu::bindTexture(int unitIdx, GrSamplerState samplerState, const skgpu::Swizzle& swizzle,
2841 GrGLTexture* texture) {
2842 SkASSERT(texture);
2843
2844 #ifdef SK_DEBUG
2845 if (!this->caps()->npotTextureTileSupport()) {
2846 if (samplerState.isRepeatedX()) {
2847 const int w = texture->width();
2848 SkASSERT(SkIsPow2(w));
2849 }
2850 if (samplerState.isRepeatedY()) {
2851 const int h = texture->height();
2852 SkASSERT(SkIsPow2(h));
2853 }
2854 }
2855 #endif
2856
2857 GrGpuResource::UniqueID textureID = texture->uniqueID();
2858 GrGLenum target = texture->target();
2859 if (fHWTextureUnitBindings[unitIdx].boundID(target) != textureID) {
2860 this->setTextureUnit(unitIdx);
2861 GL_CALL(BindTexture(target, texture->textureID()));
2862 fHWTextureUnitBindings[unitIdx].setBoundID(target, textureID);
2863 }
2864
2865 if (samplerState.mipmapped() == skgpu::Mipmapped::kYes) {
2866 if (!this->caps()->mipmapSupport() || texture->mipmapped() == skgpu::Mipmapped::kNo) {
2867 // We should have caught this already.
2868 SkASSERT(!samplerState.isAniso());
2869 samplerState = GrSamplerState(samplerState.wrapModeX(),
2870 samplerState.wrapModeY(),
2871 samplerState.filter(),
2872 GrSamplerState::MipmapMode::kNone);
2873 } else {
2874 SkASSERT(!texture->mipmapsAreDirty());
2875 }
2876 }
2877
2878 auto timestamp = texture->parameters()->resetTimestamp();
2879 bool setAll = timestamp < fResetTimestampForTextureParameters;
2880 const GrGLTextureParameters::SamplerOverriddenState* samplerStateToRecord = nullptr;
2881 GrGLTextureParameters::SamplerOverriddenState newSamplerState;
2882 if (this->glCaps().useSamplerObjects()) {
2883 fSamplerObjectCache->bindSampler(unitIdx, samplerState);
2884 if (this->glCaps().mustSetAnyTexParameterToEnableMipmapping()) {
2885 if (samplerState.mipmapped() == skgpu::Mipmapped::kYes) {
2886 GrGLenum minFilter = filter_to_gl_min_filter(samplerState.filter(),
2887 samplerState.mipmapMode());
2888 const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
2889 texture->parameters()->samplerOverriddenState();
2890 this->setTextureUnit(unitIdx);
2891 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, minFilter));
2892 newSamplerState = oldSamplerState;
2893 newSamplerState.fMinFilter = minFilter;
2894 samplerStateToRecord = &newSamplerState;
2895 }
2896 }
2897 } else {
2898 if (fSamplerObjectCache) {
2899 fSamplerObjectCache->unbindSampler(unitIdx);
2900 }
2901 const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState =
2902 texture->parameters()->samplerOverriddenState();
2903 samplerStateToRecord = &newSamplerState;
2904
2905 newSamplerState.fMinFilter = filter_to_gl_min_filter(samplerState.filter(),
2906 samplerState.mipmapMode());
2907 newSamplerState.fMagFilter = filter_to_gl_mag_filter(samplerState.filter());
2908
2909 newSamplerState.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps());
2910 newSamplerState.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps());
2911
2912 newSamplerState.fMaxAniso = std::min(static_cast<GrGLfloat>(samplerState.maxAniso()),
2913 this->glCaps().maxTextureMaxAnisotropy());
2914
2915 // These are the OpenGL default values.
2916 newSamplerState.fMinLOD = -1000.f;
2917 newSamplerState.fMaxLOD = 1000.f;
2918
2919 if (setAll || newSamplerState.fMagFilter != oldSamplerState.fMagFilter) {
2920 this->setTextureUnit(unitIdx);
2921 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerState.fMagFilter));
2922 }
2923 if (setAll || newSamplerState.fMinFilter != oldSamplerState.fMinFilter) {
2924 this->setTextureUnit(unitIdx);
2925 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerState.fMinFilter));
2926 }
2927 if (this->glCaps().mipmapLodControlSupport()) {
2928 if (setAll || newSamplerState.fMinLOD != oldSamplerState.fMinLOD) {
2929 this->setTextureUnit(unitIdx);
2930 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerState.fMinLOD));
2931 }
2932 if (setAll || newSamplerState.fMaxLOD != oldSamplerState.fMaxLOD) {
2933 this->setTextureUnit(unitIdx);
2934 GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerState.fMaxLOD));
2935 }
2936 }
2937 if (setAll || newSamplerState.fWrapS != oldSamplerState.fWrapS) {
2938 this->setTextureUnit(unitIdx);
2939 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerState.fWrapS));
2940 }
2941 if (setAll || newSamplerState.fWrapT != oldSamplerState.fWrapT) {
2942 this->setTextureUnit(unitIdx);
2943 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerState.fWrapT));
2944 }
2945 if (this->glCaps().clampToBorderSupport()) {
2946 // Make sure the border color is transparent black (the default)
2947 if (setAll || oldSamplerState.fBorderColorInvalid) {
2948 this->setTextureUnit(unitIdx);
2949 static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f};
2950 GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack));
2951 }
2952 }
2953 if (this->caps()->anisoSupport()) {
2954 if (setAll || oldSamplerState.fMaxAniso != newSamplerState.fMaxAniso) {
2955 GL_CALL(TexParameterf(target,
2956 GR_GL_TEXTURE_MAX_ANISOTROPY,
2957 newSamplerState.fMaxAniso));
2958 }
2959 }
2960 }
2961 GrGLTextureParameters::NonsamplerState newNonsamplerState;
2962 newNonsamplerState.fBaseMipMapLevel = 0;
2963 newNonsamplerState.fMaxMipmapLevel = texture->maxMipmapLevel();
2964 newNonsamplerState.fSwizzleIsRGBA = true;
2965
2966 const GrGLTextureParameters::NonsamplerState& oldNonsamplerState =
2967 texture->parameters()->nonsamplerState();
2968 if (this->glCaps().textureSwizzleSupport()) {
2969 if (setAll || !oldNonsamplerState.fSwizzleIsRGBA) {
2970 static constexpr GrGLenum kRGBA[4] {
2971 GR_GL_RED,
2972 GR_GL_GREEN,
2973 GR_GL_BLUE,
2974 GR_GL_ALPHA
2975 };
2976 this->setTextureUnit(unitIdx);
2977 if (GR_IS_GR_GL(this->glStandard())) {
2978 static_assert(sizeof(kRGBA[0]) == sizeof(GrGLint));
2979 GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA,
2980 reinterpret_cast<const GrGLint*>(kRGBA)));
2981 } else if (GR_IS_GR_GL_ES(this->glStandard())) {
2982 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
2983 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, kRGBA[0]));
2984 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, kRGBA[1]));
2985 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, kRGBA[2]));
2986 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, kRGBA[3]));
2987 }
2988 }
2989 }
2990 // These are not supported in ES2 contexts
2991 if (this->glCaps().mipmapLevelControlSupport() &&
2992 (texture->textureType() != GrTextureType::kExternal ||
2993 !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) {
2994 if (newNonsamplerState.fBaseMipMapLevel != oldNonsamplerState.fBaseMipMapLevel) {
2995 this->setTextureUnit(unitIdx);
2996 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL,
2997 newNonsamplerState.fBaseMipMapLevel));
2998 }
2999 if (newNonsamplerState.fMaxMipmapLevel != oldNonsamplerState.fMaxMipmapLevel) {
3000 this->setTextureUnit(unitIdx);
3001 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
3002 newNonsamplerState.fMaxMipmapLevel));
3003 }
3004 }
3005 texture->parameters()->set(samplerStateToRecord, newNonsamplerState,
3006 fResetTimestampForTextureParameters);
3007 }
3008
3009 void GrGLGpu::onResetTextureBindings() {
3010 static constexpr GrGLenum kTargets[] = {GR_GL_TEXTURE_2D, GR_GL_TEXTURE_RECTANGLE,
3011 GR_GL_TEXTURE_EXTERNAL};
3012 for (int i = 0; i < this->numTextureUnits(); ++i) {
3013 this->setTextureUnit(i);
3014 for (auto target : kTargets) {
3015 if (fHWTextureUnitBindings[i].hasBeenModified(target)) {
3016 GL_CALL(BindTexture(target, 0));
3017 }
3018 }
3019 fHWTextureUnitBindings[i].invalidateAllTargets(true);
3020 }
3021 }
3022
3023 void GrGLGpu::flushColorWrite(bool writeColor) {
3024 if (!writeColor) {
3025 if (kNo_TriState != fHWWriteToColor) {
3026 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
3027 GR_GL_FALSE, GR_GL_FALSE));
3028 fHWWriteToColor = kNo_TriState;
3029 }
3030 } else {
3031 if (kYes_TriState != fHWWriteToColor) {
3032 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
3033 fHWWriteToColor = kYes_TriState;
3034 }
3035 }
3036 }
3037
3038 void GrGLGpu::flushClearColor(std::array<float, 4> color) {
3039 GrGLfloat r = color[0], g = color[1], b = color[2], a = color[3];
3040 if (this->glCaps().clearToBoundaryValuesIsBroken() &&
3041 (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) {
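// Nudge an exactly-0 or exactly-1 alpha just past the boundary so the clear avoids the
// driver path that mishandles boundary clear values (per clearToBoundaryValuesIsBroken).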
3042 static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f);
3043 static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f);
3044 a = (1 == a) ? safeAlpha1 : safeAlpha0;
3045 }
3046 if (r != fHWClearColor[0] || g != fHWClearColor[1] ||
3047 b != fHWClearColor[2] || a != fHWClearColor[3]) {
3048 GL_CALL(ClearColor(r, g, b, a));
3049 fHWClearColor[0] = r;
3050 fHWClearColor[1] = g;
3051 fHWClearColor[2] = b;
3052 fHWClearColor[3] = a;
3053 }
3054 }
3055
3056 void GrGLGpu::setTextureUnit(int unit) {
3057 SkASSERT(unit >= 0 && unit < this->numTextureUnits());
3058 if (unit != fHWActiveTextureUnitIdx) {
3059 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
3060 fHWActiveTextureUnitIdx = unit;
3061 }
3062 }
3063
3064 void GrGLGpu::bindTextureToScratchUnit(GrGLenum target, GrGLint textureID) {
3065 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
3066 int lastUnitIdx = this->numTextureUnits() - 1;
3067 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
3068 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
3069 fHWActiveTextureUnitIdx = lastUnitIdx;
3070 }
3071 // Clear out the tracked binding so that if a GrGLProgram does use this unit it will rebind the
3072 // correct texture.
3073 fHWTextureUnitBindings[lastUnitIdx].invalidateForScratchUse(target);
3074 GL_CALL(BindTexture(target, textureID));
3075 }
3076
3077 // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface.
3078 static inline bool can_blit_framebuffer_for_copy_surface(const GrSurface* dst,
3079 const GrSurface* src,
3080 const SkIRect& srcRect,
3081 const SkIRect& dstRect,
3082 const GrGLCaps& caps) {
3083 int dstSampleCnt = 0;
3084 int srcSampleCnt = 0;
3085 if (const GrRenderTarget* rt = dst->asRenderTarget()) {
3086 dstSampleCnt = rt->numSamples();
3087 }
3088 if (const GrRenderTarget* rt = src->asRenderTarget()) {
3089 srcSampleCnt = rt->numSamples();
3090 }
3091 SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget()));
3092 SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget()));
3093
3094 GrGLFormat dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3095 GrGLFormat srcFormat = GrBackendFormats::AsGLFormat(src->backendFormat());
3096
3097 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3098 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3099
3100 GrTextureType dstTexType;
3101 GrTextureType* dstTexTypePtr = nullptr;
3102 GrTextureType srcTexType;
3103 GrTextureType* srcTexTypePtr = nullptr;
3104 if (dstTex) {
3105 dstTexType = dstTex->textureType();
3106 dstTexTypePtr = &dstTexType;
3107 }
3108 if (srcTex) {
3109 srcTexType = srcTex->textureType();
3110 srcTexTypePtr = &srcTexType;
3111 }
3112
3113 return caps.canCopyAsBlit(dstFormat, dstSampleCnt, dstTexTypePtr,
3114 srcFormat, srcSampleCnt, srcTexTypePtr,
3115 src->getBoundsRect(), true, srcRect, dstRect);
3116 }
3117
3118 static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) {
3119 // A RT has a separate MSAA renderbuffer if:
3120 // 1) It's multisampled
3121 // 2) We're using an extension with separate MSAA renderbuffers
3122 // 3) It's not FBO 0, which is special and always auto-resolves
3123 return rt->numSamples() > 1 && glCaps.usesMSAARenderBuffers() && !rt->isFBO0(true/*msaa*/);
3124 }
3125
3126 static inline bool can_copy_texsubimage(const GrSurface* dst, const GrSurface* src,
3127 const GrGLCaps& caps) {
3128
3129 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
3130 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
3131 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3132 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3133
3134 bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false;
3135 bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false;
3136
3137 GrGLFormat dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3138 GrGLFormat srcFormat = GrBackendFormats::AsGLFormat(src->backendFormat());
3139
3140 GrTextureType dstTexType;
3141 GrTextureType* dstTexTypePtr = nullptr;
3142 GrTextureType srcTexType;
3143 GrTextureType* srcTexTypePtr = nullptr;
3144 if (dstTex) {
3145 dstTexType = dstTex->textureType();
3146 dstTexTypePtr = &dstTexType;
3147 }
3148 if (srcTex) {
3149 srcTexType = srcTex->textureType();
3150 srcTexTypePtr = &srcTexType;
3151 }
3152
3153 return caps.canCopyTexSubImage(dstFormat, dstHasMSAARenderBuffer, dstTexTypePtr,
3154 srcFormat, srcHasMSAARenderBuffer, srcTexTypePtr);
3155 }
3156
3157 void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget,
3158 TempFBOTarget tempFBOTarget) {
3159 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
3160 if (!rt || mipLevel > 0) {
3161 SkASSERT(surface->asTexture());
3162 GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture());
3163 GrGLuint texID = texture->textureID();
3164 GrGLenum target = texture->target();
3165 GrGLuint* tempFBOID;
3166 tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
3167
3168 if (0 == *tempFBOID) {
3169 GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
3170 }
3171
3172 this->bindFramebuffer(fboTarget, *tempFBOID);
3173 GR_GL_CALL(
3174 this->glInterface(),
3175 FramebufferTexture2D(fboTarget, GR_GL_COLOR_ATTACHMENT0, target, texID, mipLevel));
3176 if (mipLevel == 0) {
3177 texture->baseLevelWasBoundToFBO();
3178 }
3179 } else {
3180 rt->bindForPixelOps(fboTarget);
3181 }
3182 }
3183
3184 void GrGLGpu::unbindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget) {
3185 // bindSurfaceFBOForPixelOps temporarily binds textures that are not render targets (or that are
     // nonzero mip levels) to a temporary FBO; detach the texture here so it is not left attached.
3186 if (mipLevel > 0 || !surface->asRenderTarget()) {
3187 SkASSERT(surface->asTexture());
3188 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
3189 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
3190 GR_GL_COLOR_ATTACHMENT0,
3191 textureTarget,
3192 0,
3193 0));
3194 }
3195 }
3196
3197 void GrGLGpu::onFBOChanged() {
3198 if (this->caps()->workarounds().flush_on_framebuffer_change) {
3199 this->flush(FlushType::kForce);
3200 }
3201 #ifdef SK_DEBUG
3202 if (fIsExecutingCommandBuffer_DebugOnly) {
3203 SkDebugf("WARNING: GL FBO binding changed while executing a command buffer. "
3204 "This will severely hurt performance.\n");
3205 }
3206 #endif
3207 }
3208
3209 void GrGLGpu::bindFramebuffer(GrGLenum target, GrGLuint fboid) {
3210 GL_CALL(BindFramebuffer(target, fboid));
3211 if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) {
3212 fBoundDrawFramebuffer = fboid;
3213 }
3214 this->onFBOChanged();
3215 }
3216
3217 void GrGLGpu::deleteFramebuffer(GrGLuint fboid) {
3218 // We're relying on the GL state shadowing being correct in the workaround code below so we
3219 // need to handle a dirty context.
3220 this->handleDirtyContext();
3221 if (fboid == fBoundDrawFramebuffer &&
3222 this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) {
3223 // This workaround only applies to deleting currently bound framebuffers
3224 // on Adreno 420. Because this is a somewhat rare case, instead of
3225 // tracking all the attachments of every framebuffer, just always
3226 // unbind all attachments.
3227 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3228 GR_GL_RENDERBUFFER, 0));
3229 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT,
3230 GR_GL_RENDERBUFFER, 0));
3231 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
3232 GR_GL_RENDERBUFFER, 0));
3233 }
3234
3235 GL_CALL(DeleteFramebuffers(1, &fboid));
3236
3237 // Deleting the currently bound framebuffer rebinds to 0.
3238 if (fboid == fBoundDrawFramebuffer) {
3239 this->onFBOChanged();
3240 }
3241 }
3242
3243 bool GrGLGpu::onCopySurface(GrSurface* dst, const SkIRect& dstRect,
3244 GrSurface* src, const SkIRect& srcRect,
3245 GrSamplerState::Filter filter) {
3246 // Don't prefer copying as a draw if the dst doesn't already have a FBO object.
3247 // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites().
3248 bool preferCopy = SkToBool(dst->asRenderTarget());
3249 bool scalingCopy = dstRect.size() != srcRect.size();
3250 auto dstFormat = GrBackendFormats::AsGLFormat(dst->backendFormat());
3251 if (preferCopy &&
3252 this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()), scalingCopy)) {
3253 GrRenderTarget* dstRT = dst->asRenderTarget();
3254 bool drawToMultisampleFBO = dstRT && dstRT->numSamples() > 1;
3255 if (this->copySurfaceAsDraw(dst, drawToMultisampleFBO, src, srcRect, dstRect, filter)) {
3256 return true;
3257 }
3258 }
3259
3260 // Prefer copying with glCopyTexSubImage when the dimensions are the same.
3261 if (!scalingCopy && can_copy_texsubimage(dst, src, this->glCaps())) {
3262 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstRect.topLeft());
3263 return true;
3264 }
3265
3266 if (can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstRect, this->glCaps())) {
3267 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstRect, filter);
3268 }
3269
3270 if (!preferCopy &&
3271 this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()), scalingCopy)) {
3272 GrRenderTarget* dstRT = dst->asRenderTarget();
3273 bool drawToMultisampleFBO = dstRT && dstRT->numSamples() > 1;
3274 if (this->copySurfaceAsDraw(dst, drawToMultisampleFBO, src, srcRect, dstRect, filter)) {
3275 return true;
3276 }
3277 }
3278
3279 return false;
3280 }
3281
3282 bool GrGLGpu::createCopyProgram(GrTexture* srcTex) {
3283 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
3284
3285 int progIdx = TextureToCopyProgramIdx(srcTex);
3286 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3287 SkSLType samplerType = SkSLCombinedSamplerTypeForTextureType(srcTex->textureType());
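// Copy programs are specialized on the source's sampler type (2D, external, rectangle),
// so they are cached and looked up by texture type.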
3288
3289 if (!fCopyProgramArrayBuffer) {
3290 static const GrGLfloat vdata[] = {
3291 0, 0,
3292 0, 1,
3293 1, 0,
3294 1, 1
3295 };
3296 fCopyProgramArrayBuffer = GrGLBuffer::Make(this,
3297 sizeof(vdata),
3298 GrGpuBufferType::kVertex,
3299 kStatic_GrAccessPattern);
3300 if (fCopyProgramArrayBuffer) {
3301 fCopyProgramArrayBuffer->updateData(
3302 vdata, /*offset=*/0, sizeof(vdata), /*preserve=*/false);
3303 }
3304 }
3305 if (!fCopyProgramArrayBuffer) {
3306 return false;
3307 }
3308
3309 SkASSERT(!fCopyPrograms[progIdx].fProgram);
3310 GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
3311 if (!fCopyPrograms[progIdx].fProgram) {
3312 return false;
3313 }
3314
3315 GrShaderVar aVertex("a_vertex", SkSLType::kHalf2, GrShaderVar::TypeModifier::In);
3316 GrShaderVar uTexCoordXform("u_texCoordXform", SkSLType::kHalf4,
3317 GrShaderVar::TypeModifier::Uniform);
3318 GrShaderVar uPosXform("u_posXform", SkSLType::kHalf4, GrShaderVar::TypeModifier::Uniform);
3319 GrShaderVar uTexture("u_texture", samplerType);
3320 GrShaderVar vTexCoord("v_texCoord", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out);
3321 GrShaderVar oFragColor("o_FragColor", SkSLType::kHalf4, GrShaderVar::TypeModifier::Out);
3322
3323 SkString vshaderTxt;
3324 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3325 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3326 vshaderTxt.appendf("#extension %s : require\n", extension);
3327 }
3328 vTexCoord.addModifier("noperspective");
3329 }
3330
3331 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3332 vshaderTxt.append(";");
3333 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3334 vshaderTxt.append(";");
3335 uPosXform.appendDecl(shaderCaps, &vshaderTxt);
3336 vshaderTxt.append(";");
3337 vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
3338 vshaderTxt.append(";");
3339
3340 vshaderTxt.append(
3341 // Copy Program VS
3342 "void main() {"
3343 "v_texCoord = half2(a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw);"
3344 "sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
3345 "sk_Position.zw = half2(0, 1);"
3346 "}"
3347 );
3348
3349 SkString fshaderTxt;
3350 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3351 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3352 fshaderTxt.appendf("#extension %s : require\n", extension);
3353 }
3354 }
3355 vTexCoord.setTypeModifier(GrShaderVar::TypeModifier::In);
3356 vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
3357 fshaderTxt.append(";");
3358 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3359 fshaderTxt.append(";");
3360 fshaderTxt.appendf(
3361 // Copy Program FS
3362 "void main() {"
3363 "sk_FragColor = sample(u_texture, v_texCoord);"
3364 "}"
3365 );
3366 std::string vertexSkSL{vshaderTxt.c_str(), vshaderTxt.size()};
3367 std::string fragmentSkSL{fshaderTxt.c_str(), fshaderTxt.size()};
3368
3369 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
3370 std::string glsl[kGrShaderTypeCount];
3371 SkSL::ProgramSettings settings;
3372 SkSL::Program::Interface interface;
3373 skgpu::SkSLToGLSL(shaderCaps, vertexSkSL, SkSL::ProgramKind::kVertex, settings,
3374 &glsl[kVertex_GrShaderType], &interface, errorHandler);
3375 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext,
3376 fCopyPrograms[progIdx].fProgram,
3377 GR_GL_VERTEX_SHADER,
3378 glsl[kVertex_GrShaderType],
3379 /*shaderWasCached=*/false,
3380 fProgramCache->stats(),
3381 errorHandler);
3382 SkASSERT(interface == SkSL::Program::Interface());
3383 if (!vshader) {
3384 // Just delete the program, no shaders to delete
3385 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, nullptr, nullptr);
3386 return false;
3387 }
3388
3389 skgpu::SkSLToGLSL(shaderCaps, fragmentSkSL, SkSL::ProgramKind::kFragment, settings,
3390 &glsl[kFragment_GrShaderType], &interface, errorHandler);
3391 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext,
3392 fCopyPrograms[progIdx].fProgram,
3393 GR_GL_FRAGMENT_SHADER,
3394 glsl[kFragment_GrShaderType],
3395 /*shaderWasCached=*/false,
3396 fProgramCache->stats(),
3397 errorHandler);
3398 SkASSERT(interface == SkSL::Program::Interface());
3399 if (!fshader) {
3400 // Delete the program and previously compiled vertex shader
3401 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, &vshader, nullptr);
3402 return false;
3403 }
3404
3405 const std::string* sksl[kGrShaderTypeCount] = {&vertexSkSL, &fragmentSkSL};
3406 GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
3407 if (!GrGLCheckLinkStatus(this,
3408 fCopyPrograms[progIdx].fProgram,
3409 /*shaderWasCached=*/false,
3410 errorHandler,
3411 sksl,
3412 glsl)) {
3413 // Failed to link, delete everything
3414 cleanup_program(this, &fCopyPrograms[progIdx].fProgram, &vshader, &fshader);
3415 return false;
3416 }
3417
3418 GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
3419 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
3420 GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
3421 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
3422 GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
3423 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
3424
3425 GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
3426
3427 // Cleanup the shaders, but not the program
3428 cleanup_program(this, nullptr, &vshader, &fshader);
3429
3430 return true;
3431 }
3432
3433 bool GrGLGpu::createMipmapProgram(int progIdx) {
3434 const bool oddWidth = SkToBool(progIdx & 0x2);
3435 const bool oddHeight = SkToBool(progIdx & 0x1);
3436 const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);
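// progIdx encodes the parity of the source level's dimensions (bit 1: odd width, bit 0: odd
// height). An odd dimension can't be halved with a single centered bilinear tap, so the shader
// averages 2 (one odd dimension) or 4 (both odd) explicit taps instead.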
3437
3438 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3439
3440 SkASSERT(!fMipmapPrograms[progIdx].fProgram);
3441 GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
3442 if (!fMipmapPrograms[progIdx].fProgram) {
3443 return false;
3444 }
3445
3446 GrShaderVar aVertex("a_vertex", SkSLType::kHalf2, GrShaderVar::TypeModifier::In);
3447 GrShaderVar uTexCoordXform("u_texCoordXform", SkSLType::kHalf4,
3448 GrShaderVar::TypeModifier::Uniform);
3449 GrShaderVar uTexture("u_texture", SkSLType::kTexture2DSampler);
3450 // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
3451 GrShaderVar vTexCoords[] = {
3452 GrShaderVar("v_texCoord0", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3453 GrShaderVar("v_texCoord1", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3454 GrShaderVar("v_texCoord2", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3455 GrShaderVar("v_texCoord3", SkSLType::kHalf2, GrShaderVar::TypeModifier::Out),
3456 };
3457 GrShaderVar oFragColor("o_FragColor", SkSLType::kHalf4, GrShaderVar::TypeModifier::Out);
3458
3459 SkString vshaderTxt;
3460 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3461 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3462 vshaderTxt.appendf("#extension %s : require\n", extension);
3463 }
3464 vTexCoords[0].addModifier("noperspective");
3465 vTexCoords[1].addModifier("noperspective");
3466 vTexCoords[2].addModifier("noperspective");
3467 vTexCoords[3].addModifier("noperspective");
3468 }
3469
3470 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3471 vshaderTxt.append(";");
3472 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3473 vshaderTxt.append(";");
3474 for (int i = 0; i < numTaps; ++i) {
3475 vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
3476 vshaderTxt.append(";");
3477 }
3478
3479 vshaderTxt.append(
3480 // Mipmap Program VS
3481 "void main() {"
3482 "sk_Position.xy = a_vertex * half2(2) - half2(1);"
3483 "sk_Position.zw = half2(0, 1);"
3484 );
3485
3486 // Insert texture coordinate computation:
3487 if (oddWidth && oddHeight) {
3488 vshaderTxt.append(
3489 "v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
3490 "v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);"
3491 "v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);"
3492 "v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
3493 );
3494 } else if (oddWidth) {
3495 vshaderTxt.append(
3496 "v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);"
3497 "v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);"
3498 );
3499 } else if (oddHeight) {
3500 vshaderTxt.append(
3501 "v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);"
3502 "v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);"
3503 );
3504 } else {
3505 vshaderTxt.append(
3506 "v_texCoord0 = a_vertex.xy;"
3507 );
3508 }
3509
3510 vshaderTxt.append("}");
3511
3512 SkString fshaderTxt;
3513 if (shaderCaps->fNoPerspectiveInterpolationSupport) {
3514 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3515 fshaderTxt.appendf("#extension %s : require\n", extension);
3516 }
3517 }
3518 for (int i = 0; i < numTaps; ++i) {
3519 vTexCoords[i].setTypeModifier(GrShaderVar::TypeModifier::In);
3520 vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
3521 fshaderTxt.append(";");
3522 }
3523 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3524 fshaderTxt.append(";");
3525 fshaderTxt.append(
3526 // Mipmap Program FS
3527 "void main() {"
3528 );
3529
3530 if (oddWidth && oddHeight) {
3531 fshaderTxt.append(
3532 "sk_FragColor = (sample(u_texture, v_texCoord0) + "
3533 "sample(u_texture, v_texCoord1) + "
3534 "sample(u_texture, v_texCoord2) + "
3535 "sample(u_texture, v_texCoord3)) * 0.25;"
3536 );
3537 } else if (oddWidth || oddHeight) {
3538 fshaderTxt.append(
3539 "sk_FragColor = (sample(u_texture, v_texCoord0) + "
3540 "sample(u_texture, v_texCoord1)) * 0.5;"
3541 );
3542 } else {
3543 fshaderTxt.append(
3544 "sk_FragColor = sample(u_texture, v_texCoord0);"
3545 );
3546 }
3547
3548 fshaderTxt.append("}");
3549
3550 std::string vertexSkSL{vshaderTxt.c_str(), vshaderTxt.size()};
3551 std::string fragmentSkSL{fshaderTxt.c_str(), fshaderTxt.size()};
3552
3553 auto errorHandler = this->getContext()->priv().getShaderErrorHandler();
3554 std::string glsl[kGrShaderTypeCount];
3555 SkSL::ProgramSettings settings;
3556 SkSL::Program::Interface interface;
3557
3558 skgpu::SkSLToGLSL(shaderCaps, vertexSkSL, SkSL::ProgramKind::kVertex, settings,
3559 &glsl[kVertex_GrShaderType], &interface, errorHandler);
3560 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext,
3561 fMipmapPrograms[progIdx].fProgram,
3562 GR_GL_VERTEX_SHADER,
3563 glsl[kVertex_GrShaderType],
3564 /*shaderWasCached=*/false,
3565 fProgramCache->stats(),
3566 errorHandler);
3567 SkASSERT(interface == SkSL::Program::Interface());
3568 if (!vshader) {
3569 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, nullptr, nullptr);
3570 return false;
3571 }
3572
3573 skgpu::SkSLToGLSL(shaderCaps, fragmentSkSL, SkSL::ProgramKind::kFragment, settings,
3574 &glsl[kFragment_GrShaderType], &interface, errorHandler);
3575 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext,
3576 fMipmapPrograms[progIdx].fProgram,
3577 GR_GL_FRAGMENT_SHADER,
3578 glsl[kFragment_GrShaderType],
3579 /*shaderWasCached=*/false,
3580 fProgramCache->stats(),
3581 errorHandler);
3582 SkASSERT(interface == SkSL::Program::Interface());
3583 if (!fshader) {
3584 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, &vshader, nullptr);
3585 return false;
3586 }
3587
3588 const std::string* sksl[kGrShaderTypeCount] = {&vertexSkSL, &fragmentSkSL};
3589 GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));
3590 if (!GrGLCheckLinkStatus(this,
3591 fMipmapPrograms[progIdx].fProgram,
3592 /*shaderWasCached=*/false,
3593 errorHandler,
3594 sksl,
3595 glsl)) {
3596 // Program linking failed, clean up
3597 cleanup_program(this, &fMipmapPrograms[progIdx].fProgram, &vshader, &fshader);
3598 return false;
3599 }
3600
3601 GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
3602 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
3603 GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3604 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));
3605
3606 GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
3607
3608 // Clean up the shaders
3609 cleanup_program(this, nullptr, &vshader, &fshader);
3610
3611 return true;
3612 }
3613
3614 bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, bool drawToMultisampleFBO, GrSurface* src,
3615 const SkIRect& srcRect, const SkIRect& dstRect,
3616 GrSamplerState::Filter filter) {
3617 auto* srcTex = static_cast<GrGLTexture*>(src->asTexture());
3618 if (!srcTex) {
3619 return false;
3620 }
3621 // We don't swizzle at all in our copies.
3622 this->bindTexture(0, filter, skgpu::Swizzle::RGBA(), srcTex);
3623 if (auto* dstRT = static_cast<GrGLRenderTarget*>(dst->asRenderTarget())) {
3624 this->flushRenderTarget(dstRT, drawToMultisampleFBO);
3625 } else {
3626 auto* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
3627 SkASSERT(dstTex);
3628 SkASSERT(!drawToMultisampleFBO);
3629 if (!this->glCaps().isFormatRenderable(dstTex->format(), 1)) {
3630 return false;
3631 }
3632 this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER, kDst_TempFBOTarget);
3633 fHWBoundRenderTargetUniqueID.makeInvalid();
3634 }
3635 int progIdx = TextureToCopyProgramIdx(srcTex);
3636 if (!fCopyPrograms[progIdx].fProgram) {
3637 if (!this->createCopyProgram(srcTex)) {
3638 SkDebugf("Failed to create copy program.\n");
3639 return false;
3640 }
3641 }
3642 this->flushViewport(SkIRect::MakeSize(dst->dimensions()),
3643 dst->height(),
3644 kTopLeft_GrSurfaceOrigin); // the origin is irrelevant in this case
3645 this->flushProgram(fCopyPrograms[progIdx].fProgram);
3646 fHWVertexArrayState.setVertexArrayID(this, 0);
3647 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3648 attribs->enableVertexArrays(this, 1);
3649 attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3650 SkSLType::kFloat2, 2 * sizeof(GrGLfloat), 0);
3651 // dst rect edges in NDC (-1 to 1)
3652 int dw = dst->width();
3653 int dh = dst->height();
3654 GrGLfloat dx0 = 2.f * dstRect.fLeft / dw - 1.f;
3655 GrGLfloat dx1 = 2.f * dstRect.fRight / dw - 1.f;
3656 GrGLfloat dy0 = 2.f * dstRect.fTop / dh - 1.f;
3657 GrGLfloat dy1 = 2.f * dstRect.fBottom / dh - 1.f;
3658 GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
3659 GrGLfloat sx1 = (GrGLfloat)(srcRect.fRight);
3660 GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
3661 GrGLfloat sy1 = (GrGLfloat)(srcRect.fBottom);
3662 int sw = src->width();
3663 int sh = src->height();
3664 if (srcTex->textureType() != GrTextureType::kRectangle) {
3665 // src rect edges in normalized texture space (0 to 1)
3666 sx0 /= sw;
3667 sx1 /= sw;
3668 sy0 /= sh;
3669 sy1 /= sh;
3670 }
3671 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
3672 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
3673 sx1 - sx0, sy1 - sy0, sx0, sy0));
3674 GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));
3675 this->flushBlendAndColorWrite(skgpu::BlendInfo(), skgpu::Swizzle::RGBA());
3676 this->flushConservativeRasterState(false);
3677 this->flushWireframeState(false);
3678 this->flushScissorTest(GrScissorTest::kDisabled);
3679 this->disableWindowRectangles();
3680 this->disableStencil();
3681 if (this->glCaps().srgbWriteControl()) {
3682 this->flushFramebufferSRGB(true);
3683 }
3684 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3685 this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER);
3686 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3687 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3688 return true;
3689 }
3690
3691 void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3692 const SkIPoint& dstPoint) {
3693 SkASSERT(can_copy_texsubimage(dst, src, this->glCaps()));
3694 this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget);
3695 GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
3696 SkASSERT(dstTex);
3697 // We modified the bound FBO
3698 fHWBoundRenderTargetUniqueID.makeInvalid();
3699
3700 this->bindTextureToScratchUnit(dstTex->target(), dstTex->textureID());
3701 GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
3702 dstPoint.fX, dstPoint.fY,
3703 srcRect.fLeft, srcRect.fTop,
3704 srcRect.width(), srcRect.height()));
3705 this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER);
3706 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
3707 srcRect.width(), srcRect.height());
3708 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3709 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3710 }
3711
3712 bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
3713 const SkIRect& dstRect, GrSamplerState::Filter filter) {
3714 SkASSERT(can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstRect, this->glCaps()));
3715 if (dst == src) {
3716 if (SkIRect::Intersects(dstRect, srcRect)) {
3717 return false;
3718 }
3719 }
3720
3721 this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER, kDst_TempFBOTarget);
3722 this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER, kSrc_TempFBOTarget);
3723 // We modified the bound FBO
3724 fHWBoundRenderTargetUniqueID.makeInvalid();
3725
3726 // BlitFramebuffer respects the scissor, so disable it.
3727 this->flushScissorTest(GrScissorTest::kDisabled);
3728 this->disableWindowRectangles();
3729
3730 GL_CALL(BlitFramebuffer(srcRect.fLeft,
3731 srcRect.fTop,
3732 srcRect.fRight,
3733 srcRect.fBottom,
3734 dstRect.fLeft,
3735 dstRect.fTop,
3736 dstRect.fRight,
3737 dstRect.fBottom,
3738 GR_GL_COLOR_BUFFER_BIT,
3739 filter_to_gl_mag_filter(filter)));
3740 this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER);
3741 this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER);
3742
3743 // The rect is already in device space so we pass in kTopLeft so no flip is done.
3744 this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect);
3745 return true;
3746 }
3747
3748 bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) {
3749 using RegenerateMipmapType = GrGLCaps::RegenerateMipmapType;
3750
3751 auto glTex = static_cast<GrGLTexture*>(texture);
3752 // Mipmaps are only supported on 2D textures:
3753 if (GR_GL_TEXTURE_2D != glTex->target()) {
3754 return false;
3755 }
3756 GrGLFormat format = glTex->format();
3757 // Manual implementation of mipmap generation, to work around driver bugs w/sRGB.
3758 // Uses draw calls to do a series of downsample operations to successive mips.
3759
3760 // The manual approach requires the ability to limit which level we're sampling and that the
3761 // destination can be bound to a FBO:
3762 if (!this->glCaps().doManualMipmapping() || !this->glCaps().isFormatRenderable(format, 1)) {
3763 GrGLenum target = glTex->target();
3764 this->bindTextureToScratchUnit(target, glTex->textureID());
3765 GL_CALL(GenerateMipmap(glTex->target()));
3766 return true;
3767 }
3768
3769 int width = texture->width();
3770 int height = texture->height();
3771 int levelCount = SkMipmap::ComputeLevelCount(width, height) + 1;
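// ComputeLevelCount() counts the levels below the base, so +1 includes level 0 in the total.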
3772 SkASSERT(levelCount == texture->maxMipmapLevel() + 1);
3773
3774 // Create (if necessary), then bind temporary FBO:
3775 if (0 == fTempDstFBOID) {
3776 GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
3777 }
3778 this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID);
3779 fHWBoundRenderTargetUniqueID.makeInvalid();
3780
3781 // Bind the texture, to get things configured for filtering.
3782 // We'll be changing our base level and max level further below:
3783 this->setTextureUnit(0);
3784 // The mipmap program does not do any swizzling.
3785 this->bindTexture(0, GrSamplerState::Filter::kLinear, skgpu::Swizzle::RGBA(), glTex);
3786
3787 // Vertex data:
3788 if (!fMipmapProgramArrayBuffer) {
3789 static const GrGLfloat vdata[] = {
3790 0, 0,
3791 0, 1,
3792 1, 0,
3793 1, 1
3794 };
3795 fMipmapProgramArrayBuffer = GrGLBuffer::Make(this,
3796 sizeof(vdata),
3797 GrGpuBufferType::kVertex,
3798 kStatic_GrAccessPattern);
3799 if (fMipmapProgramArrayBuffer) {
3800     fMipmapProgramArrayBuffer->updateData(vdata, /*offset=*/0,
3801                                           sizeof(vdata),
3802                                           /*preserve=*/false);
}
3803 }
3804 if (!fMipmapProgramArrayBuffer) {
3805 return false;
3806 }
3807
3808 fHWVertexArrayState.setVertexArrayID(this, 0);
3809
3810 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3811 attribs->enableVertexArrays(this, 1);
3812 attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType,
3813 SkSLType::kFloat2, 2 * sizeof(GrGLfloat), 0);
3814
3815 // Set "simple" state once:
3816 this->flushBlendAndColorWrite(skgpu::BlendInfo(), skgpu::Swizzle::RGBA());
3817 this->flushScissorTest(GrScissorTest::kDisabled);
3818 this->disableWindowRectangles();
3819 this->disableStencil();
3820
3821 // Do all the blits:
3822 width = texture->width();
3823 height = texture->height();
3824
3825 std::unique_ptr<GrSemaphore> semaphore;
3826 for (GrGLint level = 1; level < levelCount; ++level) {
3827 // Get and bind the program for this particular downsample (filter shape can vary):
3828 int progIdx = TextureSizeToMipmapProgramIdx(width, height);
3829 if (!fMipmapPrograms[progIdx].fProgram) {
3830 if (!this->createMipmapProgram(progIdx)) {
3831 SkDebugf("Failed to create mipmap program.\n");
3832 // Invalidate all params to cover base and max level change in a previous iteration.
3833 glTex->textureParamsModified();
3834 return false;
3835 }
3836 }
3837 this->flushProgram(fMipmapPrograms[progIdx].fProgram);
3838
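// For kBasePlusSync we inserted a semaphore after drawing the previous level (see the bottom of
// this loop); wait on it so that level is fully written before it is sampled here.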
3839 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusSync &&
3840 level > 1) {
3841 this->waitSemaphore(semaphore.get());
3842 semaphore.reset();
3843 }
3844
3845 // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
3846 const float invWidth = 1.0f / width;
3847 const float invHeight = 1.0f / height;
3848 GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3849 invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
3850 GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));
3851
3852 // Set the base level so that we only sample from the previous mip.
3853 SkASSERT(this->glCaps().mipmapLevelControlSupport());
3854 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));
3855 // Setting the max level is technically unnecessary and can affect
3856 // validation for the framebuffer. However, by making it clear that a
3857 // rendering feedback loop is not occurring, we avoid hitting a slow
3858 // path on some drivers.
3859 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusMaxLevel) {
3860 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_MAX_LEVEL, level - 1));
3861 }
3862
3863 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D,
3864 glTex->textureID(), level));
3865
3866 width = std::max(1, width / 2);
3867 height = std::max(1, height / 2);
3868 this->flushViewport(SkIRect::MakeWH(width, height), height, kTopLeft_GrSurfaceOrigin);
3869
3870 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
3871
3872 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusSync &&
3873 level < levelCount-1) {
3874 semaphore = this->makeSemaphore(true);
3875 this->insertSemaphore(semaphore.get());
3876 }
3877 }
3878
3879 // Unbind:
3880 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
3881 GR_GL_TEXTURE_2D, 0, 0));
3882
3883 // We modified the base level and max level params.
3884 GrGLTextureParameters::NonsamplerState nonsamplerState = glTex->parameters()->nonsamplerState();
3885 // We drew the 2nd to last level into the last level.
3886 nonsamplerState.fBaseMipMapLevel = levelCount - 2;
3887 if (this->glCaps().regenerateMipmapType() == RegenerateMipmapType::kBasePlusMaxLevel) {
3888 nonsamplerState.fMaxMipmapLevel = levelCount - 2;
3889 }
3890 glTex->parameters()->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
3891
3892 return true;
3893 }
3894
3895 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
3896 SkASSERT(type);
3897 switch (type) {
3898 case kTexture_GrXferBarrierType: {
3899 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
3900 SkASSERT(glrt->asTexture());
3901 SkASSERT(!glrt->isFBO0(false/*multisample*/));
3902 if (glrt->requiresManualMSAAResolve()) {
3903 // The render target uses separate storage so no need for glTextureBarrier.
3904 // FIXME: The render target will resolve automatically when its texture is bound,
3905 // but we could resolve only the bounds that will be read if we do it here instead.
3906 return;
3907 }
3908 SkASSERT(this->caps()->textureBarrierSupport());
3909 GL_CALL(TextureBarrier());
3910 return;
3911 }
3912 case kBlend_GrXferBarrierType:
3913 SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
3914 this->caps()->blendEquationSupport());
3915 GL_CALL(BlendBarrier());
3916 return;
3917 default: break; // placate compiler warnings that kNone not handled
3918 }
3919 }
3920
3921 GrBackendTexture GrGLGpu::onCreateBackendTexture(SkISize dimensions,
3922 const GrBackendFormat& format,
3923 GrRenderable renderable,
3924 skgpu::Mipmapped mipmapped,
3925 GrProtected isProtected,
3926 std::string_view label) {
3927 this->handleDirtyContext();
3928
3929 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
3930 return {};
3931 }
3932
3933 GrGLFormat glFormat = GrBackendFormats::AsGLFormat(format);
3934 if (glFormat == GrGLFormat::kUnknown) {
3935 return {};
3936 }
3937
3938 int numMipLevels = 1;
3939 if (mipmapped == skgpu::Mipmapped::kYes) {
3940 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
3941 }
3942
3943 // Compressed formats go through onCreateCompressedBackendTexture
3944 SkASSERT(!GrGLFormatIsCompressed(glFormat));
3945
3946 GrGLTextureInfo info;
3947 GrGLTextureParameters::SamplerOverriddenState initialState;
3948
3949 if (glFormat == GrGLFormat::kUnknown) {
3950 return {};
3951 }
3952 switch (format.textureType()) {
3953 case GrTextureType::kNone:
3954 case GrTextureType::kExternal:
3955 return {};
3956 case GrTextureType::k2D:
3957 info.fTarget = GR_GL_TEXTURE_2D;
3958 break;
3959 case GrTextureType::kRectangle:
3960 if (!this->glCaps().rectangleTextureSupport() || mipmapped == skgpu::Mipmapped::kYes) {
3961 return {};
3962 }
3963 info.fTarget = GR_GL_TEXTURE_RECTANGLE;
3964 break;
3965 }
3966 info.fProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
3967 this->glCaps().strictProtectedness());
3968 info.fFormat = GrGLFormatToEnum(glFormat);
3969 info.fID = this->createTexture(dimensions, glFormat, info.fTarget, renderable, &initialState,
3970 numMipLevels, info.fProtected, label);
3971 if (!info.fID) {
3972 return {};
3973 }
3974
3975 // Unbind this texture from the scratch texture unit.
3976 this->bindTextureToScratchUnit(info.fTarget, 0);
3977
3978 auto parameters = sk_make_sp<GrGLTextureParameters>();
3979 // The non-sampler params are still at their default values.
3980 parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(),
3981 fResetTimestampForTextureParameters);
3982
3983 return GrBackendTextures::MakeGL(
3984 dimensions.width(), dimensions.height(), mipmapped, info, std::move(parameters), label);
3985 }
3986
3987 bool GrGLGpu::onClearBackendTexture(const GrBackendTexture& backendTexture,
3988 sk_sp<skgpu::RefCntedCallback> finishedCallback,
3989 std::array<float, 4> color) {
3990 this->handleDirtyContext();
3991
3992 GrGLTextureInfo info;
3993 SkAssertResult(GrBackendTextures::GetGLTextureInfo(backendTexture, &info));
3994
3995 int numMipLevels = 1;
3996 if (backendTexture.hasMipmaps()) {
3997 numMipLevels =
3998 SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1;
3999 }
4000
4001 GrGLFormat glFormat = GrGLFormatFromGLEnum(info.fFormat);
4002
4003 this->bindTextureToScratchUnit(info.fTarget, info.fID);
4004
4005 // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1
4006 // so that the uploads go to the right levels.
4007 if (numMipLevels && this->glCaps().mipmapLevelControlSupport()) {
4008 auto params = get_gl_texture_params(backendTexture);
4009 GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState();
4010 if (params->nonsamplerState().fBaseMipMapLevel != 0) {
4011 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0));
4012 nonsamplerState.fBaseMipMapLevel = 0;
4013 }
4014 if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) {
4015 GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1));
4016 nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
4017 }
4018 params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters);
4019 }
4020
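// One bit per mip level: select levels 0..numMipLevels-1 so the solid color reaches every level.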
4021 uint32_t levelMask = (1 << numMipLevels) - 1;
4022 bool result = this->uploadColorToTex(glFormat,
4023 backendTexture.dimensions(),
4024 info.fTarget,
4025 color,
4026 levelMask);
4027
4028 // Unbind this texture from the scratch texture unit.
4029 this->bindTextureToScratchUnit(info.fTarget, 0);
4030 return result;
4031 }
4032
4033 void GrGLGpu::deleteBackendTexture(const GrBackendTexture& tex) {
4034 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
4035
4036 GrGLTextureInfo info;
4037 if (GrBackendTextures::GetGLTextureInfo(tex, &info)) {
4038 GL_CALL(DeleteTextures(1, &info.fID));
4039 }
4040 }
4041
4042 bool GrGLGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) {
4043 GrThreadSafePipelineBuilder::Stats::ProgramCacheResult stat;
4044
4045 sk_sp<GrGLProgram> tmp = fProgramCache->findOrCreateProgram(this->getContext(),
4046 desc, programInfo, &stat);
4047 if (!tmp) {
4048 return false;
4049 }
4050
4051 return stat != GrThreadSafePipelineBuilder::Stats::ProgramCacheResult::kHit;
4052 }
4053
4054 #if defined(GPU_TEST_UTILS)
4055
4056 bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const {
4057 SkASSERT(GrBackendApi::kOpenGL == tex.backend());
4058
4059 GrGLTextureInfo info;
4060 if (!GrBackendTextures::GetGLTextureInfo(tex, &info)) {
4061 return false;
4062 }
4063
4064 GrGLboolean result;
4065 GL_CALL_RET(result, IsTexture(info.fID));
4066
4067 return (GR_GL_TRUE == result);
4068 }
4069
4070 GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(SkISize dimensions,
4071 GrColorType colorType,
4072 int sampleCnt,
4073 GrProtected isProtected) {
4074 if (dimensions.width() > this->caps()->maxRenderTargetSize() ||
4075 dimensions.height() > this->caps()->maxRenderTargetSize()) {
4076 return {};
4077 }
4078 if (isProtected == GrProtected::kYes && !this->glCaps().supportsProtectedContent()) {
4079 return {};
4080 }
4081
4082 this->handleDirtyContext();
4083 auto format = this->glCaps().getFormatFromColorType(colorType);
4084 sampleCnt = this->glCaps().getRenderTargetSampleCount(sampleCnt, format);
4085 if (!sampleCnt) {
4086 return {};
4087 }
4088 // We make a texture instead of a render target if we're using a
4089 // "multisampled_render_to_texture" style extension or have a BGRA format that
4090 // is allowed for textures but not render buffer internal formats.
4091 bool useTexture = false;
4092 if (sampleCnt > 1 && !this->glCaps().usesMSAARenderBuffers()) {
4093 useTexture = true;
4094 } else if (format == GrGLFormat::kBGRA8 &&
4095 this->glCaps().getRenderbufferInternalFormat(GrGLFormat::kBGRA8) != GR_GL_BGRA8) {
4096 // We have a BGRA extension that doesn't support BGRA render buffers. We can use a texture
4097 // unless we've been asked for MSAA. Note we already checked above for render-to-
4098 // multisampled-texture style extensions.
4099 if (sampleCnt > 1) {
4100 return {};
4101 }
4102 useTexture = true;
4103 }
4104
4105 bool avoidStencil = this->glCaps().avoidStencilBuffers();
4106 int sFormatIdx = -1;
4107 if (!avoidStencil) {
4108 sFormatIdx = this->getCompatibleStencilIndex(format);
4109 if (sFormatIdx < 0) {
4110 return {};
4111 }
4112 }
4113 GrGLuint colorID = 0;
4114 GrGLuint stencilID = 0;
4115 GrGLFramebufferInfo info;
4116 info.fFBOID = 0;
4117 info.fFormat = GrGLFormatToEnum(format);
4118 info.fProtected = skgpu::Protected(isProtected == skgpu::Protected::kYes ||
4119 this->glCaps().strictProtectedness());
4120
4121 auto deleteIDs = [&](bool saveFBO = false) {
4122 if (colorID) {
4123 if (useTexture) {
4124 GL_CALL(DeleteTextures(1, &colorID));
4125 } else {
4126 GL_CALL(DeleteRenderbuffers(1, &colorID));
4127 }
4128 }
4129 if (stencilID) {
4130 GL_CALL(DeleteRenderbuffers(1, &stencilID));
4131 }
4132 if (!saveFBO && info.fFBOID) {
4133 this->deleteFramebuffer(info.fFBOID);
4134 }
4135 };
4136
4137 if (useTexture) {
4138 GL_CALL(GenTextures(1, &colorID));
4139 } else {
4140 GL_CALL(GenRenderbuffers(1, &colorID));
4141 }
4142 if (!colorID) {
4143 deleteIDs();
4144 return {};
4145 }
4146
4147 if (!avoidStencil) {
4148 GL_CALL(GenRenderbuffers(1, &stencilID));
4149 if (!stencilID) {
4150 deleteIDs();
4151 return {};
4152 }
4153 }
4154
4155 GL_CALL(GenFramebuffers(1, &info.fFBOID));
4156 if (!info.fFBOID) {
4157 deleteIDs();
4158 return {};
4159 }
4160
4161 this->invalidateBoundRenderTarget();
4162
4163 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
4164 if (useTexture) {
4165 GrGLTextureParameters::SamplerOverriddenState initialState;
4166 colorID = this->createTexture(dimensions, format, GR_GL_TEXTURE_2D, GrRenderable::kYes,
4167 &initialState,
4168 1,
4169 info.fProtected,
4170 /*label=*/"Skia");
4171 if (!colorID) {
4172 deleteIDs();
4173 return {};
4174 }
4175 if (sampleCnt == 1) {
4176 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4177 GR_GL_TEXTURE_2D, colorID, 0));
4178 } else {
4179 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4180 GR_GL_TEXTURE_2D, colorID, 0, sampleCnt));
4181 }
4182 } else {
4183 GrGLenum renderBufferFormat = this->glCaps().getRenderbufferInternalFormat(format);
4184 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID));
4185 if (sampleCnt == 1) {
4186 GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, renderBufferFormat, dimensions.width(),
4187 dimensions.height()));
4188 } else {
4189 if (!this->renderbufferStorageMSAA(this->glContext(), sampleCnt, renderBufferFormat,
4190 dimensions.width(), dimensions.height())) {
4191 deleteIDs();
4192 return {};
4193 }
4194 }
4195 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4196 GR_GL_RENDERBUFFER, colorID));
4197 }
4198 if (!avoidStencil) {
4199 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID));
4200 auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx];
4201 if (sampleCnt == 1) {
4202 GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, GrGLFormatToEnum(stencilBufferFormat),
4203 dimensions.width(), dimensions.height()));
4204 } else {
4205 if (!this->renderbufferStorageMSAA(this->glContext(), sampleCnt,
4206 GrGLFormatToEnum(stencilBufferFormat),
4207 dimensions.width(), dimensions.height())) {
4208 deleteIDs();
4209 return {};
4210 }
4211 }
4212 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
4213 GR_GL_STENCIL_ATTACHMENT,
4214 GR_GL_RENDERBUFFER,
4215 stencilID));
4216 if (GrGLFormatIsPackedDepthStencil(this->glCaps().stencilFormats()[sFormatIdx])) {
4217 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT,
4218 GR_GL_RENDERBUFFER, stencilID));
4219 }
4220 }
4221
4222 // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL
4223 // has this rule that if a renderbuffer/texture is deleted and a FBO other than the current FBO
4224 // has the RB attached then deletion is delayed. So we unbind the FBO here and delete the
4225 // renderbuffers/texture.
4226 this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0);
4227 deleteIDs(/* saveFBO = */ true);
4228
4229 this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID);
4230 GrGLenum status;
4231 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
4232 if (GR_GL_FRAMEBUFFER_COMPLETE != status) {
4233 this->deleteFramebuffer(info.fFBOID);
4234 return {};
4235 }
4236
4237 int stencilBits = 0;
4238 if (!avoidStencil) {
4239 stencilBits = SkToInt(GrGLFormatStencilBits(this->glCaps().stencilFormats()[sFormatIdx]));
4240 }
4241
4242 GrBackendRenderTarget beRT = GrBackendRenderTargets::MakeGL(
4243 dimensions.width(), dimensions.height(), sampleCnt, stencilBits, info);
4244 SkASSERT(this->caps()->areColorTypeAndFormatCompatible(colorType, beRT.getBackendFormat()));
4245 return beRT;
4246 }
4247
4248 void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
4249 SkASSERT(GrBackendApi::kOpenGL == backendRT.backend());
4250 GrGLFramebufferInfo info;
4251 if (GrBackendRenderTargets::GetGLFramebufferInfo(backendRT, &info)) {
4252 if (info.fFBOID) {
4253 this->deleteFramebuffer(info.fFBOID);
4254 }
4255 }
4256 }
4257 #endif
4258
4259 ///////////////////////////////////////////////////////////////////////////////
4260
4261 GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
4262 const GrBuffer* ibuf) {
4263 SkASSERT(!ibuf || ibuf->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(ibuf)->isMapped());
4264 GrGLAttribArrayState* attribState;
4265
4266 if (gpu->glCaps().isCoreProfile()) {
4267 if (!fCoreProfileVertexArray) {
4268 GrGLuint arrayID;
4269 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
4270 int attrCount = gpu->glCaps().maxVertexAttributes();
4271 fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
4272 }
4273 if (ibuf) {
4274 attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
4275 } else {
4276 attribState = fCoreProfileVertexArray->bind(gpu);
4277 }
4278 } else {
4279 if (ibuf) {
4280 // bindBuffer implicitly binds VAO 0 when binding an index buffer.
4281 gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf);
4282 } else {
4283 this->setVertexArrayID(gpu, 0);
4284 }
4285 int attrCount = gpu->glCaps().maxVertexAttributes();
4286 if (fDefaultVertexArrayAttribState.count() != attrCount) {
4287 fDefaultVertexArrayAttribState.resize(attrCount);
4288 }
4289 attribState = &fDefaultVertexArrayAttribState;
4290 }
4291 return attribState;
4292 }
4293
4294 void GrGLGpu::addFinishedCallback(skgpu::AutoCallback callback,
4295 std::optional<GrTimerQuery> timerQuery) {
4296 GrGLint glQuery = timerQuery ? static_cast<GrGLint>(timerQuery->query) : 0;
4297 fFinishCallbacks.add(std::move(callback), glQuery);
4298 }
4299
4300 void GrGLGpu::flush(FlushType flushType) {
4301 if (fNeedsGLFlush || flushType == FlushType::kForce) {
4302 GL_CALL(Flush());
4303 fNeedsGLFlush = false;
4304 }
4305 }
4306
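// If the caller requested a CPU sync, or finish callbacks are pending but fence syncs are
// unsupported, block in glFinish and fire every callback; otherwise just flush and poll for
// callbacks whose fences have signaled.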
4307 bool GrGLGpu::onSubmitToGpu(const GrSubmitInfo& info) {
4308 if (info.fSync == GrSyncCpu::kYes ||
4309 (!fFinishCallbacks.empty() && !this->glCaps().fenceSyncSupport())) {
4310 this->finishOutstandingGpuWork();
4311 fFinishCallbacks.callAll(true);
4312 } else {
4313 this->flush();
4314 // See if any previously inserted finish procs are good to go.
4315 fFinishCallbacks.check();
4316 }
4317 if (!this->glCaps().skipErrorChecks()) {
4318 this->clearErrorsAndCheckForOOM();
4319 }
4320 return true;
4321 }
4322
4323 void GrGLGpu::willExecute() {
4324 // Because our transfers are submitted to GL and performed immediately (there is no command
4325 // buffer to submit), we must unmap any staging buffers.
4326 if (fStagingBufferManager) {
4327 fStagingBufferManager->detachBuffers();
4328 }
4329 }
4330
4331 void GrGLGpu::submit(GrOpsRenderPass* renderPass) {
4332 // The GrGLOpsRenderPass doesn't buffer ops, so there is nothing to do here.
4333 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
4334 fCachedOpsRenderPass->reset();
4335 }
4336
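// Creates a fence in the GL command stream using whichever mechanism the caps report:
// NV_fence names (a GLuint packed into the pointer-sized GrGLsync handle) or ARB/ES3 sync
// objects. The fence won't signal until the commands are flushed, hence setNeedsFlush().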
4337 [[nodiscard]] GrGLsync GrGLGpu::insertSync() {
4338 GrGLsync sync = nullptr;
4339 switch (this->glCaps().fenceType()) {
4340 case GrGLCaps::FenceType::kNone:
4341 return nullptr;
4342 case GrGLCaps::FenceType::kNVFence: {
4343 static_assert(sizeof(GrGLsync) >= sizeof(GrGLuint));
4344 GrGLuint fence = 0;
4345 GL_CALL(GenFences(1, &fence));
4346 GL_CALL(SetFence(fence, GR_GL_ALL_COMPLETED));
4347 sync = reinterpret_cast<GrGLsync>(static_cast<intptr_t>(fence));
4348 break;
4349 }
4350 case GrGLCaps::FenceType::kSyncObject: {
4351 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4352 break;
4353 }
4354 }
4355 this->setNeedsFlush();
4356 return sync;
4357 }
4358
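// Non-blocking check of whether a fence has signaled: ClientWaitSync is issued with a zero
// timeout (the Emscripten binding passes the 64-bit timeout as two 32-bit words).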
4359 bool GrGLGpu::testSync(GrGLsync sync) {
4360 switch (this->glCaps().fenceType()) {
4361 case GrGLCaps::FenceType::kNone:
4362 SK_ABORT("Testing sync without sync support.");
4363 return false;
4364 case GrGLCaps::FenceType::kNVFence: {
4365 GrGLuint nvFence = static_cast<GrGLuint>(reinterpret_cast<intptr_t>(sync));
4366 GrGLboolean result;
4367 GL_CALL_RET(result, TestFence(nvFence));
4368 return result == GR_GL_TRUE;
4369 }
4370 case GrGLCaps::FenceType::kSyncObject: {
4371 constexpr GrGLbitfield kFlags = 0;
4372 GrGLenum result;
4373 #if defined(__EMSCRIPTEN__)
4374 GL_CALL_RET(result, ClientWaitSync(sync, kFlags, 0, 0));
4375 #else
4376 GL_CALL_RET(result, ClientWaitSync(sync, kFlags, 0));
4377 #endif
4378 return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result);
4379 }
4380 }
4381 SkUNREACHABLE;
4382 }
4383
4384 void GrGLGpu::deleteSync(GrGLsync sync) {
4385 switch (this->glCaps().fenceType()) {
4386 case GrGLCaps::FenceType::kNone:
4387 SK_ABORT("Deleting sync without sync support.");
4388 break;
4389 case GrGLCaps::FenceType::kNVFence: {
4390 GrGLuint nvFence = SkToUInt(reinterpret_cast<intptr_t>(sync));
4391 GL_CALL(DeleteFences(1, &nvFence));
4392 break;
4393 }
4394 case GrGLCaps::FenceType::kSyncObject:
4395 GL_CALL(DeleteSync(sync));
4396 break;
4397 }
4398 }
4399
4400 [[nodiscard]] std::unique_ptr<GrSemaphore> GrGLGpu::makeSemaphore(bool isOwned) {
4401 SkASSERT(this->caps()->semaphoreSupport());
4402 return GrGLSemaphore::Make(this, isOwned);
4403 }
4404
4405 std::unique_ptr<GrSemaphore> GrGLGpu::wrapBackendSemaphore(const GrBackendSemaphore&,
4406 GrSemaphoreWrapType,
4407 GrWrapOwnership) {
4408 SK_ABORT("Unsupported");
4409 }
4410
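// Backs the GrGLSemaphore with a freshly created GL sync object; waitSemaphore() later issues
// a server-side WaitSync against it.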
4411 void GrGLGpu::insertSemaphore(GrSemaphore* semaphore) {
4412 SkASSERT(semaphore);
4413 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore);
4414
4415 GrGLsync sync;
4416 GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
4417 glSem->setSync(sync);
4418 this->setNeedsFlush();
4419 }
4420
4421 void GrGLGpu::waitSemaphore(GrSemaphore* semaphore) {
4422 SkASSERT(semaphore);
4423 GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore);
4424
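// The Emscripten/WebGL binding splits the 64-bit timeout into low and high 32-bit words
// (presumably because JavaScript lacks native 64-bit integers); other platforms take
// GR_GL_TIMEOUT_IGNORED directly.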
4425 #if defined(__EMSCRIPTEN__)
4426 constexpr auto kLo = SkTo<GrGLuint>(GR_GL_TIMEOUT_IGNORED & 0xFFFFFFFFull);
4427 constexpr auto kHi = SkTo<GrGLuint>(GR_GL_TIMEOUT_IGNORED >> 32);
4428 GL_CALL(WaitSync(glSem->sync(), 0, kLo, kHi));
4429 #else
4430 GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
4431 #endif
4432 }
4433
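// Begins a GL_TIME_ELAPSED query when timer queries are supported. Only one query of this type
// can be active at a time (see endTimerQuery below).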
4434 std::optional<GrTimerQuery> GrGLGpu::startTimerQuery() {
4435 if (glCaps().timerQueryType() == GrGLCaps::TimerQueryType::kNone) {
4436 return {};
4437 }
4438 GrGLuint glQuery;
4439 GL_CALL(GenQueries(1, &glQuery));
4440 if (!glQuery) {
4441 return {};
4442 }
4443 if (glCaps().timerQueryType() == GrGLCaps::TimerQueryType::kDisjoint) {
4444 // Clear the disjoint state
4445 GrGLint _;
4446 GR_GL_GetIntegerv(this->glInterface(), GR_GL_GPU_DISJOINT, &_);
4447 }
4448 GL_CALL(BeginQuery(GR_GL_TIME_ELAPSED, glQuery));
4449 return GrTimerQuery{glQuery};
4450 }
4451
4452 void GrGLGpu::endTimerQuery(const GrTimerQuery& timerQuery) {
4453 SkASSERT(glCaps().timerQueryType() != GrGLCaps::TimerQueryType::kNone);
4454 SkASSERT(SkToUInt(timerQuery.query));
4455 // Since only one query of a particular type can be active at once, glEndQuery doesn't take a
4456 // query parameter.
4457 GL_CALL(EndQuery(GR_GL_TIME_ELAPSED));
4458 }
4459
4460 uint64_t GrGLGpu::getTimerQueryResult(GrGLuint query) {
4461 SkASSERT(glCaps().timerQueryType() != GrGLCaps::TimerQueryType::kNone);
4462 SkASSERT(query);
4463
4464 // Because we only call this after a sync completes, the query *should* be available.
4465 GrGLuint available;
4466 GL_CALL(GetQueryObjectuiv(query, GR_GL_QUERY_RESULT_AVAILABLE, &available));
4467 bool getResult = true;
4468 if (!available) {
4469 SkDebugf("GL timer query is not available.\n");
4470 getResult = false;
4471 }
4472
4473 if (glCaps().timerQueryType() == GrGLCaps::TimerQueryType::kDisjoint) {
4474 // Clear the disjoint state
4475 GrGLint disjoint;
4476 GR_GL_GetIntegerv(this->glInterface(), GR_GL_GPU_DISJOINT, &disjoint);
4477 if (disjoint) {
4478 SkDebugf("GL timer query ignored because of disjoint event.\n");
4479 getResult = false;
4480 }
4481 }
4482
4483 uint64_t result = 0;
4484 if (getResult) {
4485 GR_GL_GetQueryObjectui64v(this->glInterface(), query, GR_GL_QUERY_RESULT, &result);
4486 }
4487 GL_CALL(DeleteQueries(1, &query));
4488 return result;
4489 }
4490
4491 void GrGLGpu::checkFinishedCallbacks() {
4492 fFinishCallbacks.check();
4493 }
4494
4495 void GrGLGpu::finishOutstandingGpuWork() {
4496 GL_CALL(Finish());
4497 }
4498
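// Drains the GL error queue, recording an OOM condition if GL_OUT_OF_MEMORY (or an
// interface-reported OOM, when GR_GL_CHECK_ERROR is enabled) is encountered along the way.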
4499 void GrGLGpu::clearErrorsAndCheckForOOM() {
4500 while (this->getErrorAndCheckForOOM() != GR_GL_NO_ERROR) {}
4501 }
4502
4503 GrGLenum GrGLGpu::getErrorAndCheckForOOM() {
4504 #if GR_GL_CHECK_ERROR
4505 if (this->glInterface()->checkAndResetOOMed()) {
4506 this->setOOMed();
4507 }
4508 #endif
4509 GrGLenum error = this->fGLContext->glInterface()->fFunctions.fGetError();
4510 if (error == GR_GL_OUT_OF_MEMORY) {
4511 this->setOOMed();
4512 }
4513 return error;
4514 }
4515
4516 std::unique_ptr<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) {
4517 // Set up a semaphore to be signaled once the data is ready, and flush GL
4518 std::unique_ptr<GrSemaphore> semaphore = this->makeSemaphore(true);
4519 SkASSERT(semaphore);
4520 this->insertSemaphore(semaphore.get());
4521 // We must call flush here to make sure the GrGLsync object gets created and sent to the gpu.
4522 this->flush(FlushType::kForce);
4523
4524 return semaphore;
4525 }
4526
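// Maps a texture's SkSL sampler type to an index used to select the matching copy program:
// 0 for 2D, 1 for rectangle, 2 for external textures.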
4527 int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) {
4528 switch (SkSLCombinedSamplerTypeForTextureType(texture->textureType())) {
4529 case SkSLType::kTexture2DSampler:
4530 return 0;
4531 case SkSLType::kTexture2DRectSampler:
4532 return 1;
4533 case SkSLType::kTextureExternalSampler:
4534 return 2;
4535 default:
4536 SK_ABORT("Unexpected sampler type");
4537 }
4538 }
4539
4540 #ifdef SK_ENABLE_DUMP_GPU
4541 #include "src/utils/SkJSONWriter.h"
4542 void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const {
4543 // We are called by the base class, which has already called beginObject(). We choose to nest
4544 // all of our caps information in a named sub-object.
4545 writer->beginObject("GL GPU");
4546
4547 const GrGLubyte* str;
4548 GL_CALL_RET(str, GetString(GR_GL_VERSION));
4549 writer->appendCString("GL_VERSION", (const char*)(str));
4550 GL_CALL_RET(str, GetString(GR_GL_RENDERER));
4551 writer->appendCString("GL_RENDERER", (const char*)(str));
4552 GL_CALL_RET(str, GetString(GR_GL_VENDOR));
4553 writer->appendCString("GL_VENDOR", (const char*)(str));
4554 GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
4555 writer->appendCString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str));
4556
4557 writer->appendName("extensions");
4558 glInterface()->fExtensions.dumpJSON(writer);
4559
4560 writer->endObject();
4561 }
4562 #endif
4563