1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/GrGeometryProcessor.h"
9
10 #include "include/core/SkSamplingOptions.h"
11 #include "include/core/SkString.h"
12 #include "include/private/SkSLSampleUsage.h"
13 #include "include/private/base/SkSafe32.h"
14 #include "src/core/SkMatrixPriv.h"
15 #include "src/gpu/KeyBuilder.h"
16 #include "src/gpu/ganesh/GrPipeline.h"
17 #include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
18 #include "src/gpu/ganesh/glsl/GrGLSLProgramBuilder.h"
19 #include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
20 #include "src/gpu/ganesh/glsl/GrGLSLVarying.h"
21 #include "src/gpu/ganesh/glsl/GrGLSLVertexGeoBuilder.h"
22
23 #include <algorithm>
24 #include <queue>
25 #include <utility>
26 #include <vector>
27
GrGeometryProcessor(ClassID classID)28 GrGeometryProcessor::GrGeometryProcessor(ClassID classID) : GrProcessor(classID) {}
29
textureSampler(int i) const30 const GrGeometryProcessor::TextureSampler& GrGeometryProcessor::textureSampler(int i) const {
31 SkASSERT(i >= 0 && i < this->numTextureSamplers());
32 return this->onTextureSampler(i);
33 }
34
ComputeCoordTransformsKey(const GrFragmentProcessor & fp)35 uint32_t GrGeometryProcessor::ComputeCoordTransformsKey(const GrFragmentProcessor& fp) {
36 // This is highly coupled with the code in ProgramImpl::collectTransforms().
37 uint32_t key = static_cast<uint32_t>(fp.sampleUsage().kind()) << 1;
38 // This needs to be updated if GP starts specializing varyings on additional matrix types.
39 if (fp.sampleUsage().hasPerspective()) {
40 key |= 0b1;
41 }
42 return key;
43 }
44
// Appends the layouts of both attribute sets (vertex first, then instance) to
// the program key so that programs with different attribute layouts never share
// a compiled shader.
void GrGeometryProcessor::getAttributeKey(skgpu::KeyBuilder* b) const {
    b->appendComment("vertex attributes");
    fVertexAttributes.addToKey(b);
    b->appendComment("instance attributes");
    fInstanceAttributes.addToKey(b);
}
51
52 ///////////////////////////////////////////////////////////////////////////////////////////////////
53
clamp_filter(GrTextureType type,GrSamplerState::Filter requestedFilter)54 static inline GrSamplerState::Filter clamp_filter(GrTextureType type,
55 GrSamplerState::Filter requestedFilter) {
56 if (GrTextureTypeHasRestrictedSampling(type)) {
57 return std::min(requestedFilter, GrSamplerState::Filter::kLinear);
58 }
59 return requestedFilter;
60 }
61
// Convenience constructor: fully initializes the sampler by delegating to
// reset(), which also clamps the filter to what the texture type supports.
GrGeometryProcessor::TextureSampler::TextureSampler(GrSamplerState samplerState,
                                                    const GrBackendFormat& backendFormat,
                                                    const skgpu::Swizzle& swizzle) {
    this->reset(samplerState, backendFormat, swizzle);
}
67
reset(GrSamplerState samplerState,const GrBackendFormat & backendFormat,const skgpu::Swizzle & swizzle)68 void GrGeometryProcessor::TextureSampler::reset(GrSamplerState samplerState,
69 const GrBackendFormat& backendFormat,
70 const skgpu::Swizzle& swizzle) {
71 fSamplerState = samplerState;
72 fSamplerState = GrSamplerState(samplerState.wrapModeX(),
73 samplerState.wrapModeY(),
74 clamp_filter(backendFormat.textureType(), samplerState.filter()),
75 samplerState.mipmapMode());
76 fBackendFormat = backendFormat;
77 fSwizzle = swizzle;
78 fIsInitialized = true;
79 }
80
81 //////////////////////////////////////////////////////////////////////////////
82
83 using ProgramImpl = GrGeometryProcessor::ProgramImpl;
84
// Emits the full GP program: lets the subclass emit its code, lifts FP coord
// transforms into the vertex shader, and writes the final device position.
// Returns the per-FP coords map (for FP code emission) and the GP's local
// coord variable.
std::tuple<ProgramImpl::FPCoordsMap, GrShaderVar>
ProgramImpl::emitCode(EmitArgs& args, const GrPipeline& pipeline) {
    // The subclass fills in gpArgs with where position/local coords live.
    GrGPArgs gpArgs;
    this->onEmitCode(args, &gpArgs);

    // Decide, per FP, whether coords come from a varying or an explicit param.
    FPCoordsMap transformMap = this->collectTransforms(args.fVertBuilder,
                                                       args.fVaryingHandler,
                                                       args.fUniformHandler,
                                                       gpArgs.fLocalCoordShader,
                                                       gpArgs.fLocalCoordVar,
                                                       gpArgs.fPositionVar,
                                                       pipeline);

    GrGLSLVertexBuilder* vBuilder = args.fVertBuilder;
    // Emit the vertex position to the hardware in the normalized window coordinates it expects.
    SkASSERT(SkSLType::kFloat2 == gpArgs.fPositionVar.getType() ||
             SkSLType::kFloat3 == gpArgs.fPositionVar.getType());
    vBuilder->emitNormalizedSkPosition(gpArgs.fPositionVar.c_str(),
                                       gpArgs.fPositionVar.getType());
    if (SkSLType::kFloat2 == gpArgs.fPositionVar.getType()) {
        // A 2D position cannot introduce perspective, so varyings may skip
        // perspective-correct interpolation.
        args.fVaryingHandler->setNoPerspective();
    }

    return {transformMap, gpArgs.fLocalCoordVar};
}
110
// Walks every FP tree in the pipeline and decides, for each FP that uses local
// coordinates, whether its coords can be supplied by a vertex-shader varying
// (possibly transformed by a chain of uniform matrices) or must be passed as an
// explicit fragment-shader parameter. Created varyings are recorded in
// fTransformVaryingsMap; emitTransformCode() later emits the code that fills
// them in. Returns a map from each FP to its coord source.
ProgramImpl::FPCoordsMap ProgramImpl::collectTransforms(GrGLSLVertexBuilder* vb,
                                                        GrGLSLVaryingHandler* varyingHandler,
                                                        GrGLSLUniformHandler* uniformHandler,
                                                        GrShaderType localCoordsShader,
                                                        const GrShaderVar& localCoordsVar,
                                                        const GrShaderVar& positionVar,
                                                        const GrPipeline& pipeline) {
    SkASSERT(localCoordsVar.getType() == SkSLType::kFloat2 ||
             localCoordsVar.getType() == SkSLType::kFloat3 ||
             localCoordsVar.getType() == SkSLType::kVoid);
    SkASSERT(positionVar.getType() == SkSLType::kFloat2 ||
             positionVar.getType() == SkSLType::kFloat3 ||
             positionVar.getType() == SkSLType::kVoid);

    // Returns the FS variable holding the untransformed local coord. When the GP
    // supplies local coords from the vertex shader, a "LocalCoord" varying is
    // created lazily on first use (the lambda's mutable capture caches it).
    auto baseLocalCoordFSVar = [&, baseLocalCoordVarying = GrGLSLVarying()]() mutable {
        if (localCoordsShader == kFragment_GrShaderType) {
            return localCoordsVar;
        }
        SkASSERT(localCoordsShader == kVertex_GrShaderType);
        SkASSERT(SkSLTypeIsFloatType(localCoordsVar.getType()));
        if (baseLocalCoordVarying.type() == SkSLType::kVoid) {
            // Initialize to the GP provided coordinate
            baseLocalCoordVarying = GrGLSLVarying(localCoordsVar.getType());
            varyingHandler->addVarying("LocalCoord", &baseLocalCoordVarying);
            vb->codeAppendf("%s = %s;\n",
                            baseLocalCoordVarying.vsOut(),
                            localCoordsVar.getName().c_str());
        }
        return baseLocalCoordVarying.fsInVar();
    };

    bool canUsePosition = positionVar.getType() != SkSLType::kVoid;

    FPCoordsMap result;
    // Performs a pre-order traversal of FP hierarchy rooted at fp and identifies FPs that are
    // sampled with a series of matrices applied to local coords. For each such FP a varying is
    // added to the varying handler and added to 'result'.
    auto liftTransforms = [&, traversalIndex = 0](
                                  auto& self,
                                  const GrFragmentProcessor& fp,
                                  bool hasPerspective,
                                  const GrFragmentProcessor* lastMatrixFP = nullptr,
                                  int lastMatrixTraversalIndex = -1,
                                  BaseCoord baseCoord = BaseCoord::kLocal) mutable -> void {
        ++traversalIndex;
        if (localCoordsShader == kVertex_GrShaderType) {
            switch (fp.sampleUsage().kind()) {
                case SkSL::SampleUsage::Kind::kNone:
                    // This should only happen at the root. Otherwise how did this FP get added?
                    SkASSERT(!fp.parent());
                    break;
                case SkSL::SampleUsage::Kind::kPassThrough:
                    break;
                case SkSL::SampleUsage::Kind::kUniformMatrix:
                    // Update tracking of last matrix and matrix props.
                    hasPerspective |= fp.sampleUsage().hasPerspective();
                    lastMatrixFP = &fp;
                    lastMatrixTraversalIndex = traversalIndex;
                    break;
                case SkSL::SampleUsage::Kind::kFragCoord:
                    // The coord chain restarts at the device-space position, so any
                    // accumulated matrix chain above this FP is discarded.
                    hasPerspective = positionVar.getType() == SkSLType::kFloat3;
                    lastMatrixFP = nullptr;
                    lastMatrixTraversalIndex = -1;
                    baseCoord = BaseCoord::kPosition;
                    break;
                case SkSL::SampleUsage::Kind::kExplicit:
                    baseCoord = BaseCoord::kNone;
                    break;
            }
        } else {
            // If the GP doesn't provide an interpolatable local coord then there is no hope to
            // lift.
            baseCoord = BaseCoord::kNone;
        }

        auto& [varyingFSVar, hasCoordsParam] = result[&fp];
        hasCoordsParam = fp.usesSampleCoordsDirectly();

        // We add a varying if we're in a chain of matrices multiplied by local or device coords.
        // If the coord is the untransformed local coord we add a varying. We don't if it is
        // untransformed device coords since it doesn't save us anything over "sk_FragCoord.xy". Of
        // course, if the FP doesn't directly use its coords then we don't add a varying.
        if (fp.usesSampleCoordsDirectly() &&
            (baseCoord == BaseCoord::kLocal ||
             (baseCoord == BaseCoord::kPosition && lastMatrixFP && canUsePosition))) {
            // Associate the varying with the highest possible node in the FP tree that shares the
            // same coordinates so that multiple FPs in a subtree can share. If there are no matrix
            // sample nodes on the way up the tree then directly use the local coord.
            if (!lastMatrixFP) {
                varyingFSVar = baseLocalCoordFSVar();
            } else {
                // If there is an already a varying that incorporates all matrices from the root to
                // lastMatrixFP just use it. Otherwise, we add it.
                auto& [varying, inputCoords, varyingIdx] = fTransformVaryingsMap[lastMatrixFP];
                if (varying.type() == SkSLType::kVoid) {
                    varying = GrGLSLVarying(hasPerspective ? SkSLType::kFloat3 : SkSLType::kFloat2);
                    SkString strVaryingName = SkStringPrintf("TransformedCoords_%d",
                                                             lastMatrixTraversalIndex);
                    varyingHandler->addVarying(strVaryingName.c_str(), &varying);
                    inputCoords = baseCoord == BaseCoord::kLocal ? localCoordsVar : positionVar;
                    varyingIdx = lastMatrixTraversalIndex;
                }
                SkASSERT(varyingIdx == lastMatrixTraversalIndex);
                // The FP will use the varying in the fragment shader as its coords.
                varyingFSVar = varying.fsInVar();
            }
            hasCoordsParam = false;
        }

        for (int c = 0; c < fp.numChildProcessors(); ++c) {
            if (auto* child = fp.childProcessor(c)) {
                self(self,
                     *child,
                     hasPerspective,
                     lastMatrixFP,
                     lastMatrixTraversalIndex,
                     baseCoord);
                // If we have a varying then we never need a param. Otherwise, if one of our
                // children takes a non-explicit coord then we'll need our coord.
                hasCoordsParam |= varyingFSVar.getType() == SkSLType::kVoid &&
                                  !child->sampleUsage().isExplicit() &&
                                  !child->sampleUsage().isFragCoord() &&
                                  result[child].hasCoordsParam;
            }
        }
    };

    bool hasPerspective = SkSLTypeVecLength(localCoordsVar.getType()) == 3;
    for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) {
        liftTransforms(liftTransforms, pipeline.getFragmentProcessor(i), hasPerspective);
    }
    return result;
}
244
emitTransformCode(GrGLSLVertexBuilder * vb,GrGLSLUniformHandler * uniformHandler)245 void ProgramImpl::emitTransformCode(GrGLSLVertexBuilder* vb, GrGLSLUniformHandler* uniformHandler) {
246 // Because descendant varyings may be computed using the varyings of ancestor FPs we make
247 // sure to visit the varyings according to FP pre-order traversal by dumping them into a
248 // priority queue.
249 using FPAndInfo = std::tuple<const GrFragmentProcessor*, TransformInfo>;
250 auto compare = [](const FPAndInfo& a, const FPAndInfo& b) {
251 return std::get<1>(a).traversalOrder > std::get<1>(b).traversalOrder;
252 };
253 std::priority_queue<FPAndInfo, std::vector<FPAndInfo>, decltype(compare)> pq(compare);
254 std::for_each(fTransformVaryingsMap.begin(), fTransformVaryingsMap.end(), [&pq](auto entry) {
255 pq.push(entry);
256 });
257 for (; !pq.empty(); pq.pop()) {
258 const auto& [fp, info] = pq.top();
259 // If we recorded a transform info, its sample matrix must be uniform
260 SkASSERT(fp->sampleUsage().isUniformMatrix());
261 GrShaderVar uniform = uniformHandler->liftUniformToVertexShader(
262 *fp->parent(), SkString(SkSL::SampleUsage::MatrixUniformName()));
263 // Start with this matrix and accumulate additional matrices as we walk up the FP tree
264 // to either the base coords or an ancestor FP that has an associated varying.
265 SkString transformExpression = uniform.getName();
266
267 // If we hit an ancestor with a varying on our walk up then save off the varying as the
268 // input to our accumulated transformExpression. Start off assuming we'll reach the root.
269 GrShaderVar inputCoords = info.inputCoords;
270
271 for (const auto* base = fp->parent(); base; base = base->parent()) {
272 if (auto iter = fTransformVaryingsMap.find(base); iter != fTransformVaryingsMap.end()) {
273 // Can stop here, as this varying already holds all transforms from higher FPs
274 // We'll apply the residual transformExpression we've accumulated up from our
275 // starting FP to this varying.
276 inputCoords = iter->second.varying.vsOutVar();
277 break;
278 } else if (base->sampleUsage().isUniformMatrix()) {
279 // Accumulate any matrices along the path to either the original local/device coords
280 // or a parent varying. Getting here means this FP was sampled with a uniform matrix
281 // but all uses of coords below here in the FP hierarchy are beneath additional
282 // matrix samples and thus this node wasn't assigned a varying.
283 GrShaderVar parentUniform = uniformHandler->liftUniformToVertexShader(
284 *base->parent(), SkString(SkSL::SampleUsage::MatrixUniformName()));
285 transformExpression.appendf(" * %s", parentUniform.getName().c_str());
286 } else if (base->sampleUsage().isFragCoord()) {
287 // Our chain of matrices starts here and is based on the device space position.
288 break;
289 } else {
290 // This intermediate FP is just a pass through and doesn't need to be built
291 // in to the expression, but we must visit its parents in case they add transforms.
292 SkASSERT(base->sampleUsage().isPassThrough() || !base->sampleUsage().isSampled());
293 }
294 }
295
296 SkString inputStr;
297 if (inputCoords.getType() == SkSLType::kFloat2) {
298 inputStr = SkStringPrintf("%s.xy1", inputCoords.getName().c_str());
299 } else {
300 SkASSERT(inputCoords.getType() == SkSLType::kFloat3);
301 inputStr = inputCoords.getName();
302 }
303
304 vb->codeAppend("{\n");
305 if (info.varying.type() == SkSLType::kFloat2) {
306 if (vb->getProgramBuilder()->shaderCaps()->fNonsquareMatrixSupport) {
307 vb->codeAppendf("%s = float3x2(%s) * %s",
308 info.varying.vsOut(),
309 transformExpression.c_str(),
310 inputStr.c_str());
311 } else {
312 vb->codeAppendf("%s = (%s * %s).xy",
313 info.varying.vsOut(),
314 transformExpression.c_str(),
315 inputStr.c_str());
316 }
317 } else {
318 SkASSERT(info.varying.type() == SkSLType::kFloat3);
319 vb->codeAppendf("%s = %s * %s",
320 info.varying.vsOut(),
321 transformExpression.c_str(),
322 inputStr.c_str());
323 }
324 vb->codeAppend(";\n");
325 vb->codeAppend("}\n");
326 }
327 // We don't need this map anymore.
328 fTransformVaryingsMap.clear();
329 }
330
// Adds a half4 "Color" fragment uniform, assigns it to 'outputName', and
// returns the uniform handle via 'colorUniform'.
void ProgramImpl::setupUniformColor(GrGLSLFPFragmentBuilder* fragBuilder,
                                    GrGLSLUniformHandler* uniformHandler,
                                    const char* outputName,
                                    UniformHandle* colorUniform) {
    SkASSERT(colorUniform);
    const char* stagedLocalVarName;
    *colorUniform = uniformHandler->addUniform(nullptr,
                                               kFragment_GrShaderFlag,
                                               SkSLType::kHalf4,
                                               "Color",
                                               &stagedLocalVarName);
    fragBuilder->codeAppendf("%s = %s;", outputName, stagedLocalVarName);
    if (fragBuilder->getProgramBuilder()->shaderCaps()->fMustObfuscateUniformColor) {
        // Wrap the value in max() — presumably to stop the shader compiler from
        // constant-folding the uniform (a no-op for in-range colors); driven by
        // the fMustObfuscateUniformColor cap.
        fragBuilder->codeAppendf("%s = max(%s, half4(0));", outputName, outputName);
    }
}
347
SetTransform(const GrGLSLProgramDataManager & pdman,const GrShaderCaps & shaderCaps,const UniformHandle & uniform,const SkMatrix & matrix,SkMatrix * state)348 void ProgramImpl::SetTransform(const GrGLSLProgramDataManager& pdman,
349 const GrShaderCaps& shaderCaps,
350 const UniformHandle& uniform,
351 const SkMatrix& matrix,
352 SkMatrix* state) {
353 if (!uniform.isValid() || (state && SkMatrixPriv::CheapEqual(*state, matrix))) {
354 // No update needed
355 return;
356 }
357 if (state) {
358 *state = matrix;
359 }
360 if (matrix.isScaleTranslate() && !shaderCaps.fReducedShaderMode) {
361 // ComputeMatrixKey and writeX() assume the uniform is a float4 (can't assert since nothing
362 // is exposed on a handle, but should be caught lower down).
363 float values[4] = {matrix.getScaleX(), matrix.getTranslateX(),
364 matrix.getScaleY(), matrix.getTranslateY()};
365 pdman.set4fv(uniform, 1, values);
366 } else {
367 pdman.setSkMatrix(uniform, matrix);
368 }
369 }
370
write_passthrough_vertex_position(GrGLSLVertexBuilder * vertBuilder,const GrShaderVar & inPos,GrShaderVar * outPos)371 static void write_passthrough_vertex_position(GrGLSLVertexBuilder* vertBuilder,
372 const GrShaderVar& inPos,
373 GrShaderVar* outPos) {
374 SkASSERT(inPos.getType() == SkSLType::kFloat3 || inPos.getType() == SkSLType::kFloat2);
375 SkString outName = vertBuilder->newTmpVarName(inPos.getName().c_str());
376 outPos->set(inPos.getType(), outName.c_str());
377 vertBuilder->codeAppendf("float%d %s = %s;",
378 SkSLTypeVecLength(inPos.getType()),
379 outName.c_str(),
380 inPos.getName().c_str());
381 }
382
// Emits vertex code transforming 'inPos' by 'matrix' into a new temporary,
// returned via 'outPos'. Unless the matrix is identity (and reduced-shader mode
// is off), a matrix uniform named 'matrixName' is added and its handle stored
// in '*matrixUniform'. Scale+translate matrices use a compact float4 uniform
// packed {sx, tx, sy, ty} — this must agree with ProgramImpl::SetTransform.
static void write_vertex_position(GrGLSLVertexBuilder* vertBuilder,
                                  GrGLSLUniformHandler* uniformHandler,
                                  const GrShaderCaps& shaderCaps,
                                  const GrShaderVar& inPos,
                                  const SkMatrix& matrix,
                                  const char* matrixName,
                                  GrShaderVar* outPos,
                                  ProgramImpl::UniformHandle* matrixUniform) {
    SkASSERT(inPos.getType() == SkSLType::kFloat3 || inPos.getType() == SkSLType::kFloat2);
    SkString outName = vertBuilder->newTmpVarName(inPos.getName().c_str());

    if (matrix.isIdentity() && !shaderCaps.fReducedShaderMode) {
        // Identity: no uniform is needed; just alias the input into a new temp.
        write_passthrough_vertex_position(vertBuilder, inPos, outPos);
        return;
    }
    SkASSERT(matrixUniform);

    bool useCompactTransform = matrix.isScaleTranslate() && !shaderCaps.fReducedShaderMode;
    const char* mangledMatrixName;
    *matrixUniform = uniformHandler->addUniform(nullptr,
                                                kVertex_GrShaderFlag,
                                                useCompactTransform ? SkSLType::kFloat4
                                                                    : SkSLType::kFloat3x3,
                                                matrixName,
                                                &mangledMatrixName);

    if (inPos.getType() == SkSLType::kFloat3) {
        // A float3 stays a float3 whether or not the matrix adds perspective
        if (useCompactTransform) {
            // {sx,sy,1} * pos + {tx,ty,0} — the w component is untouched.
            vertBuilder->codeAppendf("float3 %s = %s.xz1 * %s + %s.yw0;\n",
                                     outName.c_str(),
                                     mangledMatrixName,
                                     inPos.getName().c_str(),
                                     mangledMatrixName);
        } else {
            vertBuilder->codeAppendf("float3 %s = %s * %s;\n",
                                     outName.c_str(),
                                     mangledMatrixName,
                                     inPos.getName().c_str());
        }
        outPos->set(SkSLType::kFloat3, outName.c_str());
        return;
    }
    if (matrix.hasPerspective()) {
        // A float2 is promoted to a float3 if we add perspective via the matrix
        SkASSERT(!useCompactTransform);
        vertBuilder->codeAppendf("float3 %s = (%s * %s.xy1);",
                                 outName.c_str(),
                                 mangledMatrixName,
                                 inPos.getName().c_str());
        outPos->set(SkSLType::kFloat3, outName.c_str());
        return;
    }
    if (useCompactTransform) {
        // float2 in, float2 out: {sx,sy} * pos + {tx,ty}.
        vertBuilder->codeAppendf("float2 %s = %s.xz * %s + %s.yw;\n",
                                 outName.c_str(),
                                 mangledMatrixName,
                                 inPos.getName().c_str(),
                                 mangledMatrixName);
    } else if (shaderCaps.fNonsquareMatrixSupport) {
        // Drop the third row of the 3x3 to avoid computing an unused z.
        vertBuilder->codeAppendf("float2 %s = float3x2(%s) * %s.xy1;\n",
                                 outName.c_str(),
                                 mangledMatrixName,
                                 inPos.getName().c_str());
    } else {
        vertBuilder->codeAppendf("float2 %s = (%s * %s.xy1).xy;\n",
                                 outName.c_str(),
                                 mangledMatrixName,
                                 inPos.getName().c_str());
    }
    outPos->set(SkSLType::kFloat2, outName.c_str());
}
455
WriteOutputPosition(GrGLSLVertexBuilder * vertBuilder,GrGPArgs * gpArgs,const char * posName)456 void ProgramImpl::WriteOutputPosition(GrGLSLVertexBuilder* vertBuilder,
457 GrGPArgs* gpArgs,
458 const char* posName) {
459 // writeOutputPosition assumes the incoming pos name points to a float2 variable
460 GrShaderVar inPos(posName, SkSLType::kFloat2);
461 write_passthrough_vertex_position(vertBuilder, inPos, &gpArgs->fPositionVar);
462 }
463
WriteOutputPosition(GrGLSLVertexBuilder * vertBuilder,GrGLSLUniformHandler * uniformHandler,const GrShaderCaps & shaderCaps,GrGPArgs * gpArgs,const char * posName,const SkMatrix & mat,UniformHandle * viewMatrixUniform)464 void ProgramImpl::WriteOutputPosition(GrGLSLVertexBuilder* vertBuilder,
465 GrGLSLUniformHandler* uniformHandler,
466 const GrShaderCaps& shaderCaps,
467 GrGPArgs* gpArgs,
468 const char* posName,
469 const SkMatrix& mat,
470 UniformHandle* viewMatrixUniform) {
471 GrShaderVar inPos(posName, SkSLType::kFloat2);
472 write_vertex_position(vertBuilder,
473 uniformHandler,
474 shaderCaps,
475 inPos,
476 mat,
477 "viewMatrix",
478 &gpArgs->fPositionVar,
479 viewMatrixUniform);
480 }
481
WriteLocalCoord(GrGLSLVertexBuilder * vertBuilder,GrGLSLUniformHandler * uniformHandler,const GrShaderCaps & shaderCaps,GrGPArgs * gpArgs,GrShaderVar localVar,const SkMatrix & localMatrix,UniformHandle * localMatrixUniform)482 void ProgramImpl::WriteLocalCoord(GrGLSLVertexBuilder* vertBuilder,
483 GrGLSLUniformHandler* uniformHandler,
484 const GrShaderCaps& shaderCaps,
485 GrGPArgs* gpArgs,
486 GrShaderVar localVar,
487 const SkMatrix& localMatrix,
488 UniformHandle* localMatrixUniform) {
489 write_vertex_position(vertBuilder,
490 uniformHandler,
491 shaderCaps,
492 localVar,
493 localMatrix,
494 "localMatrix",
495 &gpArgs->fLocalCoordVar,
496 localMatrixUniform);
497 }
498
499 //////////////////////////////////////////////////////////////////////////////
500
501 using Attribute = GrGeometryProcessor::Attribute;
502 using AttributeSet = GrGeometryProcessor::AttributeSet;
503
operator *() const504 GrGeometryProcessor::Attribute AttributeSet::Iter::operator*() const {
505 if (fCurr->offset().has_value()) {
506 return *fCurr;
507 }
508 return Attribute(fCurr->name(), fCurr->cpuType(), fCurr->gpuType(), fImplicitOffset);
509 }
510
operator ++()511 void AttributeSet::Iter::operator++() {
512 if (fRemaining) {
513 fRemaining--;
514 fImplicitOffset += Attribute::AlignOffset(fCurr->size());
515 fCurr++;
516 this->skipUninitialized();
517 }
518 }
519
skipUninitialized()520 void AttributeSet::Iter::skipUninitialized() {
521 if (!fRemaining) {
522 fCurr = nullptr;
523 } else {
524 while (!fCurr->isInitialized()) {
525 ++fCurr;
526 }
527 }
528 }
529
initImplicit(const Attribute * attrs,int count)530 void AttributeSet::initImplicit(const Attribute* attrs, int count) {
531 fAttributes = attrs;
532 fRawCount = count;
533 fCount = 0;
534 fStride = 0;
535 for (int i = 0; i < count; ++i) {
536 if (attrs[i].isInitialized()) {
537 fCount++;
538 fStride += Attribute::AlignOffset(attrs[i].size());
539 }
540 }
541 }
542
// Initializes a set whose attributes carry explicit offsets and stride.
// Unlike initImplicit(), every entry must be initialized (fCount == fRawCount),
// every offset must be aligned, and every attribute must fit within the stride.
void AttributeSet::initExplicit(const Attribute* attrs, int count, size_t stride) {
    fAttributes = attrs;
    fRawCount = count;
    fCount = count;
    fStride = stride;
    // The stride itself must satisfy attribute alignment.
    SkASSERT(Attribute::AlignOffset(fStride) == fStride);
    for (int i = 0; i < count; ++i) {
        SkASSERT(attrs[i].isInitialized());
        SkASSERT(attrs[i].offset().has_value());
        SkASSERT(Attribute::AlignOffset(*attrs[i].offset()) == *attrs[i].offset());
        SkASSERT(*attrs[i].offset() + attrs[i].size() <= fStride);
    }
}
556
// Serializes this attribute set's layout into the program key. Every raw slot
// is recorded — uninitialized slots as 0xff type sentinels and offset -1 — so
// that differing layouts always produce differing keys.
void AttributeSet::addToKey(skgpu::KeyBuilder* b) const {
    // Only the magnitude of fRawCount is the slot count (SkAbs32 strips any sign).
    int rawCount = SkAbs32(fRawCount);
    b->addBits(16, SkToU16(this->stride()), "stride");
    b->addBits(16, rawCount, "attribute count");
    size_t implicitOffset = 0;
    for (int i = 0; i < rawCount; ++i) {
        const Attribute& attr = fAttributes[i];
        b->appendComment(attr.isInitialized() ? attr.name() : "unusedAttr");
        // Both type enums must fit in the 8 bits reserved for them, keeping
        // 0xff free as the "unused" sentinel.
        static_assert(kGrVertexAttribTypeCount < (1 << 8), "");
        static_assert(kSkSLTypeCount < (1 << 8), "");
        b->addBits(8, attr.isInitialized() ? attr.cpuType() : 0xff, "attrType");
        b->addBits(8 , attr.isInitialized() ? static_cast<int>(attr.gpuType()) : 0xff,
                   "attrGpuType");
        int16_t offset = -1;
        if (attr.isInitialized()) {
            if (attr.offset().has_value()) {
                offset = *attr.offset();
            } else {
                // Implicit layout: tightly packed at aligned offsets, mirroring
                // Iter's fImplicitOffset accumulation.
                offset = implicitOffset;
                implicitOffset += Attribute::AlignOffset(attr.size());
            }
        }
        b->addBits(16, static_cast<uint16_t>(offset), "attrOffset");
    }
}
582
// begin() hands the iterator the raw array and the count of *initialized*
// attributes; Iter itself skips uninitialized gaps.
AttributeSet::Iter AttributeSet::begin() const { return Iter(fAttributes, fCount); }
// A default-constructed Iter serves as the end() sentinel.
AttributeSet::Iter AttributeSet::end() const { return Iter(); }
585