xref: /aosp_15_r20/external/angle/third_party/glslang/src/glslang/MachineIndependent/linkValidate.cpp (revision 8975f5c5ed3d1c378011245431ada316dfb6f244)
1 //
2 // Copyright (C) 2013 LunarG, Inc.
3 // Copyright (C) 2017 ARM Limited.
4 // Copyright (C) 2015-2018 Google, Inc.
5 //
6 // All rights reserved.
7 //
8 // Redistribution and use in source and binary forms, with or without
9 // modification, are permitted provided that the following conditions
10 // are met:
11 //
12 //    Redistributions of source code must retain the above copyright
13 //    notice, this list of conditions and the following disclaimer.
14 //
15 //    Redistributions in binary form must reproduce the above
16 //    copyright notice, this list of conditions and the following
17 //    disclaimer in the documentation and/or other materials provided
18 //    with the distribution.
19 //
20 //    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
21 //    contributors may be used to endorse or promote products derived
22 //    from this software without specific prior written permission.
23 //
24 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
32 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
34 // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 // POSSIBILITY OF SUCH DAMAGE.
36 //
37 
38 //
39 // Do link-time merging and validation of intermediate representations.
40 //
41 // Basic model is that during compilation, each compilation unit (shader) is
42 // compiled into one TIntermediate instance.  Then, at link time, multiple
43 // units for the same stage can be merged together, which can generate errors.
44 // Then, after all merging, a single instance of TIntermediate represents
45 // the whole stage.  A final error check can be done on the resulting stage,
46 // even if no merging was done (i.e., the stage was only one compilation unit).
47 //
48 
49 #include "glslang/Public/ShaderLang.h"
50 #include "localintermediate.h"
51 #include "../Include/InfoSink.h"
52 #include "SymbolTable.h"
53 #include "LiveTraverser.h"
54 
55 namespace glslang {
56 
57 //
58 // Link-time error emitter.
59 //
error(TInfoSink & infoSink,const char * message,EShLanguage unitStage)60 void TIntermediate::error(TInfoSink& infoSink, const char* message, EShLanguage unitStage)
61 {
62     infoSink.info.prefix(EPrefixError);
63     if (unitStage == EShLangCount)
64         infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
65     else if (language == EShLangCount)
66         infoSink.info << "Linking " << StageName(unitStage) << " stage: " << message << "\n";
67     else
68         infoSink.info << "Linking " << StageName(language) << " and " << StageName(unitStage) << " stages: " << message << "\n";
69 
70     ++numErrors;
71 }
72 
73 // Link-time warning.
warn(TInfoSink & infoSink,const char * message,EShLanguage unitStage)74 void TIntermediate::warn(TInfoSink& infoSink, const char* message, EShLanguage unitStage)
75 {
76     infoSink.info.prefix(EPrefixWarning);
77     if (unitStage == EShLangCount)
78         infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
79     else if (language == EShLangCount)
80         infoSink.info << "Linking " << StageName(unitStage) << " stage: " << message << "\n";
81     else
82         infoSink.info << "Linking " << StageName(language) << " and " << StageName(unitStage) << " stages: " << message << "\n";
83 }
84 
85 // TODO: 4.4 offset/align:  "Two blocks linked together in the same program with the same block
86 // name must have the exact same set of members qualified with offset and their integral-constant
87 // expression values must be the same, or a link-time error results."
88 
89 //
90 // Merge the information from 'unit' into 'this'
91 //
merge(TInfoSink & infoSink,TIntermediate & unit)92 void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
93 {
94     mergeCallGraphs(infoSink, unit);
95     mergeModes(infoSink, unit);
96     mergeTrees(infoSink, unit);
97 }
98 
99 //
100 // check that link objects between stages
101 //
mergeUniformObjects(TInfoSink & infoSink,TIntermediate & unit)102 void TIntermediate::mergeUniformObjects(TInfoSink& infoSink, TIntermediate& unit) {
103     if (unit.treeRoot == nullptr || treeRoot == nullptr)
104         return;
105 
106     // Get the linker-object lists
107     TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
108     TIntermSequence unitLinkerObjects = unit.findLinkerObjects()->getSequence();
109 
110     // filter unitLinkerObjects to only contain uniforms
111     auto end = std::remove_if(unitLinkerObjects.begin(), unitLinkerObjects.end(),
112         [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqUniform &&
113                                       node->getAsSymbolNode()->getQualifier().storage != EvqBuffer; });
114     unitLinkerObjects.resize(end - unitLinkerObjects.begin());
115 
116     // merge uniforms and do error checking
117     bool mergeExistingOnly = false;
118     mergeGlobalUniformBlocks(infoSink, unit, mergeExistingOnly);
119     mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());
120 }
121 
isSameInterface(TIntermSymbol * symbol,TIntermSymbol * unitSymbol)122 static inline bool isSameInterface(TIntermSymbol* symbol, TIntermSymbol* unitSymbol) {
123     EShLanguage stage = symbol->getStage();
124     EShLanguage unitStage = unitSymbol->getStage();
125     return // 1) same stage and same shader interface
126         (stage == unitStage && symbol->getType().getShaderInterface() == unitSymbol->getType().getShaderInterface()) ||
127         // 2) accross stages and both are uniform or buffer
128         (symbol->getQualifier().storage == EvqUniform  && unitSymbol->getQualifier().storage == EvqUniform) ||
129         (symbol->getQualifier().storage == EvqBuffer   && unitSymbol->getQualifier().storage == EvqBuffer) ||
130         // 3) in/out matched across stage boundary
131         (stage < unitStage && symbol->getQualifier().storage == EvqVaryingOut  && unitSymbol->getQualifier().storage == EvqVaryingIn) ||
132         (unitStage < stage && symbol->getQualifier().storage == EvqVaryingIn && unitSymbol->getQualifier().storage == EvqVaryingOut);
133 }
134 
isSameSymbol(TIntermSymbol * symbol1,TIntermSymbol * symbol2)135 static bool isSameSymbol(TIntermSymbol* symbol1, TIntermSymbol* symbol2) {
136     // If they are both blocks in the same shader interface,
137     // match by the block-name, not the identifier name.
138     if (symbol1->getType().getBasicType() == EbtBlock && symbol2->getType().getBasicType() == EbtBlock) {
139         if (isSameInterface(symbol1, symbol2)) {
140             return symbol1->getType().getTypeName() == symbol2->getType().getTypeName();
141         }
142     } else if (symbol1->getName() == symbol2->getName())
143         return true;
144     return false;
145 }
146 //
147 // do error checking on the shader boundary in / out vars
148 //
checkStageIO(TInfoSink & infoSink,TIntermediate & unit)149 void TIntermediate::checkStageIO(TInfoSink& infoSink, TIntermediate& unit) {
150     if (unit.treeRoot == nullptr || treeRoot == nullptr)
151         return;
152 
153     // Get copies of the linker-object lists
154     TIntermSequence linkerObjects = findLinkerObjects()->getSequence();
155     TIntermSequence unitLinkerObjects = unit.findLinkerObjects()->getSequence();
156 
157     // filter linkerObjects to only contain out variables
158     auto end = std::remove_if(linkerObjects.begin(), linkerObjects.end(),
159         [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqVaryingOut; });
160     linkerObjects.resize(end - linkerObjects.begin());
161 
162     // filter unitLinkerObjects to only contain in variables
163     auto unitEnd = std::remove_if(unitLinkerObjects.begin(), unitLinkerObjects.end(),
164         [](TIntermNode* node) {return node->getAsSymbolNode()->getQualifier().storage != EvqVaryingIn; });
165     unitLinkerObjects.resize(unitEnd - unitLinkerObjects.begin());
166 
167     // do matching and error checking
168     mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());
169 
170     // Check that all of our inputs have matching outputs from the previous stage.
171     // Only do this for Vulkan, since GL_ARB_separate_shader_objects allows for
172     // the in/out to not match
173     if (spvVersion.vulkan > 0) {
174         for (auto& nextStageInterm : unitLinkerObjects) {
175             auto* nextStageSymbol = nextStageInterm->getAsSymbolNode();
176             bool found = false;
177             for (auto& curStageInterm : linkerObjects) {
178                 if (isSameSymbol(curStageInterm->getAsSymbolNode(), nextStageSymbol)) {
179                     found = true;
180                     break;
181                 }
182             }
183             if (!found) {
184                 TString errmsg;
185                 errmsg.append("Input '");
186                 if (nextStageSymbol->getType().getBasicType() == EbtBlock)
187                     errmsg.append(nextStageSymbol->getType().getTypeName());
188                 else
189                     errmsg.append(nextStageSymbol->getName());
190                 errmsg.append("' in ").append(StageName(unit.getStage()));
191                 errmsg.append(" shader has no corresponding output in ").append(StageName(getStage())).append(" shader.");
192                 error(infoSink, errmsg.c_str(), unit.getStage());
193             }
194         }
195     }
196 }
197 
optimizeStageIO(TInfoSink &,TIntermediate & unit)198 void TIntermediate::optimizeStageIO(TInfoSink&, TIntermediate& unit)
199 {
200     // don't do any input/output demotion on compute, raytracing, or task/mesh stages
201     // TODO: support task/mesh
202     if (getStage() > EShLangFragment || unit.getStage() > EShLangFragment) {
203         return;
204     }
205 
206     class TIOTraverser : public TLiveTraverser {
207     public:
208         TIOTraverser(TIntermediate& i, bool all, TIntermSequence& sequence, TStorageQualifier storage)
209             : TLiveTraverser(i, all, true, false, false), sequence(sequence), storage(storage)
210         {
211         }
212 
213         virtual void visitSymbol(TIntermSymbol* symbol)
214         {
215             if (symbol->getQualifier().storage == storage) {
216                 sequence.push_back(symbol);
217             }
218         }
219 
220     private:
221         TIntermSequence& sequence;
222         TStorageQualifier storage;
223     };
224 
225     // live symbols only
226     TIntermSequence unitLiveInputs;
227 
228     TIOTraverser unitTraverser(unit, false, unitLiveInputs, EvqVaryingIn);
229     unitTraverser.pushFunction(unit.getEntryPointMangledName().c_str());
230     while (! unitTraverser.destinations.empty()) {
231         TIntermNode* destination = unitTraverser.destinations.back();
232         unitTraverser.destinations.pop_back();
233         destination->traverse(&unitTraverser);
234     }
235 
236     TIntermSequence allOutputs;
237     TIntermSequence unitAllInputs;
238 
239     TIOTraverser allTraverser(*this, true, allOutputs, EvqVaryingOut);
240     getTreeRoot()->traverse(&allTraverser);
241 
242     TIOTraverser unitAllTraverser(unit, true, unitAllInputs, EvqVaryingIn);
243     unit.getTreeRoot()->traverse(&unitAllTraverser);
244 
245     // find outputs not consumed by the next stage
246     std::for_each(allOutputs.begin(), allOutputs.end(), [&unitLiveInputs, &unitAllInputs](TIntermNode* output) {
247         // don't do anything to builtins
248         if (output->getAsSymbolNode()->getAccessName().compare(0, 3, "gl_") == 0)
249             return;
250 
251         // don't demote block outputs (for now)
252         if (output->getAsSymbolNode()->getBasicType() == EbtBlock)
253             return;
254 
255         // check if the (loose) output has a matching loose input
256         auto isMatchingInput = [output](TIntermNode* input) {
257             return output->getAsSymbolNode()->getAccessName() == input->getAsSymbolNode()->getAccessName();
258         };
259 
260         // check if the (loose) output has a matching block member input
261         auto isMatchingInputBlockMember = [output](TIntermNode* input) {
262             // ignore loose inputs
263             if (input->getAsSymbolNode()->getBasicType() != EbtBlock)
264                 return false;
265 
266             // don't demote loose outputs with matching input block members
267             auto isMatchingBlockMember = [output](TTypeLoc type) {
268                 return type.type->getFieldName() == output->getAsSymbolNode()->getName();
269             };
270             const TTypeList* members = input->getAsSymbolNode()->getType().getStruct();
271             return std::any_of(members->begin(), members->end(), isMatchingBlockMember);
272         };
273 
274         // determine if the input/output pair should be demoted
275         // do the faster (and more likely) loose-loose check first
276         if (std::none_of(unitLiveInputs.begin(), unitLiveInputs.end(), isMatchingInput) &&
277             std::none_of(unitAllInputs.begin(), unitAllInputs.end(), isMatchingInputBlockMember)) {
278             // demote any input matching the output
279             auto demoteMatchingInputs = [output](TIntermNode* input) {
280                 if (output->getAsSymbolNode()->getAccessName() == input->getAsSymbolNode()->getAccessName()) {
281                     // demote input to a plain variable
282                     TIntermSymbol* symbol = input->getAsSymbolNode();
283                     symbol->getQualifier().storage = EvqGlobal;
284                     symbol->getQualifier().clearInterstage();
285                     symbol->getQualifier().clearLayout();
286                 }
287             };
288 
289             // demote all matching outputs to a plain variable
290             TIntermSymbol* symbol = output->getAsSymbolNode();
291             symbol->getQualifier().storage = EvqGlobal;
292             symbol->getQualifier().clearInterstage();
293             symbol->getQualifier().clearLayout();
294             std::for_each(unitAllInputs.begin(), unitAllInputs.end(), demoteMatchingInputs);
295         }
296     });
297 }
298 
mergeCallGraphs(TInfoSink & infoSink,TIntermediate & unit)299 void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
300 {
301     if (unit.getNumEntryPoints() > 0) {
302         if (getNumEntryPoints() > 0)
303             error(infoSink, "can't handle multiple entry points per stage");
304         else {
305             entryPointName = unit.getEntryPointName();
306             entryPointMangledName = unit.getEntryPointMangledName();
307         }
308     }
309     numEntryPoints += unit.getNumEntryPoints();
310 
311     callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
312 }
313 
314 #define MERGE_MAX(member) member = std::max(member, unit.member)
315 #define MERGE_TRUE(member) if (unit.member) member = unit.member;
316 
mergeModes(TInfoSink & infoSink,TIntermediate & unit)317 void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
318 {
319     if (language != unit.language)
320         error(infoSink, "stages must match when linking into a single stage");
321 
322     if (getSource() == EShSourceNone)
323         setSource(unit.getSource());
324     if (getSource() != unit.getSource())
325         error(infoSink, "can't link compilation units from different source languages");
326 
327     if (treeRoot == nullptr) {
328         profile = unit.profile;
329         version = unit.version;
330         requestedExtensions = unit.requestedExtensions;
331     } else {
332         if ((isEsProfile()) != (unit.isEsProfile()))
333             error(infoSink, "Cannot cross link ES and desktop profiles");
334         else if (unit.profile == ECompatibilityProfile)
335             profile = ECompatibilityProfile;
336         version = std::max(version, unit.version);
337         requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end());
338     }
339 
340     MERGE_MAX(spvVersion.spv);
341     MERGE_MAX(spvVersion.vulkanGlsl);
342     MERGE_MAX(spvVersion.vulkan);
343     MERGE_MAX(spvVersion.openGl);
344     MERGE_TRUE(spvVersion.vulkanRelaxed);
345 
346     numErrors += unit.getNumErrors();
347     // Only one push_constant is allowed, mergeLinkerObjects() will ensure the push_constant
348     // is the same for all units.
349     if (numPushConstants > 1 || unit.numPushConstants > 1)
350         error(infoSink, "Only one push_constant block is allowed per stage");
351     numPushConstants = std::min(numPushConstants + unit.numPushConstants, 1);
352 
353     if (unit.invocations != TQualifier::layoutNotSet) {
354         if (invocations == TQualifier::layoutNotSet)
355             invocations = unit.invocations;
356         else if (invocations != unit.invocations)
357             error(infoSink, "number of invocations must match between compilation units");
358     }
359 
360     if (vertices == TQualifier::layoutNotSet)
361         vertices = unit.vertices;
362     else if (unit.vertices != TQualifier::layoutNotSet && vertices != unit.vertices) {
363         if (language == EShLangGeometry || language == EShLangMesh)
364             error(infoSink, "Contradictory layout max_vertices values");
365         else if (language == EShLangTessControl)
366             error(infoSink, "Contradictory layout vertices values");
367         else
368             assert(0);
369     }
370     if (primitives == TQualifier::layoutNotSet)
371         primitives = unit.primitives;
372     else if (primitives != unit.primitives) {
373         if (language == EShLangMesh)
374             error(infoSink, "Contradictory layout max_primitives values");
375         else
376             assert(0);
377     }
378 
379     if (inputPrimitive == ElgNone)
380         inputPrimitive = unit.inputPrimitive;
381     else if (unit.inputPrimitive != ElgNone && inputPrimitive != unit.inputPrimitive)
382         error(infoSink, "Contradictory input layout primitives");
383 
384     if (outputPrimitive == ElgNone)
385         outputPrimitive = unit.outputPrimitive;
386     else if (unit.outputPrimitive != ElgNone && outputPrimitive != unit.outputPrimitive)
387         error(infoSink, "Contradictory output layout primitives");
388 
389     if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger)
390         error(infoSink, "gl_FragCoord redeclarations must match across shaders");
391 
392     if (vertexSpacing == EvsNone)
393         vertexSpacing = unit.vertexSpacing;
394     else if (vertexSpacing != unit.vertexSpacing)
395         error(infoSink, "Contradictory input vertex spacing");
396 
397     if (vertexOrder == EvoNone)
398         vertexOrder = unit.vertexOrder;
399     else if (vertexOrder != unit.vertexOrder)
400         error(infoSink, "Contradictory triangle ordering");
401 
402     MERGE_TRUE(pointMode);
403 
404     for (int i = 0; i < 3; ++i) {
405         if (unit.localSizeNotDefault[i]) {
406             if (!localSizeNotDefault[i]) {
407                 localSize[i] = unit.localSize[i];
408                 localSizeNotDefault[i] = true;
409             }
410             else if (localSize[i] != unit.localSize[i])
411                 error(infoSink, "Contradictory local size");
412         }
413 
414         if (localSizeSpecId[i] == TQualifier::layoutNotSet)
415             localSizeSpecId[i] = unit.localSizeSpecId[i];
416         else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
417             error(infoSink, "Contradictory local size specialization ids");
418     }
419 
420     MERGE_TRUE(earlyFragmentTests);
421     MERGE_TRUE(postDepthCoverage);
422     MERGE_TRUE(nonCoherentColorAttachmentReadEXT);
423     MERGE_TRUE(nonCoherentDepthAttachmentReadEXT);
424     MERGE_TRUE(nonCoherentStencilAttachmentReadEXT);
425 
426     if (depthLayout == EldNone)
427         depthLayout = unit.depthLayout;
428     else if (depthLayout != unit.depthLayout)
429         error(infoSink, "Contradictory depth layouts");
430 
431     MERGE_TRUE(depthReplacing);
432     MERGE_TRUE(hlslFunctionality1);
433 
434     blendEquations |= unit.blendEquations;
435 
436     MERGE_TRUE(xfbMode);
437 
438     for (size_t b = 0; b < xfbBuffers.size(); ++b) {
439         if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
440             xfbBuffers[b].stride = unit.xfbBuffers[b].stride;
441         else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride)
442             error(infoSink, "Contradictory xfb_stride");
443         xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
444         if (unit.xfbBuffers[b].contains64BitType)
445             xfbBuffers[b].contains64BitType = true;
446         if (unit.xfbBuffers[b].contains32BitType)
447             xfbBuffers[b].contains32BitType = true;
448         if (unit.xfbBuffers[b].contains16BitType)
449             xfbBuffers[b].contains16BitType = true;
450         // TODO: 4.4 link: enhanced layouts: compare ranges
451     }
452 
453     MERGE_TRUE(multiStream);
454     MERGE_TRUE(layoutOverrideCoverage);
455     MERGE_TRUE(geoPassthroughEXT);
456 
457     for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
458         if (unit.shiftBinding[i] > 0)
459             setShiftBinding((TResourceType)i, unit.shiftBinding[i]);
460     }
461 
462     for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) {
463         for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it)
464             setShiftBindingForSet((TResourceType)i, it->second, it->first);
465     }
466 
467     resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end());
468 
469     MERGE_TRUE(autoMapBindings);
470     MERGE_TRUE(autoMapLocations);
471     MERGE_TRUE(invertY);
472     MERGE_TRUE(dxPositionW);
473     MERGE_TRUE(debugInfo);
474     MERGE_TRUE(flattenUniformArrays);
475     MERGE_TRUE(useUnknownFormat);
476     MERGE_TRUE(hlslOffsets);
477     MERGE_TRUE(useStorageBuffer);
478     MERGE_TRUE(invariantAll);
479     MERGE_TRUE(hlslIoMapping);
480 
481     // TODO: sourceFile
482     // TODO: sourceText
483     // TODO: processes
484 
485     MERGE_TRUE(needToLegalize);
486     MERGE_TRUE(binaryDoubleOutput);
487     MERGE_TRUE(usePhysicalStorageBuffer);
488 }
489 
490 //
491 // Merge the 'unit' AST into 'this' AST.
492 // That includes rationalizing the unique IDs, which were set up independently,
493 // and might have overlaps that are not the same symbol, or might have different
494 // IDs for what should be the same shared symbol.
495 //
mergeTrees(TInfoSink & infoSink,TIntermediate & unit)496 void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
497 {
498     if (unit.treeRoot == nullptr)
499         return;
500 
501     if (treeRoot == nullptr) {
502         treeRoot = unit.treeRoot;
503         return;
504     }
505 
506     // Getting this far means we have two existing trees to merge...
507     numShaderRecordBlocks += unit.numShaderRecordBlocks;
508     numTaskNVBlocks += unit.numTaskNVBlocks;
509 
510     // Get the top-level globals of each unit
511     TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
512     TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence();
513 
514     // Get the linker-object lists
515     TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
516     const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();
517 
518     // Map by global name to unique ID to rationalize the same object having
519     // differing IDs in different trees.
520     TIdMaps idMaps;
521     long long idShift;
522     seedIdMap(idMaps, idShift);
523     remapIds(idMaps, idShift + 1, unit);
524 
525     mergeBodies(infoSink, globals, unitGlobals);
526     bool mergeExistingOnly = false;
527     mergeGlobalUniformBlocks(infoSink, unit, mergeExistingOnly);
528     mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects, unit.getStage());
529     ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
530 }
531 
getNameForIdMap(TIntermSymbol * symbol)532 static const TString& getNameForIdMap(TIntermSymbol* symbol)
533 {
534     TShaderInterface si = symbol->getType().getShaderInterface();
535     if (si == EsiNone)
536         return symbol->getName();
537     else
538         return symbol->getType().getTypeName();
539 }
540 
541 
542 
543 // Traverser that seeds an ID map with all built-ins, and tracks the
544 // maximum ID used, currently using (maximum ID + 1) as new symbol id shift seed.
545 // Level id will keep same after shifting.
546 // (It would be nice to put this in a function, but that causes warnings
547 // on having no bodies for the copy-constructor/operator=.)
548 class TBuiltInIdTraverser : public TIntermTraverser {
549 public:
TBuiltInIdTraverser(TIdMaps & idMaps)550     TBuiltInIdTraverser(TIdMaps& idMaps) : idMaps(idMaps), idShift(0) { }
551     // If it's a built in, add it to the map.
visitSymbol(TIntermSymbol * symbol)552     virtual void visitSymbol(TIntermSymbol* symbol)
553     {
554         const TQualifier& qualifier = symbol->getType().getQualifier();
555         if (qualifier.builtIn != EbvNone) {
556             TShaderInterface si = symbol->getType().getShaderInterface();
557             idMaps[si][getNameForIdMap(symbol)] = symbol->getId();
558         }
559         idShift = (symbol->getId() & ~TSymbolTable::uniqueIdMask) |
560                 std::max(idShift & TSymbolTable::uniqueIdMask,
561                          symbol->getId() & TSymbolTable::uniqueIdMask);
562     }
getIdShift() const563     long long getIdShift() const { return idShift; }
564 protected:
565     TBuiltInIdTraverser(TBuiltInIdTraverser&);
566     TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&);
567     TIdMaps& idMaps;
568     long long idShift;
569 };
570 
571 // Traverser that seeds an ID map with non-builtins.
572 // (It would be nice to put this in a function, but that causes warnings
573 // on having no bodies for the copy-constructor/operator=.)
574 class TUserIdTraverser : public TIntermTraverser {
575 public:
TUserIdTraverser(TIdMaps & idMaps)576     TUserIdTraverser(TIdMaps& idMaps) : idMaps(idMaps) { }
577     // If its a non-built-in global, add it to the map.
visitSymbol(TIntermSymbol * symbol)578     virtual void visitSymbol(TIntermSymbol* symbol)
579     {
580         const TQualifier& qualifier = symbol->getType().getQualifier();
581         if (qualifier.builtIn == EbvNone) {
582             TShaderInterface si = symbol->getType().getShaderInterface();
583             idMaps[si][getNameForIdMap(symbol)] = symbol->getId();
584         }
585     }
586 
587 protected:
588     TUserIdTraverser(TUserIdTraverser&);
589     TUserIdTraverser& operator=(TUserIdTraverser&);
590     TIdMaps& idMaps; // over biggest id
591 };
592 
593 // Initialize the the ID map with what we know of 'this' AST.
seedIdMap(TIdMaps & idMaps,long long & idShift)594 void TIntermediate::seedIdMap(TIdMaps& idMaps, long long& idShift)
595 {
596     // all built-ins everywhere need to align on IDs and contribute to the max ID
597     TBuiltInIdTraverser builtInIdTraverser(idMaps);
598     treeRoot->traverse(&builtInIdTraverser);
599     idShift = builtInIdTraverser.getIdShift() & TSymbolTable::uniqueIdMask;
600 
601     // user variables in the linker object list need to align on ids
602     TUserIdTraverser userIdTraverser(idMaps);
603     findLinkerObjects()->traverse(&userIdTraverser);
604 }
605 
606 // Traverser to map an AST ID to what was known from the seeding AST.
607 // (It would be nice to put this in a function, but that causes warnings
608 // on having no bodies for the copy-constructor/operator=.)
609 class TRemapIdTraverser : public TIntermTraverser {
610 public:
TRemapIdTraverser(const TIdMaps & idMaps,long long idShift)611     TRemapIdTraverser(const TIdMaps& idMaps, long long idShift) : idMaps(idMaps), idShift(idShift) { }
612     // Do the mapping:
613     //  - if the same symbol, adopt the 'this' ID
614     //  - otherwise, ensure a unique ID by shifting to a new space
visitSymbol(TIntermSymbol * symbol)615     virtual void visitSymbol(TIntermSymbol* symbol)
616     {
617         const TQualifier& qualifier = symbol->getType().getQualifier();
618         bool remapped = false;
619         if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) {
620             TShaderInterface si = symbol->getType().getShaderInterface();
621             auto it = idMaps[si].find(getNameForIdMap(symbol));
622             if (it != idMaps[si].end()) {
623                 uint64_t id = (symbol->getId() & ~TSymbolTable::uniqueIdMask) |
624                     (it->second & TSymbolTable::uniqueIdMask);
625                 symbol->changeId(id);
626                 remapped = true;
627             }
628         }
629         if (!remapped)
630             symbol->changeId(symbol->getId() + idShift);
631     }
632 protected:
633     TRemapIdTraverser(TRemapIdTraverser&);
634     TRemapIdTraverser& operator=(TRemapIdTraverser&);
635     const TIdMaps& idMaps;
636     long long idShift;
637 };
638 
remapIds(const TIdMaps & idMaps,long long idShift,TIntermediate & unit)639 void TIntermediate::remapIds(const TIdMaps& idMaps, long long idShift, TIntermediate& unit)
640 {
641     // Remap all IDs to either share or be unique, as dictated by the idMap and idShift.
642     TRemapIdTraverser idTraverser(idMaps, idShift);
643     unit.getTreeRoot()->traverse(&idTraverser);
644 }
645 
646 //
647 // Merge the function bodies and global-level initializers from unitGlobals into globals.
648 // Will error check duplication of function bodies for the same signature.
649 //
mergeBodies(TInfoSink & infoSink,TIntermSequence & globals,const TIntermSequence & unitGlobals)650 void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals)
651 {
652     // TODO: link-time performance: Processing in alphabetical order will be faster
653 
654     // Error check the global objects, not including the linker objects
655     for (unsigned int child = 0; child < globals.size() - 1; ++child) {
656         for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) {
657             TIntermAggregate* body = globals[child]->getAsAggregate();
658             TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate();
659             if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) {
660                 error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:");
661                 infoSink.info << "    " << globals[child]->getAsAggregate()->getName() << "\n";
662             }
663         }
664     }
665 
666     // Merge the global objects, just in front of the linker objects
667     globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1);
668 }
669 
//
// Global Uniform block stores any default uniforms (i.e. uniforms without a block)
// If two linked stages declare the same member, they are meant to be the same uniform
// and need to be in the same block
// merge the members of different stages to allow them to be linked properly
// as a single block
//
// 'mergeExistingOnly': when true, unit blocks with no matching block in 'this'
// are NOT appended as new linker objects; only existing blocks are merged into.
void TIntermediate::mergeGlobalUniformBlocks(TInfoSink& infoSink, TIntermediate& unit, bool mergeExistingOnly)
{
    TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
    TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence();

    // build lists of default blocks from the intermediates
    TIntermSequence defaultBlocks;
    TIntermSequence unitDefaultBlocks;

    // Keep only the linker objects whose qualifier marks them as a default block.
    // NOTE(review): assumes every linker object is a symbol node (no null check) —
    // presumably an invariant of findLinkerObjects(); confirm upstream.
    auto filter = [](TIntermSequence& list, TIntermNode* node) {
        if (node->getAsSymbolNode()->getQualifier().defaultBlock) {
            list.push_back(node);
        }
    };

    std::for_each(linkerObjects.begin(), linkerObjects.end(),
        [&defaultBlocks, &filter](TIntermNode* node) {
            filter(defaultBlocks, node);
        });
    std::for_each(unitLinkerObjects.begin(), unitLinkerObjects.end(),
        [&unitDefaultBlocks, &filter](TIntermNode* node) {
            filter(unitDefaultBlocks, node);
    });

    // For each default block in the unit, either merge it into a matching block
    // of 'this' or (unless mergeExistingOnly) append it as a brand-new block.
    auto itUnitBlock = unitDefaultBlocks.begin();
    for (; itUnitBlock != unitDefaultBlocks.end(); itUnitBlock++) {

        bool add = !mergeExistingOnly;
        auto itBlock = defaultBlocks.begin();

        for (; itBlock != defaultBlocks.end(); itBlock++) {
            TIntermSymbol* block = (*itBlock)->getAsSymbolNode();
            TIntermSymbol* unitBlock = (*itUnitBlock)->getAsSymbolNode();

            assert(block && unitBlock);

            // if the two default blocks match, then merge their definitions
            if (block->getType().getTypeName() == unitBlock->getType().getTypeName() &&
                block->getQualifier().storage == unitBlock->getQualifier().storage) {
                add = false;
                mergeBlockDefinitions(infoSink, block, unitBlock, &unit);
            }
        }
        if (add) {
            // push back on original list; won't change the size of the list we're iterating over
            linkerObjects.push_back(*itUnitBlock);
        }
    }
}
726 
// Merge the member list of 'unitBlock' (from the 'unit' intermediate) into 'block',
// then rewrite both trees so every symbol and member-index reference uses the
// merged layout. No-op if the two blocks don't refer to the same interface.
void TIntermediate::mergeBlockDefinitions(TInfoSink& infoSink, TIntermSymbol* block, TIntermSymbol* unitBlock, TIntermediate* unit) {

    if (block->getType().getTypeName() != unitBlock->getType().getTypeName() ||
        block->getType().getBasicType() != unitBlock->getType().getBasicType() ||
        block->getQualifier().storage != unitBlock->getQualifier().storage ||
        block->getQualifier().layoutSet != unitBlock->getQualifier().layoutSet) {
        // different block names likely means different blocks
        return;
    }

    // merge the struct
    // order of declarations doesn't matter and they matched based on member name
    TTypeList* memberList = block->getType().getWritableStruct();
    TTypeList* unitMemberList = unitBlock->getType().getWritableStruct();

    // keep track of which members have changed position
    // so we don't have to search the array again
    // (maps unit-block member index -> index in the merged member list)
    std::map<unsigned int, unsigned int> memberIndexUpdates;

    // Only match against the members 'block' had before this merge; members
    // appended below must not participate in the name matching.
    size_t memberListStartSize = memberList->size();
    for (unsigned int i = 0; i < unitMemberList->size(); ++i) {
        bool merge = true;
        for (unsigned int j = 0; j < memberListStartSize; ++j) {
            if ((*memberList)[j].type->getFieldName() == (*unitMemberList)[i].type->getFieldName()) {
                merge = false;
                const TType* memberType = (*memberList)[j].type;
                const TType* unitMemberType = (*unitMemberList)[i].type;

                // compare types
                // don't need as many checks as when merging symbols, since
                // initializers and most qualifiers are stripped when the member is moved into the block
                if ((*memberType) != (*unitMemberType)) {
                    error(infoSink, "Types must match:", unitBlock->getStage());
                    infoSink.info << "    " << memberType->getFieldName() << ": ";
                    infoSink.info << "\"" << memberType->getCompleteString() << "\" in stage " << StageName(block->getStage()) << " versus ";
                    infoSink.info << "\"" << unitMemberType->getCompleteString() << "\" in stage " << StageName(unitBlock->getStage()) << "\n";
                }

                memberIndexUpdates[i] = j;
            }
        }
        if (merge) {
            // member exists only in the unit block: append it to the merged list
            memberList->push_back((*unitMemberList)[i]);
            memberIndexUpdates[i] = (unsigned int)memberList->size() - 1;
        }
    }

    // update symbol node in unit tree,
    // and other nodes that may reference it
    class TMergeBlockTraverser : public TIntermTraverser {
    public:
        // Pass 1 form ('this' tree): only refresh symbols' struct definitions.
        TMergeBlockTraverser(const TIntermSymbol* newSym)
            : newSymbol(newSym), newType(nullptr), unit(nullptr), memberIndexUpdates(nullptr)
        {
        }
        // Pass 2 form ('unit' tree): also remap member indices via memberIdxUpdates.
        TMergeBlockTraverser(const TIntermSymbol* newSym, const glslang::TType* unitType, glslang::TIntermediate* unit,
                             const std::map<unsigned int, unsigned int>* memberIdxUpdates)
            : TIntermTraverser(false, true), newSymbol(newSym), newType(unitType), unit(unit), memberIndexUpdates(memberIdxUpdates)
        {
        }
        virtual ~TMergeBlockTraverser() {}

        const TIntermSymbol* newSymbol;
        const glslang::TType* newType; // shallow copy of the new type
        glslang::TIntermediate* unit;   // intermediate that is being updated
        const std::map<unsigned int, unsigned int>* memberIndexUpdates;

        virtual void visitSymbol(TIntermSymbol* symbol)
        {
            if (newSymbol->getAccessName() == symbol->getAccessName() &&
                newSymbol->getQualifier().getBlockStorage() == symbol->getQualifier().getBlockStorage()) {
                // Each symbol node may have a local copy of the block structure.
                // Update those structures to match the new one post-merge
                *(symbol->getWritableType().getWritableStruct()) = *(newSymbol->getType().getStruct());
            }
        }

        virtual bool visitBinary(TVisit, glslang::TIntermBinary* node)
        {
            // Index fix-up is only wired up for the 'unit' pass (second ctor).
            if (!unit || !newType || !memberIndexUpdates || memberIndexUpdates->empty())
                return true;

            if (node->getOp() == EOpIndexDirectStruct && node->getLeft()->getType() == *newType) {
                // this is a dereference to a member of the block since the
                // member list changed, need to update this to point to the
                // right index
                assert(node->getRight()->getAsConstantUnion());

                glslang::TIntermConstantUnion* constNode = node->getRight()->getAsConstantUnion();
                unsigned int memberIdx = constNode->getConstArray()[0].getUConst();
                unsigned int newIdx = memberIndexUpdates->at(memberIdx);
                TIntermTyped* newConstNode = unit->addConstantUnion(newIdx, node->getRight()->getLoc());

                node->setRight(newConstNode);
                delete constNode;

                return true;
            }
            return true;
        }
    };

    // 'this' may have symbols that are using the old block structure, so traverse the tree to update those
    // in 'visitSymbol'
    TMergeBlockTraverser finalLinkTraverser(block);
    getTreeRoot()->traverse(&finalLinkTraverser);

    // The 'unit' intermediate needs the block structures update, but also structure entry indices
    // may have changed from the old block to the new one that it was merged into, so update those
    // in 'visitBinary'
    TType newType;
    newType.shallowCopy(block->getType());
    TMergeBlockTraverser unitFinalLinkTraverser(block, &newType, unit, &memberIndexUpdates);
    unit->getTreeRoot()->traverse(&unitFinalLinkTraverser);

    // update the member list
    (*unitMemberList) = (*memberList);
}
845 
//
// Merge the linker objects from unitLinkerObjects into linkerObjects.
// Duplication is expected and filtered out, but contradictions are an error.
//
void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects, EShLanguage unitStage)
{
    // Error check and merge the linker objects (duplicates should not be created)
    // Only compare against the objects present before this call; push_back below
    // grows linkerObjects, and newly appended objects must not be re-checked.
    std::size_t initialNumLinkerObjects = linkerObjects.size();
    for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) {
        TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
        bool merge = true;

        // Don't merge inputs backwards into previous stages
        if (getStage() != unitStage && unitSymbol->getQualifier().storage == EvqVaryingIn)
            merge = false;

        for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
            TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
            assert(symbol && unitSymbol);

            if (isSameSymbol(symbol, unitSymbol)) {
                // filter out copy
                merge = false;

                // but if one has an initializer and the other does not, update
                // the initializer
                if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty())
                    symbol->setConstArray(unitSymbol->getConstArray());

                // Similarly for binding
                if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding())
                    symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding;

                // Similarly for location
                if (!symbol->getQualifier().hasLocation() && unitSymbol->getQualifier().hasLocation()) {
                    symbol->getQualifier().layoutLocation = unitSymbol->getQualifier().layoutLocation;
                }

                // Update implicit array sizes
                // Keep the larger implicit size, or report a mismatch when one
                // side is explicitly sized smaller than the other's implicit size.
                if (symbol->getWritableType().isImplicitlySizedArray() && unitSymbol->getType().isImplicitlySizedArray()) {
                    if (unitSymbol->getType().getImplicitArraySize() > symbol->getType().getImplicitArraySize()){
                        symbol->getWritableType().updateImplicitArraySize(unitSymbol->getType().getImplicitArraySize());
                    }
                }
                else if (symbol->getWritableType().isImplicitlySizedArray() && unitSymbol->getType().isSizedArray()) {
                    if (symbol->getWritableType().getImplicitArraySize() > unitSymbol->getType().getOuterArraySize())
                        error(infoSink, "Implicit size of unsized array doesn't match same symbol among multiple shaders.");
                }
                else if (unitSymbol->getType().isImplicitlySizedArray() && symbol->getWritableType().isSizedArray()) {
                    if (unitSymbol->getType().getImplicitArraySize() > symbol->getWritableType().getOuterArraySize())
                        error(infoSink, "Implicit size of unsized array doesn't match same symbol among multiple shaders.");
                }

                // Update implicit array sizes
                // (recursive merge through nested struct members)
                mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType());

                // Check for consistent types/qualification/initializers etc.
                mergeErrorCheck(infoSink, *symbol, *unitSymbol);
            }
            // If different symbols, verify they arn't push_constant since there can only be one per stage
            else if (symbol->getQualifier().isPushConstant() && unitSymbol->getQualifier().isPushConstant() && getStage() == unitStage)
                error(infoSink, "Only one push_constant block is allowed per stage");
        }

        // Check conflicts between preset primitives and sizes of I/O variables among multiple geometry shaders
        if (language == EShLangGeometry && unitStage == EShLangGeometry)
        {
            TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
            if (unitSymbol->isArray() && unitSymbol->getQualifier().storage == EvqVaryingIn && unitSymbol->getQualifier().builtIn == EbvNone)
                if ((unitSymbol->getArraySizes()->isImplicitlySized() &&
                        unitSymbol->getArraySizes()->getImplicitSize() != TQualifier::mapGeometryToSize(getInputPrimitive())) ||
                    (! unitSymbol->getArraySizes()->isImplicitlySized() &&
                        unitSymbol->getArraySizes()->getDimSize(0) != TQualifier::mapGeometryToSize(getInputPrimitive())))
                    error(infoSink, "Not all array sizes match across all geometry shaders in the program");
        }

        if (merge) {
            linkerObjects.push_back(unitLinkerObjects[unitLinkObj]);

            // for anonymous blocks, check that their members don't conflict with other names
            if (unitLinkerObjects[unitLinkObj]->getAsSymbolNode()->getBasicType() == EbtBlock &&
                IsAnonymous(unitLinkerObjects[unitLinkObj]->getAsSymbolNode()->getName())) {
                for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) {
                    TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode();
                    TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode();
                    assert(symbol && unitSymbol);

                    // Report 'name' if it collides with a member of the (anonymous)
                    // unit block — unless locations disambiguate the two.
                    auto checkName = [this, unitSymbol, &infoSink](const TString& name) {
                        for (unsigned int i = 0; i < unitSymbol->getType().getStruct()->size(); ++i) {
                            if (name == (*unitSymbol->getType().getStruct())[i].type->getFieldName()
                                && !((*unitSymbol->getType().getStruct())[i].type->getQualifier().hasLocation()
                                    || unitSymbol->getType().getQualifier().hasLocation())
                                ) {
                                error(infoSink, "Anonymous member name used for global variable or other anonymous member: ");
                                infoSink.info << (*unitSymbol->getType().getStruct())[i].type->getCompleteString() << "\n";
                            }
                        }
                    };

                    if (isSameInterface(symbol, unitSymbol)) {
                        checkName(symbol->getName());

                        // check members of other anonymous blocks
                        if (symbol->getBasicType() == EbtBlock && IsAnonymous(symbol->getName())) {
                            for (unsigned int i = 0; i < symbol->getType().getStruct()->size(); ++i) {
                                checkName((*symbol->getType().getStruct())[i].type->getFieldName());
                            }
                        }
                    }
                }
            }
        }
    }
}
960 
961 // TODO 4.5 link functionality: cull distance array size checking
962 
963 // Recursively merge the implicit array sizes through the objects' respective type trees.
mergeImplicitArraySizes(TType & type,const TType & unitType)964 void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
965 {
966     if (type.isUnsizedArray()) {
967         if (unitType.isUnsizedArray()) {
968             type.updateImplicitArraySize(unitType.getImplicitArraySize());
969             if (unitType.isArrayVariablyIndexed())
970                 type.setArrayVariablyIndexed();
971         } else if (unitType.isSizedArray())
972             type.changeOuterArraySize(unitType.getOuterArraySize());
973     }
974 
975     // Type mismatches are caught and reported after this, just be careful for now.
976     if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size())
977         return;
978 
979     for (int i = 0; i < (int)type.getStruct()->size(); ++i)
980         mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
981 }
982 
983 //
984 // Compare two global objects from two compilation units and see if they match
985 // well enough.  Rules can be different for intra- vs. cross-stage matching.
986 //
987 // This function only does one of intra- or cross-stage matching per call.
988 //
mergeErrorCheck(TInfoSink & infoSink,const TIntermSymbol & symbol,const TIntermSymbol & unitSymbol)989 void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol)
990 {
991     EShLanguage stage = symbol.getStage();
992     EShLanguage unitStage = unitSymbol.getStage();
993     bool crossStage = stage != unitStage;
994     bool writeTypeComparison = false;
995     bool errorReported = false;
996     bool printQualifiers = false;
997     bool printPrecision = false;
998     bool printType = false;
999 
1000     // Types have to match
1001     {
1002         // but, we make an exception if one is an implicit array and the other is sized
1003         // or if the array sizes differ because of the extra array dimension on some in/out boundaries
1004         bool arraysMatch = false;
1005         if (isIoResizeArray(symbol.getType(), stage) || isIoResizeArray(unitSymbol.getType(), unitStage)) {
1006             // if the arrays have an extra dimension because of the stage.
1007             // compare dimensions while ignoring the outer dimension
1008             unsigned int firstDim = isIoResizeArray(symbol.getType(), stage) ? 1 : 0;
1009             unsigned int numDim = symbol.getArraySizes()
1010                 ? symbol.getArraySizes()->getNumDims() : 0;
1011             unsigned int unitFirstDim = isIoResizeArray(unitSymbol.getType(), unitStage) ? 1 : 0;
1012             unsigned int unitNumDim = unitSymbol.getArraySizes()
1013                 ? unitSymbol.getArraySizes()->getNumDims() : 0;
1014             arraysMatch = (numDim - firstDim) == (unitNumDim - unitFirstDim);
1015             // check that array sizes match as well
1016             for (unsigned int i = 0; i < (numDim - firstDim) && arraysMatch; i++) {
1017                 if (symbol.getArraySizes()->getDimSize(firstDim + i) !=
1018                     unitSymbol.getArraySizes()->getDimSize(unitFirstDim + i)) {
1019                     arraysMatch = false;
1020                     break;
1021                 }
1022             }
1023         }
1024         else {
1025             arraysMatch = symbol.getType().sameArrayness(unitSymbol.getType()) ||
1026                 (symbol.getType().isArray() && unitSymbol.getType().isArray() &&
1027                  (symbol.getType().isImplicitlySizedArray() || unitSymbol.getType().isImplicitlySizedArray() ||
1028                   symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()));
1029         }
1030 
1031         int lpidx = -1;
1032         int rpidx = -1;
1033         if (!symbol.getType().sameElementType(unitSymbol.getType(), &lpidx, &rpidx)) {
1034             if (lpidx >= 0 && rpidx >= 0) {
1035                 error(infoSink, "Member names and types must match:", unitStage);
1036                 infoSink.info << "    Block: " << symbol.getType().getTypeName() << "\n";
1037                 infoSink.info << "        " << StageName(stage) << " stage: \""
1038                               << (*symbol.getType().getStruct())[lpidx].type->getCompleteString(true, false, false, true,
1039                                       (*symbol.getType().getStruct())[lpidx].type->getFieldName()) << "\"\n";
1040                 infoSink.info << "        " << StageName(unitStage) << " stage: \""
1041                               << (*unitSymbol.getType().getStruct())[rpidx].type->getCompleteString(true, false, false, true,
1042                                       (*unitSymbol.getType().getStruct())[rpidx].type->getFieldName()) << "\"\n";
1043                 errorReported = true;
1044             } else if (lpidx >= 0 && rpidx == -1) {
1045                   TString errmsg = StageName(stage);
1046                   errmsg.append(" block member has no corresponding member in ").append(StageName(unitStage)).append(" block:");
1047                   error(infoSink, errmsg.c_str(), unitStage);
1048                   infoSink.info << "    " << StageName(stage) << " stage: Block: " << symbol.getType().getTypeName() << ", Member: "
1049                     << (*symbol.getType().getStruct())[lpidx].type->getFieldName() << "\n";
1050                   infoSink.info << "    " << StageName(unitStage) << " stage: Block: " << unitSymbol.getType().getTypeName() << ", Member: n/a \n";
1051                   errorReported = true;
1052             } else if (lpidx == -1 && rpidx >= 0) {
1053                   TString errmsg = StageName(unitStage);
1054                   errmsg.append(" block member has no corresponding member in ").append(StageName(stage)).append(" block:");
1055                   error(infoSink, errmsg.c_str(), unitStage);
1056                   infoSink.info << "    " << StageName(unitStage) << " stage: Block: " << unitSymbol.getType().getTypeName() << ", Member: "
1057                     << (*unitSymbol.getType().getStruct())[rpidx].type->getFieldName() << "\n";
1058                   infoSink.info << "    " << StageName(stage) << " stage: Block: " << symbol.getType().getTypeName() << ", Member: n/a \n";
1059                   errorReported = true;
1060             } else {
1061                   error(infoSink, "Types must match:", unitStage);
1062                   writeTypeComparison = true;
1063                   printType = true;
1064             }
1065         } else if (!arraysMatch) {
1066             error(infoSink, "Array sizes must be compatible:", unitStage);
1067             writeTypeComparison = true;
1068             printType = true;
1069         } else if (!symbol.getType().sameTypeParameters(unitSymbol.getType())) {
1070             error(infoSink, "Type parameters must match:", unitStage);
1071             writeTypeComparison = true;
1072             printType = true;
1073         }
1074     }
1075 
1076     // Interface block  member-wise layout qualifiers have to match
1077     if (symbol.getType().getBasicType() == EbtBlock && unitSymbol.getType().getBasicType() == EbtBlock &&
1078         symbol.getType().getStruct() && unitSymbol.getType().getStruct() &&
1079         symbol.getType().sameStructType(unitSymbol.getType())) {
1080         unsigned int li = 0;
1081         unsigned int ri = 0;
1082         while (li < symbol.getType().getStruct()->size() && ri < unitSymbol.getType().getStruct()->size()) {
1083             if ((*symbol.getType().getStruct())[li].type->hiddenMember()) {
1084                 ++li;
1085                 continue;
1086             }
1087             if ((*unitSymbol.getType().getStruct())[ri].type->hiddenMember()) {
1088                 ++ri;
1089                 continue;
1090             }
1091             const TQualifier& qualifier = (*symbol.getType().getStruct())[li].type->getQualifier();
1092             const TQualifier & unitQualifier = (*unitSymbol.getType().getStruct())[ri].type->getQualifier();
1093             bool layoutQualifierError = false;
1094             if (qualifier.layoutMatrix != unitQualifier.layoutMatrix) {
1095                 error(infoSink, "Interface block member layout matrix qualifier must match:", unitStage);
1096                 layoutQualifierError = true;
1097             }
1098             if (qualifier.layoutOffset != unitQualifier.layoutOffset) {
1099                 error(infoSink, "Interface block member layout offset qualifier must match:", unitStage);
1100                 layoutQualifierError = true;
1101             }
1102             if (qualifier.layoutAlign != unitQualifier.layoutAlign) {
1103                 error(infoSink, "Interface block member layout align qualifier must match:", unitStage);
1104                 layoutQualifierError = true;
1105             }
1106             if (qualifier.layoutLocation != unitQualifier.layoutLocation) {
1107                 error(infoSink, "Interface block member layout location qualifier must match:", unitStage);
1108                 layoutQualifierError = true;
1109             }
1110             if (qualifier.layoutComponent != unitQualifier.layoutComponent) {
1111                 error(infoSink, "Interface block member layout component qualifier must match:", unitStage);
1112                 layoutQualifierError = true;
1113             }
1114             if (layoutQualifierError) {
1115                 infoSink.info << "    " << StageName(stage) << " stage: Block: " << symbol.getType().getTypeName() << ", Member: "
1116                               << (*symbol.getType().getStruct())[li].type->getFieldName() << " \""
1117                               << (*symbol.getType().getStruct())[li].type->getCompleteString(true, true, false, false) << "\"\n";
1118                 infoSink.info << "    " << StageName(unitStage) << " stage: Block: " << unitSymbol.getType().getTypeName() << ", Member: "
1119                               << (*unitSymbol.getType().getStruct())[ri].type->getFieldName() << " \""
1120                               << (*unitSymbol.getType().getStruct())[ri].type->getCompleteString(true, true, false, false) << "\"\n";
1121                 errorReported = true;
1122             }
1123             ++li;
1124             ++ri;
1125         }
1126     }
1127 
1128     bool isInOut = crossStage &&
1129                    ((symbol.getQualifier().storage == EvqVaryingIn && unitSymbol.getQualifier().storage == EvqVaryingOut) ||
1130                    (symbol.getQualifier().storage == EvqVaryingOut && unitSymbol.getQualifier().storage == EvqVaryingIn));
1131 
1132     // Qualifiers have to (almost) match
1133     // Storage...
1134     if (!isInOut && symbol.getQualifier().storage != unitSymbol.getQualifier().storage) {
1135         error(infoSink, "Storage qualifiers must match:", unitStage);
1136         writeTypeComparison = true;
1137         printQualifiers = true;
1138     }
1139 
1140     // Uniform and buffer blocks must either both have an instance name, or
1141     // must both be anonymous. The names don't need to match though.
1142     if (symbol.getQualifier().isUniformOrBuffer() &&
1143         (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()))) {
1144         error(infoSink, "Matched Uniform or Storage blocks must all be anonymous,"
1145                         " or all be named:", unitStage);
1146         writeTypeComparison = true;
1147     }
1148 
1149     if (symbol.getQualifier().storage == unitSymbol.getQualifier().storage &&
1150         (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()) ||
1151          (!IsAnonymous(symbol.getName()) && symbol.getName() != unitSymbol.getName()))) {
1152         warn(infoSink, "Matched shader interfaces are using different instance names.", unitStage);
1153         writeTypeComparison = true;
1154     }
1155 
1156     // Precision...
1157     if (!isInOut && symbol.getQualifier().precision != unitSymbol.getQualifier().precision) {
1158         error(infoSink, "Precision qualifiers must match:", unitStage);
1159         writeTypeComparison = true;
1160         printPrecision = true;
1161     }
1162 
1163     // Invariance...
1164     if (! crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) {
1165         error(infoSink, "Presence of invariant qualifier must match:", unitStage);
1166         writeTypeComparison = true;
1167         printQualifiers = true;
1168     }
1169 
1170     // Precise...
1171     if (! crossStage && symbol.getQualifier().isNoContraction() != unitSymbol.getQualifier().isNoContraction()) {
1172         error(infoSink, "Presence of precise qualifier must match:", unitStage);
1173         writeTypeComparison = true;
1174         printPrecision = true;
1175     }
1176 
1177     // Auxiliary and interpolation...
1178     // "interpolation qualification (e.g., flat) and auxiliary qualification (e.g. centroid) may differ.
1179     //  These mismatches are allowed between any pair of stages ...
1180     //  those provided in the fragment shader supersede those provided in previous stages."
1181     if (!crossStage &&
1182         (symbol.getQualifier().centroid  != unitSymbol.getQualifier().centroid ||
1183         symbol.getQualifier().smooth    != unitSymbol.getQualifier().smooth ||
1184         symbol.getQualifier().flat      != unitSymbol.getQualifier().flat ||
1185         symbol.getQualifier().isSample()!= unitSymbol.getQualifier().isSample() ||
1186         symbol.getQualifier().isPatch() != unitSymbol.getQualifier().isPatch() ||
1187         symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective())) {
1188         error(infoSink, "Interpolation and auxiliary storage qualifiers must match:", unitStage);
1189         writeTypeComparison = true;
1190         printQualifiers = true;
1191     }
1192 
1193     // Memory...
1194     bool memoryQualifierError = false;
1195     if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent) {
1196         error(infoSink, "Memory coherent qualifier must match:", unitStage);
1197         memoryQualifierError = true;
1198     }
1199     if (symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent) {
1200         error(infoSink, "Memory devicecoherent qualifier must match:", unitStage);
1201         memoryQualifierError = true;
1202     }
1203     if (symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent) {
1204         error(infoSink, "Memory queuefamilycoherent qualifier must match:", unitStage);
1205         memoryQualifierError = true;
1206     }
1207     if (symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent) {
1208         error(infoSink, "Memory workgroupcoherent qualifier must match:", unitStage);
1209         memoryQualifierError = true;
1210     }
1211     if (symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent) {
1212         error(infoSink, "Memory subgroupcoherent qualifier must match:", unitStage);
1213         memoryQualifierError = true;
1214     }
1215     if (symbol.getQualifier().shadercallcoherent != unitSymbol.getQualifier().shadercallcoherent) {
1216         error(infoSink, "Memory shadercallcoherent qualifier must match:", unitStage);
1217         memoryQualifierError = true;
1218     }
1219     if (symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate) {
1220         error(infoSink, "Memory nonprivate qualifier must match:", unitStage);
1221         memoryQualifierError = true;
1222     }
1223     if (symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil) {
1224         error(infoSink, "Memory volatil qualifier must match:", unitStage);
1225         memoryQualifierError = true;
1226     }
1227     if (symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict) {
1228         error(infoSink, "Memory restrict qualifier must match:", unitStage);
1229         memoryQualifierError = true;
1230     }
1231     if (symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly) {
1232         error(infoSink, "Memory readonly qualifier must match:", unitStage);
1233         memoryQualifierError = true;
1234     }
1235     if (symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) {
1236         error(infoSink, "Memory writeonly qualifier must match:", unitStage);
1237         memoryQualifierError = true;
1238     }
1239     if (memoryQualifierError) {
1240           writeTypeComparison = true;
1241           printQualifiers = true;
1242     }
1243 
1244     // Layouts...
1245     // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec
1246     //       requires separate user-supplied offset from actual computed offset, but
1247     //       current implementation only has one offset.
1248     bool layoutQualifierError = false;
1249     if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix) {
1250         error(infoSink, "Layout matrix qualifier must match:", unitStage);
1251         layoutQualifierError = true;
1252     }
1253     if (symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking) {
1254         error(infoSink, "Layout packing qualifier must match:", unitStage);
1255         layoutQualifierError = true;
1256     }
1257     if (symbol.getQualifier().hasLocation() && unitSymbol.getQualifier().hasLocation() && symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation) {
1258         error(infoSink, "Layout location qualifier must match:", unitStage);
1259         layoutQualifierError = true;
1260     }
1261     if (symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent) {
1262         error(infoSink, "Layout component qualifier must match:", unitStage);
1263         layoutQualifierError = true;
1264     }
1265     if (symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex) {
1266         error(infoSink, "Layout index qualifier must match:", unitStage);
1267         layoutQualifierError = true;
1268     }
1269     if (symbol.getQualifier().hasBinding() && unitSymbol.getQualifier().hasBinding() && symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding) {
1270         error(infoSink, "Layout binding qualifier must match:", unitStage);
1271         layoutQualifierError = true;
1272     }
1273     if (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset)) {
1274         error(infoSink, "Layout offset qualifier must match:", unitStage);
1275         layoutQualifierError = true;
1276     }
1277     if (layoutQualifierError) {
1278         writeTypeComparison = true;
1279         printQualifiers = true;
1280     }
1281 
1282     // Initializers have to match, if both are present, and if we don't already know the types don't match
1283     if (! writeTypeComparison && ! errorReported) {
1284         if (! symbol.getConstArray().empty() && ! unitSymbol.getConstArray().empty()) {
1285             if (symbol.getConstArray() != unitSymbol.getConstArray()) {
1286                 error(infoSink, "Initializers must match:", unitStage);
1287                 infoSink.info << "    " << symbol.getName() << "\n";
1288             }
1289         }
1290     }
1291 
1292     if (writeTypeComparison) {
1293         if (symbol.getType().getBasicType() == EbtBlock && unitSymbol.getType().getBasicType() == EbtBlock &&
1294             symbol.getType().getStruct() && unitSymbol.getType().getStruct()) {
1295           if (printType) {
1296             infoSink.info << "    " << StageName(stage) << " stage: \"" << symbol.getType().getCompleteString(true, printQualifiers, printPrecision,
1297                                                     printType, symbol.getName(), symbol.getType().getTypeName()) << "\"\n";
1298             infoSink.info << "    " << StageName(unitStage) << " stage: \"" << unitSymbol.getType().getCompleteString(true, printQualifiers, printPrecision,
1299                                                     printType, unitSymbol.getName(), unitSymbol.getType().getTypeName()) << "\"\n";
1300           } else {
1301             infoSink.info << "    " << StageName(stage) << " stage: Block: " << symbol.getType().getTypeName() << " Instance: " << symbol.getName()
1302               << ": \"" << symbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType) << "\"\n";
1303             infoSink.info << "    " << StageName(unitStage) << " stage: Block: " << unitSymbol.getType().getTypeName() << " Instance: " << unitSymbol.getName()
1304               << ": \"" << unitSymbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType) << "\"\n";
1305           }
1306         } else {
1307           if (printType) {
1308             infoSink.info << "    " << StageName(stage) << " stage: \""
1309               << symbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType, symbol.getName()) << "\"\n";
1310             infoSink.info << "    " << StageName(unitStage) << " stage: \""
1311               << unitSymbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType, unitSymbol.getName()) << "\"\n";
1312           } else {
1313             infoSink.info << "    " << StageName(stage) << " stage: " << symbol.getName() << " \""
1314               << symbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType) << "\"\n";
1315             infoSink.info << "    " << StageName(unitStage) << " stage: " << unitSymbol.getName() << " \""
1316               << unitSymbol.getType().getCompleteString(true, printQualifiers, printPrecision, printType) << "\"\n";
1317           }
1318         }
1319     }
1320 }
1321 
sharedBlockCheck(TInfoSink & infoSink)1322 void TIntermediate::sharedBlockCheck(TInfoSink& infoSink)
1323 {
1324     bool has_shared_block = false;
1325     bool has_shared_non_block = false;
1326     TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
1327     for (size_t i = 0; i < linkObjects.size(); ++i) {
1328         const TType& type = linkObjects[i]->getAsTyped()->getType();
1329         const TQualifier& qualifier = type.getQualifier();
1330         if (qualifier.storage == glslang::EvqShared) {
1331             if (type.getBasicType() == glslang::EbtBlock)
1332                 has_shared_block = true;
1333             else
1334                 has_shared_non_block = true;
1335         }
1336     }
1337     if (has_shared_block && has_shared_non_block)
1338         error(infoSink, "cannot mix use of shared variables inside and outside blocks");
1339 }
1340 
1341 //
1342 // Do final link-time error checking of a complete (merged) intermediate representation.
1343 // (Much error checking was done during merging).
1344 //
1345 // Also, lock in defaults of things not set, including array sizes.
1346 //
void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
{
    // Nothing to validate if compilation produced no AST.
    if (getTreeRoot() == nullptr)
        return;

    // GLSL requires an entry point per stage; other sources (e.g. HLSL paths)
    // only warn here.
    if (numEntryPoints < 1) {
        if (getSource() == EShSourceGlsl)
            error(infoSink, "Missing entry point: Each stage requires one entry point");
        else
            warn(infoSink, "Entry point not found");
    }

    // recursion and missing body checking
    checkCallGraphCycles(infoSink);
    checkCallGraphBodies(infoSink, keepUncalled);

    // overlap/alias/missing I/O, etc.
    inOutLocationCheck(infoSink);

    if (getNumPushConstants() > 1)
        error(infoSink, "Only one push_constant block is allowed per stage");

    // invocations: lock in the default of 1 if no layout set it
    if (invocations == TQualifier::layoutNotSet)
        invocations = 1;

    // Mutually exclusive built-in usage checks.
    if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
    if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
        error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_ClipDistance is preferred)");

    if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData")))
        error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs");
    if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData"))
        error(infoSink, "Cannot use both gl_FragColor and gl_FragData");

    // Per transform-feedback buffer: finalize strides and check spec-mandated limits.
    for (size_t b = 0; b < xfbBuffers.size(); ++b) {
        // Align the implicitly computed stride up to the widest component width captured.
        if (xfbBuffers[b].contains64BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 8);
        else if (xfbBuffers[b].contains32BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 4);
        else if (xfbBuffers[b].contains16BitType)
            RoundToPow2(xfbBuffers[b].implicitStride, 2);

        // "It is a compile-time or link-time error to have
        // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
        // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a
        // compile-time or link-time error to have different values specified for the stride for the same buffer."
        if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) {
            error(infoSink, "xfb_stride is too small to hold all buffer entries:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n";
        }
        // No explicit stride declared: lock in the computed one.
        if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd)
            xfbBuffers[b].stride = xfbBuffers[b].implicitStride;

        // "If the buffer is capturing any
        // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a
        // multiple of 4, or a compile-time or link-time error results."
        if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) {
            error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
            error(infoSink, "xfb_stride must be multiple of 4:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }
        // "If the buffer is capturing any
        // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
        else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
            error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
        }

        // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
        // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
        if (xfbBuffers[b].stride > (unsigned int)(4 * resources->maxTransformFeedbackInterleavedComponents)) {
            error(infoSink, "xfb_stride is too large:");
            infoSink.info.prefix(EPrefixError);
            infoSink.info << "    xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources->maxTransformFeedbackInterleavedComponents << "\n";
        }
    }

    // Stage-specific link-time rules: required layouts, defaults, and
    // mutually exclusive built-ins for each shader stage.
    switch (language) {
    case EShLangVertex:
        break;
    case EShLangTessControl:
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify an output layout(vertices=...)");
        break;
    case EShLangTessEvaluation:
        if (getSource() == EShSourceGlsl) {
            if (inputPrimitive == ElgNone)
                error(infoSink, "At least one shader must specify an input layout primitive");
            // Lock in tessellation defaults where the shader left them unset.
            if (vertexSpacing == EvsNone)
                vertexSpacing = EvsEqual;
            if (vertexOrder == EvoNone)
                vertexOrder = EvoCcw;
        }
        break;
    case EShLangGeometry:
        if (inputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an input layout primitive");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        break;
    case EShLangFragment:
        // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in
        // ParseHelper.cpp. So if we reach here, this must be GL_EXT_post_depth_coverage
        // requiring explicit early_fragment_tests
        if (getPostDepthCoverage() && !getEarlyFragmentTests())
            error(infoSink, "post_depth_coverage requires early_fragment_tests");
        break;
    case EShLangCompute:
        sharedBlockCheck(infoSink);
        break;
    case EShLangRayGen:
    case EShLangIntersect:
    case EShLangAnyHit:
    case EShLangClosestHit:
    case EShLangMiss:
    case EShLangCallable:
        if (numShaderRecordBlocks > 1)
            error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage");
        break;
    case EShLangMesh:
        // NV_mesh_shader doesn't allow use of both single-view and per-view builtins.
        if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV"))
            error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV");
        if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV");
        if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV"))
            error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV");
        if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV"))
            error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV");
        if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV"))
            error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV");
        if (outputPrimitive == ElgNone)
            error(infoSink, "At least one shader must specify an output layout primitive");
        if (vertices == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_vertices = value)");
        if (primitives == TQualifier::layoutNotSet)
            error(infoSink, "At least one shader must specify a layout(max_primitives = value)");
        // Mesh shares the task-stage checks below.
        [[fallthrough]];
    case EShLangTask:
        if (numTaskNVBlocks > 1)
            error(infoSink, "Only one taskNV interface block is allowed per shader");
        if (numTaskEXTPayloads > 1)
            error(infoSink, "Only single variable of type taskPayloadSharedEXT is allowed per shader");
        sharedBlockCheck(infoSink);
        break;
    default:
        error(infoSink, "Unknown Stage.");
        break;
    }

    // Process the tree for any node-specific work.
    class TFinalLinkTraverser : public TIntermTraverser {
    public:
        TFinalLinkTraverser() { }
        virtual ~TFinalLinkTraverser() { }

        virtual void visitSymbol(TIntermSymbol* symbol)
        {
            // Implicitly size arrays.
            // If an unsized array is left as unsized, it effectively
            // becomes run-time sized.
            symbol->getWritableType().adoptImplicitArraySizes(false);
        }
    } finalLinkTraverser;

    treeRoot->traverse(&finalLinkTraverser);
}
1524 
1525 //
1526 // See if the call graph contains any static recursion, which is disallowed
1527 // by the specification.
1528 //
void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink)
{
    // Clear fields we'll use for this.
    for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
        call->visited = false;
        call->currentPath = false;
        call->errorGiven = false;
    }

    //
    // Loop, looking for a new connected subgraph.  One subgraph is handled per loop iteration.
    //

    TCall* newRoot;
    do {
        // See if we have unvisited parts of the graph.
        newRoot = nullptr;
        for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
            if (! call->visited) {
                newRoot = &(*call);
                break;
            }
        }

        // If not, we are done.
        if (! newRoot)
            break;

        // Otherwise, we found a new subgraph, process it:
        // See what all can be reached by this new root, and if any of
        // that is recursive.  This is done by depth-first traversals, seeing
        // if a new call is found that was already in the currentPath (a back edge),
        // thereby detecting recursion.
        //
        // The DFS stack is explicit (std::list used as a stack of in-progress calls)
        // rather than using actual recursion.
        std::list<TCall*> stack;
        newRoot->currentPath = true; // currentPath will be true iff it is on the stack
        stack.push_back(newRoot);
        while (! stack.empty()) {
            // get a caller
            TCall* call = stack.back();

            // Add to the stack just one callee.
            // This algorithm always terminates, because only !visited and !currentPath causes a push
            // and all pushes change currentPath to true, and all pops change visited to true.
            TGraph::iterator child = callGraph.begin();
            for (; child != callGraph.end(); ++child) {

                // If we already visited this node, its whole subgraph has already been processed, so skip it.
                if (child->visited)
                    continue;

                // An edge exists when the top-of-stack callee is some node's caller.
                if (call->callee == child->caller) {
                    if (child->currentPath) {
                        // Then, we found a back edge
                        if (! child->errorGiven) {
                            error(infoSink, "Recursion detected:");
                            infoSink.info << "    " << call->callee << " calling " << child->callee << "\n";
                            child->errorGiven = true;  // report each cycle only once
                            recursive = true;
                        }
                    } else {
                        // Descend into this callee; resume scanning its callees next iteration.
                        child->currentPath = true;
                        stack.push_back(&(*child));
                        break;
                    }
                }
            }
            if (child == callGraph.end()) {
                // no more callees, we bottomed out, never look at this node again
                stack.back()->currentPath = false;
                stack.back()->visited = true;
                stack.pop_back();
            }
        }  // end while, meaning nothing left to process in this subtree

    } while (newRoot);  // redundant loop check; should always exit via the 'break' above
}
1605 
1606 //
1607 // See which functions are reachable from the entry point and which have bodies.
1608 // Reachable ones with missing bodies are errors.
1609 // Unreachable bodies are dead code.
1610 //
checkCallGraphBodies(TInfoSink & infoSink,bool keepUncalled)1611 void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled)
1612 {
1613     // Clear fields we'll use for this.
1614     for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1615         call->visited = false;
1616         call->calleeBodyPosition = -1;
1617     }
1618 
1619     // The top level of the AST includes function definitions (bodies).
1620     // Compare these to function calls in the call graph.
1621     // We'll end up knowing which have bodies, and if so,
1622     // how to map the call-graph node to the location in the AST.
1623     TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence();
1624     std::vector<bool> reachable(functionSequence.size(), true); // so that non-functions are reachable
1625     for (int f = 0; f < (int)functionSequence.size(); ++f) {
1626         glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate();
1627         if (node && (node->getOp() == glslang::EOpFunction)) {
1628             if (node->getName().compare(getEntryPointMangledName().c_str()) != 0)
1629                 reachable[f] = false; // so that function bodies are unreachable, until proven otherwise
1630             for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1631                 if (call->callee == node->getName())
1632                     call->calleeBodyPosition = f;
1633             }
1634         }
1635     }
1636 
1637     // Start call-graph traversal by visiting the entry point nodes.
1638     for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1639         if (call->caller.compare(getEntryPointMangledName().c_str()) == 0)
1640             call->visited = true;
1641     }
1642 
1643     // Propagate 'visited' through the call-graph to every part of the graph it
1644     // can reach (seeded with the entry-point setting above).
1645     bool changed;
1646     do {
1647         changed = false;
1648         for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) {
1649             if (call1->visited) {
1650                 for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) {
1651                     if (! call2->visited) {
1652                         if (call1->callee == call2->caller) {
1653                             changed = true;
1654                             call2->visited = true;
1655                         }
1656                     }
1657                 }
1658             }
1659         }
1660     } while (changed);
1661 
1662     // Any call-graph node set to visited but without a callee body is an error.
1663     for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) {
1664         if (call->visited) {
1665             if (call->calleeBodyPosition == -1) {
1666                 error(infoSink, "No function definition (body) found: ");
1667                 infoSink.info << "    " << call->callee << "\n";
1668             } else
1669                 reachable[call->calleeBodyPosition] = true;
1670         }
1671     }
1672 
1673     // Bodies in the AST not reached by the call graph are dead;
1674     // clear them out, since they can't be reached and also can't
1675     // be translated further due to possibility of being ill defined.
1676     if (! keepUncalled) {
1677         for (int f = 0; f < (int)functionSequence.size(); ++f) {
1678             if (! reachable[f])
1679             {
1680                 resetTopLevelUncalledStatus(functionSequence[f]->getAsAggregate()->getName());
1681                 functionSequence[f] = nullptr;
1682             }
1683         }
1684         functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end());
1685     }
1686 }
1687 
1688 //
1689 // Satisfy rules for location qualifiers on inputs and outputs
1690 //
inOutLocationCheck(TInfoSink & infoSink)1691 void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
1692 {
1693     // ES 3.0 requires all outputs to have location qualifiers if there is more than one output
1694     bool fragOutWithNoLocation = false;
1695     int numFragOut = 0;
1696 
1697     // TODO: linker functionality: location collision checking
1698 
1699     TIntermSequence& linkObjects = findLinkerObjects()->getSequence();
1700     for (size_t i = 0; i < linkObjects.size(); ++i) {
1701         const TType& type = linkObjects[i]->getAsTyped()->getType();
1702         const TQualifier& qualifier = type.getQualifier();
1703         if (language == EShLangFragment) {
1704             if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) {
1705                 ++numFragOut;
1706                 if (!qualifier.hasAnyLocation())
1707                     fragOutWithNoLocation = true;
1708             }
1709         }
1710     }
1711 
1712     if (isEsProfile()) {
1713         if (numFragOut > 1 && fragOutWithNoLocation)
1714             error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
1715     }
1716 }
1717 
findLinkerObjects() const1718 TIntermAggregate* TIntermediate::findLinkerObjects() const
1719 {
1720     // Get the top-level globals
1721     TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
1722 
1723     // Get the last member of the sequences, expected to be the linker-object lists
1724     assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects);
1725 
1726     return globals.back()->getAsAggregate();
1727 }
1728 
1729 // See if a variable was both a user-declared output and used.
1730 // Note: the spec discusses writing to one, but this looks at read or write, which
1731 // is more useful, and perhaps the spec should be changed to reflect that.
userOutputUsed() const1732 bool TIntermediate::userOutputUsed() const
1733 {
1734     const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence();
1735 
1736     bool found = false;
1737     for (size_t i = 0; i < linkerObjects.size(); ++i) {
1738         const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode();
1739         if (symbolNode.getQualifier().storage == EvqVaryingOut &&
1740             symbolNode.getName().compare(0, 3, "gl_") != 0 &&
1741             inIoAccessed(symbolNode.getName())) {
1742             found = true;
1743             break;
1744         }
1745     }
1746 
1747     return found;
1748 }
1749 
1750 // Accumulate locations used for inputs, outputs, and uniforms, payload, callable data, and tileImageEXT
1751 // and check for collisions as the accumulation is done.
1752 //
1753 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1754 //
1755 // typeCollision is set to true if there is no direct collision, but the types in the same location
1756 // are different.
1757 //
addUsedLocation(const TQualifier & qualifier,const TType & type,bool & typeCollision)1758 int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision)
1759 {
1760     typeCollision = false;
1761 
1762     int set;
1763     if (qualifier.isPipeInput())
1764         set = 0;
1765     else if (qualifier.isPipeOutput())
1766         set = 1;
1767     else if (qualifier.storage == EvqUniform)
1768         set = 2;
1769     else if (qualifier.storage == EvqBuffer)
1770         set = 3;
1771     else if (qualifier.storage == EvqTileImageEXT)
1772         set = 4;
1773     else if (qualifier.isAnyPayload())
1774         set = 0;
1775     else if (qualifier.isAnyCallable())
1776         set = 1;
1777     else if (qualifier.isHitObjectAttrNV())
1778         set = 2;
1779     else
1780         return -1;
1781 
1782     int size;
1783     if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) {
1784         size = 1;
1785     } else if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
1786         if (type.isSizedArray())
1787             size = type.getCumulativeArraySize();
1788         else
1789             size = 1;
1790     } else {
1791         // Strip off the outer array dimension for those having an extra one.
1792         if (type.isArray() && qualifier.isArrayedIo(language)) {
1793             TType elementType(type, 0);
1794             size = computeTypeLocationSize(elementType, language);
1795         } else
1796             size = computeTypeLocationSize(type, language);
1797     }
1798 
1799     // Locations, and components within locations.
1800     //
1801     // Almost always, dealing with components means a single location is involved.
1802     // The exception is a dvec3. From the spec:
1803     //
1804     // "A dvec3 will consume all four components of the first location and components 0 and 1 of
1805     // the second location. This leaves components 2 and 3 available for other component-qualified
1806     // declarations."
1807     //
1808     // That means, without ever mentioning a component, a component range
1809     // for a different location gets specified, if it's not a vertex shader input. (!)
1810     // (A vertex shader input will show using only one location, even for a dvec3/4.)
1811     //
1812     // So, for the case of dvec3, we need two independent ioRanges.
1813     //
1814     // For raytracing IO (payloads and callabledata) each declaration occupies a single
1815     // slot irrespective of type.
1816     int collision = -1; // no collision
1817     if (qualifier.isAnyPayload() || qualifier.isAnyCallable() || qualifier.isHitObjectAttrNV()) {
1818         TRange range(qualifier.layoutLocation, qualifier.layoutLocation);
1819         collision = checkLocationRT(set, qualifier.layoutLocation);
1820         if (collision < 0)
1821             usedIoRT[set].push_back(range);
1822         return collision;
1823     }
1824     if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
1825         (qualifier.isPipeInput() || qualifier.isPipeOutput())) {
1826         // Dealing with dvec3 in/out split across two locations.
1827         // Need two io-ranges.
1828         // The case where the dvec3 doesn't start at component 0 was previously caught as overflow.
1829 
1830         // First range:
1831         TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation);
1832         TRange componentRange(0, 3);
1833         TIoRange range(locationRange, componentRange, type.getBasicType(), 0, qualifier.centroid, qualifier.smooth, qualifier.flat, qualifier.sample, qualifier.patch);
1834 
1835         // check for collisions
1836         collision = checkLocationRange(set, range, type, typeCollision);
1837         if (collision < 0) {
1838             usedIo[set].push_back(range);
1839 
1840             // Second range:
1841             TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1);
1842             TRange componentRange2(0, 1);
1843             TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0, qualifier.centroid, qualifier.smooth, qualifier.flat, qualifier.sample, qualifier.patch);
1844 
1845             // check for collisions
1846             collision = checkLocationRange(set, range2, type, typeCollision);
1847             if (collision < 0)
1848                 usedIo[set].push_back(range2);
1849         }
1850         return collision;
1851     }
1852 
1853     // Not a dvec3 in/out split across two locations, generic path.
1854     // Need a single IO-range block.
1855 
1856     TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1);
1857     TRange componentRange(0, 3);
1858     if (qualifier.hasComponent() || type.getVectorSize() > 0) {
1859         int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1);
1860         if (qualifier.hasComponent())
1861             componentRange.start = qualifier.layoutComponent;
1862         componentRange.last  = componentRange.start + consumedComponents - 1;
1863     }
1864 
1865     // combine location and component ranges
1866     TBasicType basicTy = type.getBasicType();
1867     if (basicTy == EbtSampler && type.getSampler().isAttachmentEXT())
1868         basicTy = type.getSampler().type;
1869     TIoRange range(locationRange, componentRange, basicTy, qualifier.hasIndex() ? qualifier.getIndex() : 0, qualifier.centroid, qualifier.smooth, qualifier.flat, qualifier.sample, qualifier.patch);
1870 
1871     // check for collisions, except for vertex inputs on desktop targeting OpenGL
1872     if (! (!isEsProfile() && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
1873         collision = checkLocationRange(set, range, type, typeCollision);
1874 
1875     if (collision < 0)
1876         usedIo[set].push_back(range);
1877 
1878     return collision;
1879 }
1880 
1881 // Check that two types can be stored in different components in the same location.
1882 // They must be the same type, except signed/unsigned integers are considered compatible.
checkCompatibleTypes(TBasicType t1,TBasicType t2)1883 static bool checkCompatibleTypes(TBasicType t1, TBasicType t2) {
1884     if (t1 != t2) {
1885         if ((t1 == EbtInt8 && t2 == EbtUint8) ||
1886             (t2 == EbtInt8 && t1 == EbtUint8) ||
1887             (t1 == EbtInt16 && t2 == EbtUint16) ||
1888             (t2 == EbtInt16 && t1 == EbtUint16)||
1889             (t1 == EbtInt && t2 == EbtUint) ||
1890             (t2 == EbtInt && t1 == EbtUint)||
1891             (t1 == EbtInt64 && t2 == EbtUint64) ||
1892             (t2 == EbtInt64 && t1 == EbtUint64)) {
1893             return true;
1894         }
1895     }
1896     return t1 == t2;
1897 }
1898 
1899 // Compare a new (the passed in) 'range' against the existing set, and see
1900 // if there are any collisions.
1901 //
1902 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1903 //
// Compare 'range' against everything already recorded in usedIo[set].
// Reports two kinds of problems via the return value:
//   - a component-level overlap (full TIoRange overlap), or
//   - same-location aliasing with mismatched type/interpolation qualifiers
//     (in which case 'typeCollision' is also set to true).
// Returns < 0 if no collision, otherwise a location number involved in the collision.
int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision)
{
    for (size_t r = 0; r < usedIo[set].size(); ++r) {
        if (range.overlap(usedIo[set][r])) {
            // there is a collision; pick one
            return std::max(range.location.start, usedIo[set][r].location.start);
        } else if (range.location.overlap(usedIo[set][r].location) &&
                   (!checkCompatibleTypes(type.getBasicType(), usedIo[set][r].basicType) ||
                    type.getQualifier().centroid != usedIo[set][r].centroid ||
                    type.getQualifier().smooth != usedIo[set][r].smooth ||
                    type.getQualifier().flat != usedIo[set][r].flat ||
                    type.getQualifier().sample != usedIo[set][r].sample ||
                    type.getQualifier().patch != usedIo[set][r].patch)) {
            // aliased-type mismatch: the locations overlap but the components don't,
            // which is only legal when basic type and interpolation qualifiers agree
            typeCollision = true;
            return std::max(range.location.start, usedIo[set][r].location.start);
        }
    }

    // check typeCollision between tileImageEXT and out
    if (set == 4 || set == 1) {
      // if the set is "tileImageEXT", check against "out" and vice versa
      int againstSet = (set == 4) ? 1 : 4;
      for (size_t r = 0; r < usedIo[againstSet].size(); ++r) {
        if (range.location.overlap(usedIo[againstSet][r].location) && type.getBasicType() != usedIo[againstSet][r].basicType) {
            // aliased-type mismatch
            typeCollision = true;
            return std::max(range.location.start, usedIo[againstSet][r].location.start);
        }
      }
    }

    return -1; // no collision
}
1938 
checkLocationRT(int set,int location)1939 int TIntermediate::checkLocationRT(int set, int location) {
1940     TRange range(location, location);
1941     for (size_t r = 0; r < usedIoRT[set].size(); ++r) {
1942         if (range.overlap(usedIoRT[set][r])) {
1943             return range.start;
1944         }
1945     }
1946     return -1; // no collision
1947 }
1948 
1949 // Accumulate bindings and offsets, and check for collisions
1950 // as the accumulation is done.
1951 //
1952 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
1953 //
addUsedOffsets(int binding,int offset,int numOffsets)1954 int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets)
1955 {
1956     TRange bindingRange(binding, binding);
1957     TRange offsetRange(offset, offset + numOffsets - 1);
1958     TOffsetRange range(bindingRange, offsetRange);
1959 
1960     // check for collisions, except for vertex inputs on desktop
1961     for (size_t r = 0; r < usedAtomics.size(); ++r) {
1962         if (range.overlap(usedAtomics[r])) {
1963             // there is a collision; pick one
1964             return std::max(offset, usedAtomics[r].offset.start);
1965         }
1966     }
1967 
1968     usedAtomics.push_back(range);
1969 
1970     return -1; // no collision
1971 }
1972 
1973 // Accumulate used constant_id values.
1974 //
1975 // Return false is one was already used.
addUsedConstantId(int id)1976 bool TIntermediate::addUsedConstantId(int id)
1977 {
1978     if (usedConstantId.find(id) != usedConstantId.end())
1979         return false;
1980 
1981     usedConstantId.insert(id);
1982 
1983     return true;
1984 }
1985 
1986 // Recursively figure out how many locations are used up by an input or output type.
1987 // Return the size of type, as measured by "locations".
// Recursively figure out how many locations are used up by an input or output type.
// Return the size of type, as measured by "locations".
// 'stage' matters because vertex-shader inputs have special single-location rules.
int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
{
    // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n
    // consecutive locations..."
    if (type.isArray()) {
        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        // TODO: are there valid cases of having an unsized array with a location?  If so, running this code too early.
        TType elementType(type, 0);
        if (type.isSizedArray() && !type.getQualifier().isPerView())
            return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
        else {
            // Per-view (and unsized) arrays are measured as a single element.
            // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
            elementType.getQualifier().perViewNV = false;
            return computeTypeLocationSize(elementType, stage);
        }
    }

    // "The locations consumed by block and structure members are determined by applying the rules above
    // recursively..."
    if (type.isStruct()) {
        int size = 0;
        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
            TType memberType(type, member);
            size += computeTypeLocationSize(memberType, stage);
        }
        return size;
    }

    // ES: "If a shader input is any scalar or vector type, it will consume a single location."

    // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex
    // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while
    // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will
    // consume only a single location, in all stages."
    if (type.isScalar())
        return 1;
    if (type.isVector()) {
        // Vertex inputs always consume one location, even dvec3/dvec4.
        if (stage == EShLangVertex && type.getQualifier().isPipeInput())
            return 1;
        if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2)
            return 2;
        else
            return 1;
    }

    // "If the declared input is an n x m single- or double-precision matrix, ...
    // The number of locations assigned for each matrix will be the same as
    // for an n-element array of m-component vectors..."
    if (type.isMatrix()) {
        TType columnType(type, 0);
        return type.getMatrixCols() * computeTypeLocationSize(columnType, stage);
    }

    assert(0); // all type categories should have been handled above
    return 1;
}
2044 
2045 // Same as computeTypeLocationSize but for uniforms
computeTypeUniformLocationSize(const TType & type)2046 int TIntermediate::computeTypeUniformLocationSize(const TType& type)
2047 {
2048     // "Individual elements of a uniform array are assigned
2049     // consecutive locations with the first element taking location
2050     // location."
2051     if (type.isArray()) {
2052         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
2053         TType elementType(type, 0);
2054         if (type.isSizedArray()) {
2055             return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType);
2056         } else {
2057             // TODO: are there valid cases of having an implicitly-sized array with a location?  If so, running this code too early.
2058             return computeTypeUniformLocationSize(elementType);
2059         }
2060     }
2061 
2062     // "Each subsequent inner-most member or element gets incremental
2063     // locations for the entire structure or array."
2064     if (type.isStruct()) {
2065         int size = 0;
2066         for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
2067             TType memberType(type, member);
2068             size += computeTypeUniformLocationSize(memberType);
2069         }
2070         return size;
2071     }
2072 
2073     return 1;
2074 }
2075 
2076 // Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
2077 //
2078 // Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
2079 //
// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
// Also grows the buffer's implicit stride to cover this object.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
int TIntermediate::addXfbBufferOffset(const TType& type)
{
    const TQualifier& qualifier = type.getQualifier();

    // Callers must have validated that both xfb_offset and xfb_buffer are present.
    assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
    TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];

    // compute the range; computeTypeXfbSize also records which bit-widths the
    // buffer now contains, which later drives stride alignment
    unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
    buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
    TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);

    // check for collisions
    for (size_t r = 0; r < buffer.ranges.size(); ++r) {
        if (range.overlap(buffer.ranges[r])) {
            // there is a collision; pick an example to return
            return std::max(range.start, buffer.ranges[r].start);
        }
    }

    buffer.ranges.push_back(range);

    return -1;  // no collision
}
2104 
2105 // Recursively figure out how many bytes of xfb buffer are used by the given type.
2106 // Return the size of type, in bytes.
2107 // Sets contains64BitType to true if the type contains a 64-bit data type.
2108 // Sets contains32BitType to true if the type contains a 32-bit data type.
2109 // Sets contains16BitType to true if the type contains a 16-bit data type.
2110 // N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
computeTypeXfbSize(const TType & type,bool & contains64BitType,bool & contains32BitType,bool & contains16BitType) const2111 unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
2112 {
2113     // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
2114     // and the space taken in the buffer will be a multiple of 8.
2115     // ...within the qualified entity, subsequent components are each
2116     // assigned, in order, to the next available offset aligned to a multiple of
2117     // that component's size.  Aggregate types are flattened down to the component
2118     // level to get this sequence of components."
2119 
2120     if (type.isSizedArray()) {
2121         // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
2122         // Unsized array use to xfb should be a compile error.
2123         TType elementType(type, 0);
2124         return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains16BitType, contains16BitType);
2125     }
2126 
2127     if (type.isStruct()) {
2128         unsigned int size = 0;
2129         bool structContains64BitType = false;
2130         bool structContains32BitType = false;
2131         bool structContains16BitType = false;
2132         for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
2133             TType memberType(type, member);
2134             // "... if applied to
2135             // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
2136             // and the space taken in the buffer will be a multiple of 8."
2137             bool memberContains64BitType = false;
2138             bool memberContains32BitType = false;
2139             bool memberContains16BitType = false;
2140             int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
2141             if (memberContains64BitType) {
2142                 structContains64BitType = true;
2143                 RoundToPow2(size, 8);
2144             } else if (memberContains32BitType) {
2145                 structContains32BitType = true;
2146                 RoundToPow2(size, 4);
2147             } else if (memberContains16BitType) {
2148                 structContains16BitType = true;
2149                 RoundToPow2(size, 2);
2150             }
2151             size += memberSize;
2152         }
2153 
2154         if (structContains64BitType) {
2155             contains64BitType = true;
2156             RoundToPow2(size, 8);
2157         } else if (structContains32BitType) {
2158             contains32BitType = true;
2159             RoundToPow2(size, 4);
2160         } else if (structContains16BitType) {
2161             contains16BitType = true;
2162             RoundToPow2(size, 2);
2163         }
2164         return size;
2165     }
2166 
2167     int numComponents {0};
2168     if (type.isScalar())
2169         numComponents = 1;
2170     else if (type.isVector())
2171         numComponents = type.getVectorSize();
2172     else if (type.isMatrix())
2173         numComponents = type.getMatrixCols() * type.getMatrixRows();
2174     else {
2175         assert(0);
2176         numComponents = 1;
2177     }
2178 
2179     if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
2180         contains64BitType = true;
2181         return 8 * numComponents;
2182     } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
2183         contains16BitType = true;
2184         return 2 * numComponents;
2185     } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8)
2186         return numComponents;
2187     else {
2188         contains32BitType = true;
2189         return 4 * numComponents;
2190     }
2191 }
2192 
// std140 rounds the base alignment of arrays and structures up to that of a vec4: 16 bytes.
const int baseAlignmentVec4Std140 = 16;
2194 
2195 // Return the size and alignment of a component of the given type.
2196 // The size is returned in the 'size' parameter
2197 // Return value is the alignment..
getBaseAlignmentScalar(const TType & type,int & size)2198 int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
2199 {
2200     switch (type.getBasicType()) {
2201     case EbtInt64:
2202     case EbtUint64:
2203     case EbtDouble:  size = 8; return 8;
2204     case EbtFloat16: size = 2; return 2;
2205     case EbtInt8:
2206     case EbtUint8:   size = 1; return 1;
2207     case EbtInt16:
2208     case EbtUint16:  size = 2; return 2;
2209     case EbtReference: size = 8; return 8;
2210     case EbtSampler:
2211     {
2212         if (type.isBindlessImage() || type.isBindlessTexture()) {
2213             size = 8; return 8;
2214         }
2215         else {
2216             size = 4; return 4;
2217         }
2218     }
2219     default:         size = 4; return 4;
2220     }
2221 }
2222 
2223 // Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout
2224 // Operates recursively.
2225 //
2226 // If std140 is true, it does the rounding up to vec4 size required by std140,
2227 // otherwise it does not, yielding std430 rules.
2228 //
2229 // The size is returned in the 'size' parameter
2230 //
2231 // The stride is only non-0 for arrays or matrices, and is the stride of the
2232 // top-level object nested within the type.  E.g., for an array of matrices,
2233 // it is the distances needed between matrices, despite the rules saying the
2234 // stride comes from the flattening down to vectors.
2235 //
2236 // Return value is the alignment of the type.
int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
{
    int alignment;

    bool std140 = layoutPacking == glslang::ElpStd140;
    // When using the std140 storage layout, structures will be laid out in buffer
    // storage with its members stored in monotonically increasing order based on their
    // location in the declaration. A structure and each structure member have a base
    // offset and a base alignment, from which an aligned offset is computed by rounding
    // the base offset up to a multiple of the base alignment. The base offset of the first
    // member of a structure is taken from the aligned offset of the structure itself. The
    // base offset of all other structure members is derived by taking the offset of the
    // last basic machine unit consumed by the previous member and adding one. Each
    // structure member is stored in memory at its aligned offset. The members of a top-
    // level uniform block are laid out in buffer storage by treating the uniform block as
    // a structure with a base offset of zero.
    //
    //   1. If the member is a scalar consuming N basic machine units, the base alignment is N.
    //
    //   2. If the member is a two- or four-component vector with components consuming N basic
    //      machine units, the base alignment is 2N or 4N, respectively.
    //
    //   3. If the member is a three-component vector with components consuming N
    //      basic machine units, the base alignment is 4N.
    //
    //   4. If the member is an array of scalars or vectors, the base alignment and array
    //      stride are set to match the base alignment of a single array element, according
    //      to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
    //      array may have padding at the end; the base offset of the member following
    //      the array is rounded up to the next multiple of the base alignment.
    //
    //   5. If the member is a column-major matrix with C columns and R rows, the
    //      matrix is stored identically to an array of C column vectors with R
    //      components each, according to rule (4).
    //
    //   6. If the member is an array of S column-major matrices with C columns and
    //      R rows, the matrix is stored identically to a row of S X C column vectors
    //      with R components each, according to rule (4).
    //
    //   7. If the member is a row-major matrix with C columns and R rows, the matrix
    //      is stored identically to an array of R row vectors with C components each,
    //      according to rule (4).
    //
    //   8. If the member is an array of S row-major matrices with C columns and R
    //      rows, the matrix is stored identically to a row of S X R row vectors with C
    //      components each, according to rule (4).
    //
    //   9. If the member is a structure, the base alignment of the structure is N , where
    //      N is the largest base alignment value of any    of its members, and rounded
    //      up to the base alignment of a vec4. The individual members of this substructure
    //      are then assigned offsets by applying this set of rules recursively,
    //      where the base offset of the first member of the sub-structure is equal to the
    //      aligned offset of the structure. The structure may have padding at the end;
    //      the base offset of the member following the sub-structure is rounded up to
    //      the next multiple of the base alignment of the structure.
    //
    //   10. If the member is an array of S structures, the S elements of the array are laid
    //       out in order, according to rule (9).
    //
    //   Assuming, for rule 10:  The stride is the same as the size of an element.

    stride = 0;
    int dummyStride;  // receives nested strides that callers don't need

    // rules 4, 6, 8, and 10
    if (type.isArray()) {
        // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
        TType derefType(type, 0);
        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size;  // uses full matrix size for stride of an array of matrices (not quite what rule 6/8, but what's expected)
                        // uses the assumption for rule 10 in the comment above
        // use one element to represent the last member of SSBO which is unsized array
        int arraySize = (type.isUnsizedArray() && (type.getOuterArraySize() == 0)) ? 1 : type.getOuterArraySize();
        size = stride * arraySize;
        return alignment;
    }

    // rule 9
    if (type.getBasicType() == EbtStruct || type.getBasicType() == EbtBlock) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        // std140 structs are at least vec4-aligned even if all members are smaller
        int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking,
                                                   (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        // The structure may have padding at the end; the base offset of
        // the member following the sub-structure is rounded up to the next
        // multiple of the base alignment of the structure.
        RoundToPow2(size, maxAlignment);

        return maxAlignment;
    }

    // rule 1
    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    // rules 2 and 3
    if (type.isVector()) {
        int scalarAlign = getBaseAlignmentScalar(type, size);
        switch (type.getVectorSize()) {
        case 1: // HLSL has this, GLSL does not
            return scalarAlign;
        case 2:
            size *= 2;
            return 2 * scalarAlign;
        default:
            // vec3 and vec4 both align to 4N (rule 3 / rule 2)
            size *= type.getVectorSize();
            return 4 * scalarAlign;
        }
    }

    // rules 5 and 7
    if (type.isMatrix()) {
        // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows
        TType derefType(type, 0, rowMajor);

        alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor);
        if (std140)
            alignment = std::max(baseAlignmentVec4Std140, alignment);
        RoundToPow2(size, alignment);
        stride = size;  // use intra-matrix stride for stride of a just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0);  // all cases should be covered above
    size = baseAlignmentVec4Std140;
    return baseAlignmentVec4Std140;
}
2383 
2384 // To aid the basic HLSL rule about crossing vec4 boundaries.
improperStraddle(const TType & type,int size,int offset,bool vectorLike)2385 bool TIntermediate::improperStraddle(const TType& type, int size, int offset, bool vectorLike)
2386 {
2387     if (! vectorLike || type.isArray())
2388         return false;
2389 
2390     return size <= 16 ? offset / 16 != (offset + size - 1) / 16
2391                       : offset % 16 != 0;
2392 }
2393 
// Scalar ("scalar block layout") counterpart of getBaseAlignment: alignment is
// always that of the underlying scalar component, with no vec4 rounding.
// The size is returned in 'size'; 'stride' is non-0 only for arrays and matrices.
// Return value is the alignment of the type.
int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor)
{
    int alignment;

    stride = 0;
    int dummyStride;  // receives nested strides that callers don't need

    if (type.isArray()) {
        TType derefType(type, 0);
        alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);

        stride = size;
        RoundToPow2(stride, alignment);

        // Last element is not padded out to the full stride.
        // NOTE(review): assumes a sized array; if getOuterArraySize() could be 0
        // here (unsized array), this would go negative — confirm callers guard that.
        size = stride * (type.getOuterArraySize() - 1) + size;
        return alignment;
    }

    if (type.getBasicType() == EbtStruct) {
        const TTypeList& memberList = *type.getStruct();

        size = 0;
        int maxAlignment = 0;
        for (size_t m = 0; m < memberList.size(); ++m) {
            int memberSize;
            // modify just the children's view of matrix layout, if there is one for this member
            TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
            int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride,
                                                     (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor);
            maxAlignment = std::max(maxAlignment, memberAlignment);
            RoundToPow2(size, memberAlignment);
            size += memberSize;
        }

        return maxAlignment;
    }

    if (type.isScalar())
        return getBaseAlignmentScalar(type, size);

    if (type.isVector()) {
        // Vectors align like their component scalar (no 2N/4N rounding).
        int scalarAlign = getBaseAlignmentScalar(type, size);

        size *= type.getVectorSize();
        return scalarAlign;
    }

    if (type.isMatrix()) {
        TType derefType(type, 0, rowMajor);

        alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor);

        stride = size;  // use intra-matrix stride for stride of a just a matrix
        if (rowMajor)
            size = stride * type.getMatrixRows();
        else
            size = stride * type.getMatrixCols();

        return alignment;
    }

    assert(0);  // all cases should be covered above
    size = 1;
    return 1;
}
2459 
getMemberAlignment(const TType & type,int & size,int & stride,TLayoutPacking layoutPacking,bool rowMajor)2460 int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor)
2461 {
2462     if (layoutPacking == glslang::ElpScalar) {
2463         return getScalarAlignment(type, size, stride, rowMajor);
2464     } else {
2465         return getBaseAlignment(type, size, stride, layoutPacking, rowMajor);
2466     }
2467 }
2468 
2469 // shared calculation by getOffset and getOffsets
updateOffset(const TType & parentType,const TType & memberType,int & offset,int & memberSize)2470 void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
2471 {
2472     int dummyStride;
2473 
2474     // modify just the children's view of matrix layout, if there is one for this member
2475     TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
2476     int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
2477                                              parentType.getQualifier().layoutPacking,
2478                                              subMatrixLayout != ElmNone
2479                                                  ? subMatrixLayout == ElmRowMajor
2480                                                  : parentType.getQualifier().layoutMatrix == ElmRowMajor);
2481     RoundToPow2(offset, memberAlignment);
2482 }
2483 
2484 // Lookup or calculate the offset of a block member, using the recursively
2485 // defined block offset rules.
getOffset(const TType & type,int index)2486 int TIntermediate::getOffset(const TType& type, int index)
2487 {
2488     const TTypeList& memberList = *type.getStruct();
2489 
2490     // Don't calculate offset if one is present, it could be user supplied
2491     // and different than what would be calculated.  That is, this is faster,
2492     // but not just an optimization.
2493     if (memberList[index].type->getQualifier().hasOffset())
2494         return memberList[index].type->getQualifier().layoutOffset;
2495 
2496     int memberSize = 0;
2497     int offset = 0;
2498     for (int m = 0; m <= index; ++m) {
2499         updateOffset(type, *memberList[m].type, offset, memberSize);
2500 
2501         if (m < index)
2502             offset += memberSize;
2503     }
2504 
2505     return offset;
2506 }
2507 
2508 // Calculate the block data size.
2509 // Block arrayness is not taken into account, each element is backed by a separate buffer.
getBlockSize(const TType & blockType)2510 int TIntermediate::getBlockSize(const TType& blockType)
2511 {
2512     const TTypeList& memberList = *blockType.getStruct();
2513     int lastIndex = (int)memberList.size() - 1;
2514     int lastOffset = getOffset(blockType, lastIndex);
2515 
2516     int lastMemberSize;
2517     int dummyStride;
2518     getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
2519                        blockType.getQualifier().layoutPacking,
2520                        blockType.getQualifier().layoutMatrix == ElmRowMajor);
2521 
2522     return lastOffset + lastMemberSize;
2523 }
2524 
computeBufferReferenceTypeSize(const TType & type)2525 int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
2526 {
2527     assert(type.isReference());
2528     int size = getBlockSize(*type.getReferentType());
2529 
2530     int align = type.getBufferReferenceAlignment();
2531 
2532     if (align) {
2533         size = (size + align - 1) & ~(align-1);
2534     }
2535 
2536     return size;
2537 }
2538 
isIoResizeArray(const TType & type,EShLanguage language)2539 bool TIntermediate::isIoResizeArray(const TType& type, EShLanguage language) {
2540     return type.isArray() &&
2541             ((language == EShLangGeometry    && type.getQualifier().storage == EvqVaryingIn) ||
2542             (language == EShLangTessControl && (type.getQualifier().storage == EvqVaryingIn || type.getQualifier().storage == EvqVaryingOut) &&
2543                 ! type.getQualifier().patch) ||
2544             (language == EShLangTessEvaluation && type.getQualifier().storage == EvqVaryingIn) ||
2545             (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn &&
2546              (type.getQualifier().pervertexNV || type.getQualifier().pervertexEXT)) ||
2547             (language == EShLangMesh && type.getQualifier().storage == EvqVaryingOut &&
2548                 !type.getQualifier().perTaskNV));
2549 }
2550 
2551 } // end namespace glslang
2552