1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
8 * Copyright (c) 2018 The Khronos Group Inc.
9 *
10 * Licensed under the Apache License, Version 2.0 (the "License");
11 * you may not use this file except in compliance with the License.
12 * You may obtain a copy of the License at
13 *
14 * http://www.apache.org/licenses/LICENSE-2.0
15 *
16 * Unless required by applicable law or agreed to in writing, software
17 * distributed under the License is distributed on an "AS IS" BASIS,
18 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 * See the License for the specific language governing permissions and
20 * limitations under the License.
21 *
22 *//*!
23 * \file
24 * \brief Vulkan Transform Feedback Fuzz Layout Tests
25 *//*--------------------------------------------------------------------*/
26
27 #include "vktTransformFeedbackFuzzLayoutCase.hpp"
28
29 #include "vkPrograms.hpp"
30
31 #include "gluVarType.hpp"
32 #include "tcuTestLog.hpp"
33 #include "tcuSurface.hpp"
34 #include "deRandom.hpp"
35 #include "deStringUtil.hpp"
36 #include "deMath.h"
37
38 #include "tcuTextureUtil.hpp"
39 #include "deSharedPtr.hpp"
40
41 #include "vkMemUtil.hpp"
42 #include "vkQueryUtil.hpp"
43 #include "vkTypeUtil.hpp"
44 #include "vkRef.hpp"
45 #include "vkRefUtil.hpp"
46 #include "vkBuilderUtil.hpp"
47 #include "vkCmdUtil.hpp"
48 #include "vkObjUtil.hpp"
49
50 #include <map>
51 #include <set>
52 #include <vector>
53 #include <iostream>
54 #include <iomanip>
55
56 namespace vkt
57 {
58 namespace TransformFeedback
59 {
60
61 using namespace vk;
62
// Maps an XFB buffer index to a small integer property (alignment, stride group, ...).
typedef std::map<int, int> BufferGeneralMapping;

// Half-open [start, end) byte range already allocated within an XFB buffer,
// and the per-buffer collection of such ranges used for overlap checks.
typedef std::pair<int, int> UsedRange;
typedef std::vector<UsedRange> UsedRangeList;
typedef std::map<int, UsedRangeList> BufferUsedRangesMap;
68
69 // VarType implementation.
70
// Default-construct as an invalid (TYPE_LAST) type with no layout flags.
VarType::VarType(void) : m_type(TYPE_LAST), m_flags(0)
{
}
74
// Copy constructor: start from the empty state and delegate to operator= so
// the deep copy of array element types lives in exactly one place.
VarType::VarType(const VarType &other) : m_type(TYPE_LAST), m_flags(0)
{
    *this = other;
}
79
// Construct a basic (scalar/vector/matrix) type carrying the given layout flags.
VarType::VarType(glu::DataType basicType, uint32_t flags) : m_type(TYPE_BASIC), m_flags(flags)
{
    m_data.basicType = basicType;
}
84
// Construct an array type; takes a deep copy of the element type, which this
// object owns and releases in the destructor.
VarType::VarType(const VarType &elementType, int arraySize) : m_type(TYPE_ARRAY), m_flags(0)
{
    m_data.array.size = arraySize;
    m_data.array.elementType = new VarType(elementType);
}
90
// Construct a struct type. The StructType is not owned; it must outlive this
// VarType (structs are owned by ShaderInterface::m_structs).
VarType::VarType(const StructType *structPtr, uint32_t flags) : m_type(TYPE_STRUCT), m_flags(flags)
{
    m_data.structPtr = structPtr;
}
95
// Only array types own heap data (the deep-copied element type).
VarType::~VarType(void)
{
    if (m_type == TYPE_ARRAY)
        delete m_data.array.elementType;
}
101
// Deep-copy assignment. The previously owned array element type (if any) is
// kept alive until the copy has been made, then released (copy-then-delete).
VarType &VarType::operator=(const VarType &other)
{
    if (this == &other)
        return *this; // Self-assignment.

    VarType *previousElement = (m_type == TYPE_ARRAY) ? m_data.array.elementType : DE_NULL;

    m_type = other.m_type;
    m_flags = other.m_flags;
    m_data = Data();

    if (m_type != TYPE_ARRAY)
        m_data = other.m_data;
    else
    {
        // Arrays own their element type, so clone it instead of aliasing.
        m_data.array.size = other.m_data.array.size;
        m_data.array.elementType = new VarType(*other.m_data.array.elementType);
    }

    delete previousElement;

    return *this;
}
125
126 // StructType implementation.
// Append a named member with the given type and layout flags to this struct.
void StructType::addMember(const std::string &name, const VarType &type, uint32_t flags)
{
    m_members.push_back(StructMember(name, type, flags));
}
131
132 // InterfaceBlockMember implementation.
// A single named member of an interface block (name, type, layout flags).
InterfaceBlockMember::InterfaceBlockMember(const std::string &name, const VarType &type, uint32_t flags)
    : m_name(name)
    , m_type(type)
    , m_flags(flags)
{
}
139
140 // InterfaceBlock implementation.
// New interface block bound to XFB buffer 0, non-arrayed, with no layout flags.
InterfaceBlock::InterfaceBlock(const std::string &blockName)
    : m_blockName(blockName)
    , m_xfbBuffer(0)
    , m_arraySize(0)
    , m_flags(0)
{
}
148
operator <<(std::ostream & stream,const BlockLayoutEntry & entry)149 std::ostream &operator<<(std::ostream &stream, const BlockLayoutEntry &entry)
150 {
151 stream << entry.name << " { name = " << entry.name << ", buffer = " << entry.xfbBuffer
152 << ", offset = " << entry.xfbOffset << ", size = " << entry.xfbSize
153 << ", blockDeclarationNdx = " << entry.blockDeclarationNdx << ", instanceNdx = " << entry.instanceNdx
154 << ", activeInterfaceIndices = [";
155
156 for (std::vector<int>::const_iterator i = entry.activeInterfaceIndices.begin();
157 i != entry.activeInterfaceIndices.end(); i++)
158 {
159 if (i != entry.activeInterfaceIndices.begin())
160 stream << ", ";
161 stream << *i;
162 }
163
164 stream << "] }";
165 return stream;
166 }
167
operator <<(std::ostream & stream,const InterfaceLayoutEntry & entry)168 std::ostream &operator<<(std::ostream &stream, const InterfaceLayoutEntry &entry)
169 {
170 stream << entry.name << " { type = " << glu::getDataTypeName(entry.type) << ", arraySize = " << entry.arraySize
171 << ", blockNdx = " << entry.blockLayoutNdx << ", offset = " << entry.offset
172 << ", arrayStride = " << entry.arrayStride << ", matrixStride = " << entry.matrixStride << " }";
173
174 return stream;
175 }
176
operator <<(std::ostream & str,const InterfaceLayout & layout)177 std::ostream &operator<<(std::ostream &str, const InterfaceLayout &layout)
178 {
179 const int numBlocks = (int)layout.blocks.size();
180
181 str << "Blocks:" << std::endl;
182 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
183 str << layout.blocks[blockNdx] << std::endl;
184 str << std::endl;
185
186 str << "Interfaces:" << std::endl;
187 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
188 {
189 int numEntries = (int)layout.blocks[blockNdx].activeInterfaceIndices.size();
190
191 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
192 {
193 const InterfaceLayoutEntry &entry =
194 layout.interfaces[layout.blocks[blockNdx].activeInterfaceIndices[entryNdx]];
195
196 str << blockNdx << ":" << entryNdx << " " << entry << std::endl;
197 }
198 }
199 str << std::endl;
200
201 return str;
202 }
203
getInterfaceLayoutIndex(int blockNdx,const std::string & name) const204 int InterfaceLayout::getInterfaceLayoutIndex(int blockNdx, const std::string &name) const
205 {
206 for (int ndx = 0; ndx < (int)interfaces.size(); ndx++)
207 {
208 if (blocks[interfaces[ndx].blockLayoutNdx].blockDeclarationNdx == blockNdx && interfaces[ndx].name == name)
209 return ndx;
210 }
211
212 return -1;
213 }
214
getBlockLayoutIndex(int blockNdx,int instanceNdx) const215 int InterfaceLayout::getBlockLayoutIndex(int blockNdx, int instanceNdx) const
216 {
217 for (int ndx = 0; ndx < (int)blocks.size(); ndx++)
218 {
219 if (blocks[ndx].blockDeclarationNdx == blockNdx && blocks[ndx].instanceNdx == instanceNdx)
220 return ndx;
221 }
222
223 return -1;
224 }
225
226 // ShaderInterface implementation.
227
// Empty interface; structs and blocks are added via allocStruct()/allocBlock().
ShaderInterface::ShaderInterface(void)
{
}
231
// Members are shared pointers, so owned structs/blocks are released automatically.
ShaderInterface::~ShaderInterface(void)
{
}
235
// Allocate a new struct type owned by this interface and return a reference
// to it; the reference stays valid for the lifetime of the ShaderInterface.
StructType &ShaderInterface::allocStruct(const std::string &name)
{
    m_structs.push_back(StructTypeSP(new StructType(name)));
    return *m_structs.back();
}
241
// Predicate functor: matches struct types whose type name equals a given name.
struct StructNameEquals
{
    std::string name;

    StructNameEquals(const std::string &name_) : name(name_)
    {
    }

    // True only for named structs (anonymous structs never match).
    bool operator()(const StructTypeSP type) const
    {
        return type->hasTypeName() && name == type->getTypeName();
    }
};
255
getNamedStructs(std::vector<const StructType * > & structs) const256 void ShaderInterface::getNamedStructs(std::vector<const StructType *> &structs) const
257 {
258 for (std::vector<StructTypeSP>::const_iterator i = m_structs.begin(); i != m_structs.end(); i++)
259 {
260 if ((*i)->hasTypeName())
261 structs.push_back((*i).get());
262 }
263 }
264
// Allocate a new interface block owned by this interface and return a
// reference to it; the reference stays valid for the ShaderInterface lifetime.
InterfaceBlock &ShaderInterface::allocBlock(const std::string &name)
{
    m_interfaceBlocks.push_back(InterfaceBlockSP(new InterfaceBlock(name)));

    return *m_interfaceBlocks.back();
}
271
272 namespace // Utilities
273 {
274
// Stream-formatting helper: wraps precision flag bits so they can be printed
// as a GLSL precision qualifier via operator<< below.
struct PrecisionFlagsFmt
{
    uint32_t flags;
    PrecisionFlagsFmt(uint32_t flags_) : flags(flags_)
    {
    }
};
282
// Hex-dump 'size' bytes to the stream, 16 bytes per row with an 8-digit offset
// column. When a mask is given, bytes whose mask entry is zero print as "__".
void dumpBytes(std::ostream &str, const std::string &msg, const void *dataBytes, size_t size,
               const void *dataMask = DE_NULL)
{
    const uint8_t *const data = (const uint8_t *)dataBytes;
    const uint8_t *const mask = (const uint8_t *)dataMask;

    str << msg;

    // Switch to uppercase hex; previous formatting state is restored on exit.
    const std::ios::fmtflags oldFlags = str.flags(std::ios::hex | std::ios::uppercase);

    for (size_t byteNdx = 0; byteNdx < size; byteNdx++)
    {
        if (byteNdx % 16 == 0)
            str << std::endl << std::setfill('0') << std::setw(8) << byteNdx << ":";
        else if (byteNdx % 8 == 0)
            str << " ";
        else if (byteNdx % 4 == 0)
            str << " ";

        str << " " << std::setfill('0') << std::setw(2);

        if (mask != DE_NULL && mask[byteNdx] == 0)
            str << "__";
        else
            str << (uint32_t)data[byteNdx];
    }

    str << std::endl << std::endl;
    str.flags(oldFlags);
}
314
operator <<(std::ostream & str,const PrecisionFlagsFmt & fmt)315 std::ostream &operator<<(std::ostream &str, const PrecisionFlagsFmt &fmt)
316 {
317 // Precision.
318 DE_ASSERT(dePop32(fmt.flags & (PRECISION_LOW | PRECISION_MEDIUM | PRECISION_HIGH)) <= 1);
319 str << (fmt.flags & PRECISION_LOW ? "lowp" :
320 fmt.flags & PRECISION_MEDIUM ? "mediump" :
321 fmt.flags & PRECISION_HIGH ? "highp" :
322 "");
323 return str;
324 }
325
// Stream-formatting helper: bundles XFB layout flag bits with the concrete
// buffer/stride/offset values to print as layout(...) qualifier text.
struct LayoutFlagsFmt
{
    uint32_t flags;
    uint32_t buffer;
    uint32_t stride;
    uint32_t offset;

    LayoutFlagsFmt(const uint32_t flags_, const uint32_t buffer_, const uint32_t stride_, const uint32_t offset_)
        : flags(flags_)
        , buffer(buffer_)
        , stride(stride_)
        , offset(offset_)
    {
    }
};
341
operator <<(std::ostream & str,const LayoutFlagsFmt & fmt)342 std::ostream &operator<<(std::ostream &str, const LayoutFlagsFmt &fmt)
343 {
344 static const struct
345 {
346 uint32_t bit;
347 const char *token;
348 } bitDesc[] = {
349 {LAYOUT_XFBBUFFER, "xfb_buffer"},
350 {LAYOUT_XFBOFFSET, "xfb_offset"},
351 {LAYOUT_XFBSTRIDE, "xfb_stride"},
352 };
353
354 uint32_t remBits = fmt.flags;
355 for (int descNdx = 0; descNdx < DE_LENGTH_OF_ARRAY(bitDesc); descNdx++)
356 {
357 if (remBits & bitDesc[descNdx].bit)
358 {
359 str << bitDesc[descNdx].token;
360
361 if (bitDesc[descNdx].bit == LAYOUT_XFBBUFFER)
362 str << " = " << fmt.buffer;
363 if (bitDesc[descNdx].bit == LAYOUT_XFBOFFSET)
364 str << " = " << fmt.offset;
365 if (bitDesc[descNdx].bit == LAYOUT_XFBSTRIDE)
366 str << " = " << fmt.stride;
367
368 remBits &= ~bitDesc[descNdx].bit;
369
370 if (remBits != 0)
371 str << ", ";
372 }
373 }
374 DE_ASSERT(remBits == 0);
375 return str;
376 }
377
operator <<(std::ostream & str,const DeviceSizeVector & vec)378 std::ostream &operator<<(std::ostream &str, const DeviceSizeVector &vec)
379 {
380 str << " [";
381
382 for (size_t vecNdx = 0; vecNdx < vec.size(); vecNdx++)
383 str << (uint64_t)vec[vecNdx] << (vecNdx + 1 < vec.size() ? ", " : "]");
384
385 return str;
386 }
387
388 // Layout computation.
389
getDataTypeByteSize(glu::DataType type)390 int getDataTypeByteSize(glu::DataType type)
391 {
392 if (getDataTypeScalarType(type) == glu::TYPE_DOUBLE)
393 {
394 return glu::getDataTypeScalarSize(type) * (int)sizeof(uint64_t);
395 }
396 else
397 {
398 return glu::getDataTypeScalarSize(type) * (int)sizeof(uint32_t);
399 }
400 }
401
// Array stride of a scalar/vector element type: XFB packs array elements
// tightly, so the stride equals the element's byte size. Matrices are handled
// separately by the callers (column by column).
int getDataTypeArrayStride(glu::DataType type)
{
    DE_ASSERT(!glu::isDataTypeMatrix(type));

    return getDataTypeByteSize(type);
}
408
getDataTypeArrayStrideForLocation(glu::DataType type)409 int getDataTypeArrayStrideForLocation(glu::DataType type)
410 {
411 DE_ASSERT(!glu::isDataTypeMatrix(type));
412
413 const int baseStride = getDataTypeByteSize(type);
414 const int vec4Alignment = (int)sizeof(uint32_t) * 4;
415
416 return deAlign32(baseStride, vec4Alignment);
417 }
418
computeInterfaceBlockMemberAlignment(const VarType & type)419 int computeInterfaceBlockMemberAlignment(const VarType &type)
420 {
421 if (type.isBasicType())
422 {
423 glu::DataType basicType = type.getBasicType();
424
425 if (glu::isDataTypeMatrix(basicType) || isDataTypeVector(basicType))
426 basicType = glu::getDataTypeScalarType(basicType);
427
428 switch (basicType)
429 {
430 case glu::TYPE_FLOAT:
431 case glu::TYPE_INT:
432 case glu::TYPE_UINT:
433 return sizeof(uint32_t);
434 case glu::TYPE_DOUBLE:
435 return sizeof(uint64_t);
436 default:
437 TCU_THROW(InternalError, "Invalid type");
438 }
439 }
440 else if (type.isArrayType())
441 {
442 return computeInterfaceBlockMemberAlignment(type.getElementType());
443 }
444 else if (type.isStructType())
445 {
446 int maxAlignment = 0;
447
448 for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end();
449 memberIter++)
450 maxAlignment = de::max(maxAlignment, computeInterfaceBlockMemberAlignment(memberIter->getType()));
451
452 return maxAlignment;
453 }
454 else
455 TCU_THROW(InternalError, "Invalid type");
456 }
457
// Increment the mask byte for every buffer byte this entry writes, walking
// array elements, matrix columns and vector components exactly like the value
// generator does. 'basePtr' is the entry's block start; 'basePtr0' is the
// start of the whole buffer so mask offsets are buffer-relative.
void createMask(void *maskBasePtr, const InterfaceLayoutEntry &entry, const void *basePtr0, const void *basePtr)
{
    const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
    const int scalarSize = glu::getDataTypeScalarSize(entry.type);
    const bool isMatrix = glu::isDataTypeMatrix(entry.type);
    const int numVecs = isMatrix ? glu::getDataTypeMatrixNumColumns(entry.type) : 1;
    const int vecSize = scalarSize / numVecs;
    const bool isArray = entry.arraySize > 1;
    const size_t compSize = getDataTypeByteSize(scalarType);

    DE_ASSERT(scalarSize % numVecs == 0);

    for (int elemNdx = 0; elemNdx < entry.arraySize; elemNdx++)
    {
        // arrayStride only applies when the entry is a real array.
        uint8_t *elemPtr = (uint8_t *)basePtr + entry.offset + (isArray ? elemNdx * entry.arrayStride : 0);

        for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
        {
            // For matrices, each column is a separate vector at matrixStride.
            uint8_t *vecPtr = elemPtr + (isMatrix ? vecNdx * entry.matrixStride : 0);

            for (int compNdx = 0; compNdx < vecSize; compNdx++)
            {
                const uint8_t *compPtr = vecPtr + compSize * compNdx;
                const size_t offset = compPtr - (uint8_t *)basePtr0;
                uint8_t *maskPtr = (uint8_t *)maskBasePtr + offset;

                switch (scalarType)
                {
                case glu::TYPE_DOUBLE:
                case glu::TYPE_FLOAT:
                case glu::TYPE_INT:
                case glu::TYPE_UINT:
                {
                    // Count writes per byte; overlapping entries would show as >1.
                    for (size_t ndx = 0; ndx < compSize; ++ndx)
                        ++maskPtr[ndx];

                    break;
                }
                default:
                    DE_ASSERT(false);
                }
            }
        }
    }
}
503
createMask(const InterfaceLayout & layout,const std::map<int,void * > & blockPointers,const void * basePtr0,const size_t baseSize)504 std::vector<uint8_t> createMask(const InterfaceLayout &layout, const std::map<int, void *> &blockPointers,
505 const void *basePtr0, const size_t baseSize)
506 {
507 std::vector<uint8_t> mask(baseSize, 0);
508 const int numBlocks((int)layout.blocks.size());
509
510 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
511 {
512 void *basePtr = blockPointers.find(blockNdx)->second;
513 int numEntries = (int)layout.blocks[blockNdx].activeInterfaceIndices.size();
514
515 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
516 {
517 const InterfaceLayoutEntry &entry =
518 layout.interfaces[layout.blocks[blockNdx].activeInterfaceIndices[entryNdx]];
519
520 if (entry.validate)
521 createMask(&mask[0], entry, basePtr0, basePtr);
522 }
523 }
524
525 return mask;
526 }
527
computeInterfaceBlockAlignment(const InterfaceBlock & interfaceBlock)528 int computeInterfaceBlockAlignment(const InterfaceBlock &interfaceBlock)
529 {
530 int baseAlignment = 0;
531
532 for (InterfaceBlock::ConstIterator memberIter = interfaceBlock.begin(); memberIter != interfaceBlock.end();
533 memberIter++)
534 {
535 const InterfaceBlockMember &member = *memberIter;
536
537 baseAlignment = std::max(baseAlignment, computeInterfaceBlockMemberAlignment(member.getType()));
538 }
539
540 return baseAlignment;
541 }
542
// True when the half-open byte ranges [a1, b1) and [a2, b2) intersect.
// Implemented as endpoint containment over the inclusive ranges [a1, b1-1]
// and [a2, b2-1] (deInRange32(x, lo, hi) tests lo <= x <= hi). Note that for
// a degenerate range (b == a) the start-point checks can still report an
// overlap, so this is intentionally not the plain "a1 < b2 && a2 < b1" form.
static inline bool isOverlaped(const int a1, const int b1, const int a2, const int b2)
{
    DE_ASSERT(b1 > 0 && b2 > 0);

    const int b1s = b1 - 1;
    const int b2s = b2 - 1;

    return deInRange32(a1, a2, b2s) || deInRange32(b1s, a2, b2s) || deInRange32(a2, a1, b1s) ||
           deInRange32(b2s, a1, b1s);
}
553
// Recursively lay out one variable of the given type, appending one
// InterfaceLayoutEntry per captured basic/array leaf. 'curOffset' advances in
// tightly-packed XFB bytes; 'curLocation' advances in vec4-sized location
// slots. Entries whose flags contain FIELD_MISSING/FIELD_UNASSIGNED are still
// laid out but marked as not validated.
void computeXfbLayout(InterfaceLayout &layout, int &curOffset, int &curLocation, int curBlockNdx,
                      const std::string &curPrefix, const VarType &type, uint32_t layoutFlags)
{
    const int locationAlignSize = 16;
    const bool validate = 0 == (layoutFlags & (FIELD_MISSING | FIELD_UNASSIGNED));
    int baseAlignment = computeInterfaceBlockMemberAlignment(type);

    DE_ASSERT(baseAlignment == sizeof(uint32_t) || baseAlignment == sizeof(uint64_t));

    // Align to 4 (or 8 for double-based types) before placing the member.
    curOffset = deAlign32(curOffset, baseAlignment);

    if (type.isBasicType())
    {
        const glu::DataType basicType = type.getBasicType();
        int fieldSize = 0;
        int fieldSizeForLocation = 0;
        InterfaceLayoutEntry entry;

        entry.name = curPrefix;
        entry.type = basicType;
        entry.arraySize = 1;
        entry.arrayStride = 0;
        entry.matrixStride = 0;
        entry.blockLayoutNdx = curBlockNdx;
        entry.locationNdx = 0;
        entry.validate = validate;

        if (glu::isDataTypeMatrix(basicType))
        {
            // Array of vectors
            const int vecSize = glu::getDataTypeMatrixNumRows(basicType);
            const int numVecs = glu::getDataTypeMatrixNumColumns(basicType);
            const glu::DataType elemType = glu::getDataTypeScalarType(basicType);
            const int stride = getDataTypeArrayStride(glu::getDataTypeVector(elemType, vecSize));
            const int strideForLocation = getDataTypeArrayStrideForLocation(glu::getDataTypeVector(elemType, vecSize));

            entry.matrixStride = stride;

            fieldSize = numVecs * stride;
            fieldSizeForLocation = numVecs * strideForLocation;
        }
        else
        {
            // Scalar or vector.
            fieldSize = getDataTypeByteSize(basicType);
            fieldSizeForLocation = deAlign32(fieldSize, locationAlignSize);
        }

        entry.offset = curOffset;
        entry.locationNdx = curLocation;

        curOffset += fieldSize;
        curLocation += deDivRoundUp32(fieldSizeForLocation, locationAlignSize);

        layout.interfaces.push_back(entry);
    }
    else if (type.isArrayType())
    {
        const VarType &elemType = type.getElementType();

        if (elemType.isBasicType() && !glu::isDataTypeMatrix(elemType.getBasicType()))
        {
            // Array of scalars or vectors.
            const glu::DataType elemBasicType = elemType.getBasicType();
            const int stride = getDataTypeArrayStride(elemBasicType);
            const int fieldSize = stride * type.getArraySize();
            const int strideForLocation = getDataTypeArrayStrideForLocation(elemBasicType);
            const int fieldSizeForLocation = strideForLocation * type.getArraySize();
            InterfaceLayoutEntry entry;

            entry.name = curPrefix + "[0]"; // Array interfaces are always postfixed with [0]
            entry.type = elemBasicType;
            entry.blockLayoutNdx = curBlockNdx;
            entry.offset = curOffset;
            entry.arraySize = type.getArraySize();
            entry.arrayStride = stride;
            entry.matrixStride = 0;
            entry.locationNdx = curLocation;
            entry.validate = validate;

            curOffset += fieldSize;
            curLocation += deDivRoundUp32(fieldSizeForLocation, locationAlignSize);

            layout.interfaces.push_back(entry);
        }
        else if (elemType.isBasicType() && glu::isDataTypeMatrix(elemType.getBasicType()))
        {
            // Array of matrices.
            const glu::DataType elemBasicType = elemType.getBasicType();
            const glu::DataType scalarType = glu::getDataTypeScalarType(elemBasicType);
            const int vecSize = glu::getDataTypeMatrixNumRows(elemBasicType);
            const int numVecs = glu::getDataTypeMatrixNumColumns(elemBasicType);
            const int stride = getDataTypeArrayStride(glu::getDataTypeVector(scalarType, vecSize));
            const int fieldSize = numVecs * type.getArraySize() * stride;
            const int strideForLocation =
                getDataTypeArrayStrideForLocation(glu::getDataTypeVector(scalarType, vecSize));
            const int fieldSizeForLocation = numVecs * type.getArraySize() * strideForLocation;
            InterfaceLayoutEntry entry;

            entry.name = curPrefix + "[0]"; // Array interfaces are always postfixed with [0]
            entry.type = elemBasicType;
            entry.blockLayoutNdx = curBlockNdx;
            entry.offset = curOffset;
            entry.arraySize = type.getArraySize();
            entry.arrayStride = stride * numVecs; // Element stride spans all columns.
            entry.matrixStride = stride;
            entry.locationNdx = curLocation;
            entry.validate = validate;

            curOffset += fieldSize;
            curLocation += deDivRoundUp32(fieldSizeForLocation, locationAlignSize);

            layout.interfaces.push_back(entry);
        }
        else
        {
            // Arrays of structs (or arrays of arrays) get one entry per element.
            DE_ASSERT(elemType.isStructType() || elemType.isArrayType());

            for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
                computeXfbLayout(layout, curOffset, curLocation, curBlockNdx,
                                 curPrefix + "[" + de::toString(elemNdx) + "]", type.getElementType(), layoutFlags);
        }
    }
    else
    {
        DE_ASSERT(type.isStructType());

        // Struct members inherit missing/unassigned flags from the parent.
        for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end();
             memberIter++)
            computeXfbLayout(layout, curOffset, curLocation, curBlockNdx, curPrefix + "." + memberIter->getName(),
                             memberIter->getType(), (memberIter->getFlags() | layoutFlags) & FIELD_OPTIONS);

        // A struct's size is padded to its own alignment.
        curOffset = deAlign32(curOffset, baseAlignment);
    }
}
689
computeXfbLayout(InterfaceLayout & layout,ShaderInterface & shaderInterface,BufferGeneralMapping & perBufferXfbOffsets,uint32_t & locationsUsed)690 void computeXfbLayout(InterfaceLayout &layout, ShaderInterface &shaderInterface,
691 BufferGeneralMapping &perBufferXfbOffsets, uint32_t &locationsUsed)
692 {
693 const int numInterfaceBlocks = shaderInterface.getNumInterfaceBlocks();
694 int curLocation = 0;
695 BufferGeneralMapping bufferAlignments;
696 BufferGeneralMapping buffersList;
697 BufferGeneralMapping bufferStrideGroup;
698 BufferUsedRangesMap bufferUsedRanges;
699
700 for (int blockNdx = 0; blockNdx < numInterfaceBlocks; blockNdx++)
701 {
702 const InterfaceBlock &interfaceBlock = shaderInterface.getInterfaceBlock(blockNdx);
703 const int xfbBuffer = interfaceBlock.getXfbBuffer();
704
705 buffersList[xfbBuffer] = 1;
706 bufferStrideGroup[xfbBuffer] = xfbBuffer;
707 }
708
709 for (BufferGeneralMapping::const_iterator xfbBuffersIter = buffersList.begin(); xfbBuffersIter != buffersList.end();
710 xfbBuffersIter++)
711 {
712 const int xfbBufferAnalyzed = xfbBuffersIter->first;
713
714 for (int blockNdx = 0; blockNdx < numInterfaceBlocks; blockNdx++)
715 {
716 InterfaceBlock &interfaceBlock = shaderInterface.getInterfaceBlockForModify(blockNdx);
717
718 if (interfaceBlock.getXfbBuffer() == xfbBufferAnalyzed)
719 {
720 const bool hasInstanceName = interfaceBlock.hasInstanceName();
721 const std::string blockPrefix = hasInstanceName ? (interfaceBlock.getBlockName() + ".") : "";
722 const int numInstances = interfaceBlock.isArray() ? interfaceBlock.getArraySize() : 1;
723 int activeBlockNdx = (int)layout.blocks.size();
724 int startInterfaceNdx = (int)layout.interfaces.size();
725 int startLocationNdx = (int)curLocation;
726 int interfaceAlignement = computeInterfaceBlockAlignment(interfaceBlock);
727 int curOffset = 0;
728 int blockSize = 0;
729
730 do
731 {
732 const int xfbFirstInstanceBuffer = interfaceBlock.getXfbBuffer();
733 int &xfbFirstInstanceBufferOffset = perBufferXfbOffsets[xfbFirstInstanceBuffer];
734 const int savedLayoutInterfacesNdx = (int)layout.interfaces.size();
735 const int savedCurOffset = curOffset;
736 const int savedCurLocation = curLocation;
737 UsedRangeList &usedRanges = bufferUsedRanges[xfbFirstInstanceBuffer];
738 bool fitIntoBuffer = true;
739
740 // GLSL 4.60
741 // Further, if applied to an aggregate containing a double, the offset must also be a multiple of 8,
742 // and the space taken in the buffer will be a multiple of 8.
743 xfbFirstInstanceBufferOffset = deAlign32(xfbFirstInstanceBufferOffset, interfaceAlignement);
744
745 for (InterfaceBlock::ConstIterator memberIter = interfaceBlock.begin();
746 memberIter != interfaceBlock.end(); memberIter++)
747 {
748 const InterfaceBlockMember &member = *memberIter;
749
750 computeXfbLayout(layout, curOffset, curLocation, activeBlockNdx, blockPrefix + member.getName(),
751 member.getType(), member.getFlags() & FIELD_OPTIONS);
752 }
753
754 // GLSL 4.60
755 // Further, if applied to an aggregate containing a double, the offset must also be a multiple of 8,
756 // and the space taken in the buffer will be a multiple of 8.
757 blockSize = deAlign32(curOffset, interfaceAlignement);
758
759 // Overlapping check
760 for (UsedRangeList::const_iterator usedRangeIt = usedRanges.begin();
761 usedRangeIt != usedRanges.end(); ++usedRangeIt)
762 {
763 const int &usedRangeStart = usedRangeIt->first;
764 const int &usedRangeEnd = usedRangeIt->second;
765 const int genRangeStart = xfbFirstInstanceBufferOffset;
766 const int genRangeEnd = xfbFirstInstanceBufferOffset + blockSize;
767
768 // Validate if block has overlapping
769 if (isOverlaped(genRangeStart, genRangeEnd, usedRangeStart, usedRangeEnd))
770 {
771 // Restart from obstacle interface end
772 fitIntoBuffer = false;
773
774 DE_ASSERT(xfbFirstInstanceBufferOffset > usedRangeEnd);
775
776 // Bump up interface start to the end of used range
777 xfbFirstInstanceBufferOffset = usedRangeEnd;
778
779 // Undo allocation
780 curOffset = savedCurOffset;
781 curLocation = savedCurLocation;
782
783 layout.interfaces.resize(savedLayoutInterfacesNdx);
784 }
785 }
786
787 if (fitIntoBuffer)
788 break;
789 } while (true);
790
791 const int xfbFirstInstanceBuffer = interfaceBlock.getXfbBuffer();
792 const int xfbFirstInstanceBufferOffset = perBufferXfbOffsets[xfbFirstInstanceBuffer];
793 const int endInterfaceNdx = (int)layout.interfaces.size();
794 const int blockSizeInLocations = curLocation - startLocationNdx;
795
796 curLocation -= blockSizeInLocations;
797
798 if (numInstances > 1)
799 interfaceBlock.setFlag(LAYOUT_XFBSTRIDE);
800
801 // Create block layout entries for each instance.
802 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
803 {
804 // Allocate entry for instance.
805 layout.blocks.push_back(BlockLayoutEntry());
806
807 BlockLayoutEntry &blockEntry = layout.blocks.back();
808 const int xfbBuffer = xfbFirstInstanceBuffer + instanceNdx;
809 int &xfbBufferOffset = perBufferXfbOffsets[xfbBuffer];
810
811 DE_ASSERT(xfbBufferOffset <= xfbFirstInstanceBufferOffset);
812
813 xfbBufferOffset = xfbFirstInstanceBufferOffset;
814
815 blockEntry.name = interfaceBlock.getBlockName();
816 blockEntry.xfbBuffer = xfbBuffer;
817 blockEntry.xfbOffset = xfbBufferOffset;
818 blockEntry.xfbSize = blockSize;
819 blockEntry.blockDeclarationNdx = blockNdx;
820 blockEntry.instanceNdx = instanceNdx;
821 blockEntry.locationNdx = curLocation;
822 blockEntry.locationSize = blockSizeInLocations;
823
824 xfbBufferOffset += blockSize;
825 curLocation += blockSizeInLocations;
826
827 // Compute active interface set for block.
828 for (int interfaceNdx = startInterfaceNdx; interfaceNdx < endInterfaceNdx; interfaceNdx++)
829 blockEntry.activeInterfaceIndices.push_back(interfaceNdx);
830
831 if (interfaceBlock.isArray())
832 blockEntry.name += "[" + de::toString(instanceNdx) + "]";
833
834 bufferUsedRanges[xfbBuffer].push_back(
835 UsedRange(blockEntry.xfbOffset, blockEntry.xfbOffset + blockEntry.xfbSize));
836
837 // Store maximum per-buffer alignment
838 bufferAlignments[xfbBuffer] = std::max(interfaceAlignement, bufferAlignments[xfbBuffer]);
839
840 // Buffers bound through instanced arrays must have same stride (and alignment)
841 bufferStrideGroup[xfbBuffer] = bufferStrideGroup[xfbFirstInstanceBuffer];
842 }
843 }
844 }
845 }
846
847 // All XFB buffers within group must have same stride
848 {
849 BufferGeneralMapping groupStride;
850
851 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin();
852 xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
853 {
854 const int xfbBuffer = xfbBuffersIter->first;
855 const int xfbStride = perBufferXfbOffsets[xfbBuffer];
856 const int group = bufferStrideGroup[xfbBuffer];
857
858 groupStride[group] = std::max(groupStride[group], xfbStride);
859 }
860
861 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin();
862 xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
863 {
864 const int xfbBuffer = xfbBuffersIter->first;
865 const int group = bufferStrideGroup[xfbBuffer];
866
867 perBufferXfbOffsets[xfbBuffer] = groupStride[group];
868 }
869 }
870
871 // All XFB buffers within group must have same stride alignment
872 {
873 BufferGeneralMapping groupAlignment;
874
875 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin();
876 xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
877 {
878 const int xfbBuffer = xfbBuffersIter->first;
879 const int group = bufferStrideGroup[xfbBuffer];
880 const int xfbAlign = bufferAlignments[xfbBuffer];
881
882 groupAlignment[group] = std::max(groupAlignment[group], xfbAlign);
883 }
884
885 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin();
886 xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
887 {
888 const int xfbBuffer = xfbBuffersIter->first;
889 const int group = bufferStrideGroup[xfbBuffer];
890
891 bufferAlignments[xfbBuffer] = groupAlignment[group];
892 }
893 }
894
895 // GLSL 4.60
896 // If the buffer is capturing any outputs with double-precision components, the stride must be a multiple of 8, ...
897 for (BufferGeneralMapping::const_iterator xfbBuffersIter = perBufferXfbOffsets.begin();
898 xfbBuffersIter != perBufferXfbOffsets.end(); xfbBuffersIter++)
899 {
900 const int xfbBuffer = xfbBuffersIter->first;
901 const int xfbAlign = bufferAlignments[xfbBuffer];
902 int &xfbOffset = perBufferXfbOffsets[xfbBuffer];
903
904 xfbOffset = deAlign32(xfbOffset, xfbAlign);
905 }
906
907 // Keep stride in interface blocks
908 for (int blockNdx = 0; blockNdx < (int)layout.blocks.size(); blockNdx++)
909 layout.blocks[blockNdx].xfbStride = perBufferXfbOffsets[layout.blocks[blockNdx].xfbBuffer];
910
911 locationsUsed = static_cast<uint32_t>(curLocation);
912 }
913
914 // Value generator.
915
// Fill the buffer bytes covered by one layout entry with random values,
// walking array elements, matrix columns and vector components using the
// entry's strides. Values are small non-zero integers ([-127,-1] U [1,127],
// unsigned always positive) so they survive float conversion exactly.
void generateValue(const InterfaceLayoutEntry &entry, void *basePtr, de::Random &rnd)
{
    const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
    const int scalarSize = glu::getDataTypeScalarSize(entry.type);
    const bool isMatrix = glu::isDataTypeMatrix(entry.type);
    const int numVecs = isMatrix ? glu::getDataTypeMatrixNumColumns(entry.type) : 1;
    const int vecSize = scalarSize / numVecs;
    const bool isArray = entry.arraySize > 1;
    const size_t compSize = getDataTypeByteSize(scalarType);

    DE_ASSERT(scalarSize % numVecs == 0);

    for (int elemNdx = 0; elemNdx < entry.arraySize; elemNdx++)
    {
        // arrayStride only applies when the entry is a real array.
        uint8_t *elemPtr = (uint8_t *)basePtr + entry.offset + (isArray ? elemNdx * entry.arrayStride : 0);

        for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
        {
            // For matrices, each column is a separate vector at matrixStride.
            uint8_t *vecPtr = elemPtr + (isMatrix ? vecNdx * entry.matrixStride : 0);

            for (int compNdx = 0; compNdx < vecSize; compNdx++)
            {
                uint8_t *compPtr = vecPtr + compSize * compNdx;
                const int sign = rnd.getBool() ? +1 : -1;
                const int value = rnd.getInt(1, 127);

                switch (scalarType)
                {
                case glu::TYPE_DOUBLE:
                    *((double *)compPtr) = (double)(sign * value);
                    break;
                case glu::TYPE_FLOAT:
                    *((float *)compPtr) = (float)(sign * value);
                    break;
                case glu::TYPE_INT:
                    *((int32_t *)compPtr) = (int32_t)(sign * value);
                    break;
                case glu::TYPE_UINT:
                    // Unsigned values are kept non-negative (sign is ignored).
                    *((uint32_t *)compPtr) = (uint32_t)(value);
                    break;
                default:
                    DE_ASSERT(false);
                }
            }
        }
    }
}
963
generateValues(const InterfaceLayout & layout,const std::map<int,void * > & blockPointers,uint32_t seed)964 void generateValues(const InterfaceLayout &layout, const std::map<int, void *> &blockPointers, uint32_t seed)
965 {
966 de::Random rnd(seed);
967 int numBlocks = (int)layout.blocks.size();
968
969 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
970 {
971 void *basePtr = blockPointers.find(blockNdx)->second;
972 int numEntries = (int)layout.blocks[blockNdx].activeInterfaceIndices.size();
973
974 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
975 {
976 const InterfaceLayoutEntry &entry =
977 layout.interfaces[layout.blocks[blockNdx].activeInterfaceIndices[entryNdx]];
978
979 if (entry.validate)
980 generateValue(entry, basePtr, rnd);
981 }
982 }
983 }
984
985 // Shader generator.
986
// Stream manipulator: inserting an Indent into an ostream writes `level` tabs.
struct Indent
{
    int level;

    Indent(int depth) : level(depth)
    {
    }
};
994
operator <<(std::ostream & str,const Indent & indent)995 std::ostream &operator<<(std::ostream &str, const Indent &indent)
996 {
997 for (int i = 0; i < indent.level; i++)
998 str << "\t";
999 return str;
1000 }
1001
// Forward declarations for the mutually recursive shader-source generators below.
void generateDeclaration(std::ostringstream &src, const VarType &type, const std::string &name, int indentLevel,
                         uint32_t unusedHints, uint32_t flagsMask, uint32_t buffer, uint32_t stride, uint32_t offset);
void generateDeclaration(std::ostringstream &src, const InterfaceBlockMember &member, int indentLevel, uint32_t buffer,
                         uint32_t stride, uint32_t offset);
void generateDeclaration(std::ostringstream &src, const StructType &structType, int indentLevel);

void generateLocalDeclaration(std::ostringstream &src, const StructType &structType, int indentLevel);
void generateFullDeclaration(std::ostringstream &src, const StructType &structType, int indentLevel);
1010
// Emits a full named struct declaration followed by the terminating ";\n".
void generateDeclaration(std::ostringstream &src, const StructType &structType, int indentLevel)
{
    DE_ASSERT(structType.hasTypeName());
    generateFullDeclaration(src, structType, indentLevel);
    src << ";\n";
}
1017
generateFullDeclaration(std::ostringstream & src,const StructType & structType,int indentLevel)1018 void generateFullDeclaration(std::ostringstream &src, const StructType &structType, int indentLevel)
1019 {
1020 src << "struct";
1021 if (structType.hasTypeName())
1022 src << " " << structType.getTypeName();
1023 src << "\n" << Indent(indentLevel) << "{\n";
1024
1025 for (StructType::ConstIterator memberIter = structType.begin(); memberIter != structType.end(); memberIter++)
1026 {
1027 src << Indent(indentLevel + 1);
1028 generateDeclaration(src, memberIter->getType(), memberIter->getName(), indentLevel + 1,
1029 memberIter->getFlags() & FIELD_OPTIONS, ~LAYOUT_MASK, 0u, 0u, 0u);
1030 }
1031
1032 src << Indent(indentLevel) << "}";
1033 }
1034
// References a previously declared named struct (no inline definition emitted).
void generateLocalDeclaration(std::ostringstream &src, const StructType &structType, int /* indentLevel */)
{
    src << structType.getTypeName();
}
1039
generateLayoutAndPrecisionDeclaration(std::ostringstream & src,uint32_t flags,uint32_t buffer,uint32_t stride,uint32_t offset)1040 void generateLayoutAndPrecisionDeclaration(std::ostringstream &src, uint32_t flags, uint32_t buffer, uint32_t stride,
1041 uint32_t offset)
1042 {
1043 if ((flags & LAYOUT_MASK) != 0)
1044 src << "layout(" << LayoutFlagsFmt(flags & LAYOUT_MASK, buffer, stride, offset) << ") ";
1045
1046 if ((flags & PRECISION_MASK) != 0)
1047 src << PrecisionFlagsFmt(flags & PRECISION_MASK) << " ";
1048 }
1049
// Emits a GLSL declaration for a variable of the given type.
// fieldHints: FIELD_MISSING comments out the entire declaration; FIELD_UNASSIGNED
//             only appends a trailing note.
// flagsMask:  restricts which layout/precision flags of the type are printed.
// buffer/stride/offset: XFB qualifier values forwarded to LayoutFlagsFmt.
void generateDeclaration(std::ostringstream &src, const VarType &type, const std::string &name, int indentLevel,
                         uint32_t fieldHints, uint32_t flagsMask, uint32_t buffer, uint32_t stride, uint32_t offset)
{
    if (fieldHints & FIELD_MISSING)
        src << "// ";

    generateLayoutAndPrecisionDeclaration(src, type.getFlags() & flagsMask, buffer, stride, offset);

    if (type.isBasicType())
        src << glu::getDataTypeName(type.getBasicType()) << " " << name;
    else if (type.isArrayType())
    {
        // Collect the size of every nested array level so they can be printed after the name.
        std::vector<int> arraySizes;
        const VarType *curType = &type;
        while (curType->isArrayType())
        {
            arraySizes.push_back(curType->getArraySize());
            curType = &curType->getElementType();
        }

        // NOTE(review): layout/precision is emitted a second time here for the innermost
        // element type, in addition to the call above for the array type itself —
        // presumably the flag masks keep the two from both producing output; confirm
        // against generated shader source.
        generateLayoutAndPrecisionDeclaration(src, curType->getFlags() & flagsMask, buffer, stride, offset);

        if (curType->isBasicType())
            src << glu::getDataTypeName(curType->getBasicType());
        else
        {
            DE_ASSERT(curType->isStructType());
            generateLocalDeclaration(src, curType->getStruct(), indentLevel + 1);
        }

        src << " " << name;

        for (std::vector<int>::const_iterator sizeIter = arraySizes.begin(); sizeIter != arraySizes.end(); sizeIter++)
            src << "[" << *sizeIter << "]";
    }
    else
    {
        // Struct type: reference the named struct declared earlier in the shader.
        generateLocalDeclaration(src, type.getStruct(), indentLevel + 1);
        src << " " << name;
    }

    src << ";";

    // Print out unused hints.
    if (fieldHints & FIELD_MISSING)
        src << " // missing field";
    else if (fieldHints & FIELD_UNASSIGNED)
        src << " // unassigned";

    src << "\n";
}
1101
generateDeclaration(std::ostringstream & src,const InterfaceBlockMember & member,int indentLevel,uint32_t buffer,uint32_t stride,uint32_t offset)1102 void generateDeclaration(std::ostringstream &src, const InterfaceBlockMember &member, int indentLevel, uint32_t buffer,
1103 uint32_t stride, uint32_t offset)
1104 {
1105 if ((member.getFlags() & LAYOUT_MASK) != 0)
1106 src << "layout(" << LayoutFlagsFmt(member.getFlags() & LAYOUT_MASK, buffer, stride, offset) << ") ";
1107
1108 generateDeclaration(src, member.getType(), member.getName(), indentLevel, member.getFlags() & FIELD_OPTIONS, ~0u,
1109 buffer, stride, offset);
1110 }
1111
// Resolves the byte offset of a block member by building the fully qualified
// name of its first basic-type leaf ("Block.member[0].field...") and looking it
// up among the layout's interface entries.
uint32_t getBlockMemberOffset(int blockNdx, const InterfaceBlock &block, const InterfaceBlockMember &member,
                              const InterfaceLayout &layout)
{
    std::ostringstream name;
    const VarType *curType = &member.getType();

    if (block.getInstanceName().length() != 0)
        name << block.getBlockName() << "."; // \note InterfaceLayoutEntry uses block name rather than instance name

    name << member.getName();

    // Descend through arrays (element 0) and structs (first member) until a basic type is reached.
    while (!curType->isBasicType())
    {
        if (curType->isArrayType())
        {
            name << "[0]";
            curType = &curType->getElementType();
        }

        if (curType->isStructType())
        {
            const StructType::ConstIterator firstMember = curType->getStruct().begin();

            name << "." << firstMember->getName();
            curType = &firstMember->getType();
        }
    }

    const int interfaceLayoutNdx = layout.getInterfaceLayoutIndex(blockNdx, name.str());
    DE_ASSERT(interfaceLayoutNdx >= 0);

    return layout.interfaces[interfaceLayoutNdx].offset;
}
1145
// Deterministically reorders the vector by alternating picks from the far end
// and the near end: for [a,b,c,d,e] the result is [e,a,d,b,c]. Used to emit
// block members in a scrambled-but-reproducible order.
template <typename T>
void semiShuffle(std::vector<T> &v)
{
    const std::vector<T> original(v);
    const int count = static_cast<int>(original.size());
    int pos         = -1;
    int step        = count;

    v.clear();

    for (; step != 0; step = (step > 0) ? (1 - step) : (-1 - step))
    {
        pos += step;
        v.push_back(original[pos]);
    }
}
1162
1163 template <typename T>
1164 //! \note Stores pointers to original elements
1165 class Traverser
1166 {
1167 public:
1168 template <typename Iter>
Traverser(const Iter beg,const Iter end,const bool shuffled)1169 Traverser(const Iter beg, const Iter end, const bool shuffled)
1170 {
1171 for (Iter it = beg; it != end; ++it)
1172 m_elements.push_back(&(*it));
1173
1174 if (shuffled)
1175 semiShuffle(m_elements);
1176
1177 m_next = m_elements.begin();
1178 }
1179
next(void)1180 T *next(void)
1181 {
1182 if (m_next != m_elements.end())
1183 return *m_next++;
1184 else
1185 return DE_NULL;
1186 }
1187
1188 private:
1189 typename std::vector<T *> m_elements;
1190 typename std::vector<T *>::const_iterator m_next;
1191 };
1192
// Emits the complete out-interface-block declaration for one block, including
// the location and XFB layout qualifiers taken from the computed layout.
// When shuffleUniformMembers is set, members are declared in semi-shuffled
// order; offsets are explicit so declaration order does not affect capture.
void generateDeclaration(std::ostringstream &src, int blockNdx, const InterfaceBlock &block,
                         const InterfaceLayout &layout, bool shuffleUniformMembers)
{
    const int indentOne   = 1;
    const int ndx         = layout.getBlockLayoutIndex(blockNdx, 0); // layout of the first instance
    const int locationNdx = layout.blocks[ndx].locationNdx;
    const int xfbOffset   = layout.blocks[ndx].xfbOffset;
    const int xfbBuffer   = layout.blocks[ndx].xfbBuffer;
    const int xfbStride   = layout.blocks[ndx].xfbStride;

    src << "layout(";
    src << "location = " << locationNdx;
    if ((block.getFlags() & LAYOUT_MASK) != 0)
        src << ", " << LayoutFlagsFmt(block.getFlags() & LAYOUT_MASK, xfbBuffer, xfbStride, xfbOffset);
    src << ") out " << block.getBlockName();

    // Debugging aid embedded in the generated source.
    src << " //"
        << " sizeInBytes=" << layout.blocks[ndx].xfbSize << " sizeInLocations=" << layout.blocks[ndx].locationSize;

    src << "\n{\n";

    Traverser<const InterfaceBlockMember> interfaces(block.begin(), block.end(), shuffleUniformMembers);

    while (const InterfaceBlockMember *pUniform = interfaces.next())
    {
        // Each member gets its absolute xfb_offset: block base offset + member offset.
        src << Indent(indentOne);
        generateDeclaration(src, *pUniform, indentOne, xfbBuffer, xfbStride,
                            xfbOffset + getBlockMemberOffset(blockNdx, block, *pUniform, layout));
    }

    src << "}";

    if (block.hasInstanceName())
    {
        src << " " << block.getInstanceName();
        if (block.isArray())
            src << "[" << block.getArraySize() << "]";
    }
    else
        DE_ASSERT(!block.isArray()); // GLSL requires an instance name for block arrays

    src << ";\n";
}
1236
// Emits a GLSL literal/constructor expression for one element of the given
// interface entry, reading component values from the reference data at basePtr.
// Returns the byte offset (relative to basePtr) of the element that was read.
int generateValueSrc(std::ostringstream &src, const InterfaceLayoutEntry &entry, const void *basePtr, int elementNdx)
{
    const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
    const int scalarSize           = glu::getDataTypeScalarSize(entry.type);
    const bool isArray             = entry.arraySize > 1;
    const uint8_t *elemPtr = (const uint8_t *)basePtr + entry.offset + (isArray ? elementNdx * entry.arrayStride : 0);
    const size_t compSize  = getDataTypeByteSize(scalarType);

    // Multi-component values are wrapped in a type constructor, e.g. "vec4(...)".
    if (scalarSize > 1)
        src << glu::getDataTypeName(entry.type) << "(";

    if (glu::isDataTypeMatrix(entry.type))
    {
        const int numRows = glu::getDataTypeMatrixNumRows(entry.type);
        const int numCols = glu::getDataTypeMatrixNumColumns(entry.type);

        DE_ASSERT(scalarType == glu::TYPE_FLOAT || scalarType == glu::TYPE_DOUBLE);

        // Constructed in column-wise order.
        for (int colNdx = 0; colNdx < numCols; colNdx++)
        {
            for (int rowNdx = 0; rowNdx < numRows; rowNdx++)
            {
                const uint8_t *compPtr = elemPtr + (colNdx * entry.matrixStride + rowNdx * compSize);
                const float compVal = (scalarType == glu::TYPE_FLOAT) ? *((const float *)compPtr) :
                                      (scalarType == glu::TYPE_DOUBLE) ? (float)*((const double *)compPtr) :
                                      0.0f;

                if (colNdx > 0 || rowNdx > 0)
                    src << ", ";

                src << de::floatToString(compVal, 1);
            }
        }
    }
    else
    {
        for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
        {
            const uint8_t *compPtr = elemPtr + scalarNdx * compSize;

            if (scalarNdx > 0)
                src << ", ";

            switch (scalarType)
            {
            case glu::TYPE_DOUBLE:
                // Doubles are printed without a suffix (values generated here are small integers).
                src << de::floatToString((float)(*((const double *)compPtr)), 1);
                break;
            case glu::TYPE_FLOAT:
                src << de::floatToString(*((const float *)compPtr), 1) << "f";
                break;
            case glu::TYPE_INT:
                src << *((const int *)compPtr);
                break;
            case glu::TYPE_UINT:
                src << *((const uint32_t *)compPtr) << "u";
                break;
            default:
                DE_ASSERT(false && "Invalid type");
                break;
            }
        }
    }

    if (scalarSize > 1)
        src << ")";

    return static_cast<int>(elemPtr - static_cast<const uint8_t *>(basePtr));
}
1307
// Emits assignments for a matrix output either one column vector at a time
// (vector == true, the generateTestSrcMatrixPerVec path) or one scalar element
// at a time (vector == false, the generateTestSrcMatrixPerElement path),
// reading values from the reference data at basePtr + entry.offset.
void writeMatrixTypeSrc(int columnCount, int rowCount, std::string type, std::ostringstream &src,
                        const std::string &srcName, const void *basePtr, const InterfaceLayoutEntry &entry, bool vector)
{
    if (vector) // generateTestSrcMatrixPerVec
    {
        for (int colNdx = 0; colNdx < columnCount; colNdx++)
        {
            src << "\t" << srcName << "[" << colNdx << "] = ";

            if (glu::isDataTypeMatrix(entry.type))
            {
                const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
                const int scalarSize           = glu::getDataTypeScalarSize(entry.type);
                const uint8_t *compPtr         = (const uint8_t *)basePtr + entry.offset;

                if (scalarSize > 1)
                    src << type << "(";

                // NOTE(review): compPtr is not advanced per column/row in this branch, so
                // every emitted component reads the matrix's first component — unlike the
                // per-element branch below, which applies matrixStride and compSize.
                // Confirm whether this is intentional for the entries that reach this path.
                for (int rowNdx = 0; rowNdx < rowCount; rowNdx++)
                {
                    const float compVal = (scalarType == glu::TYPE_FLOAT) ? *((const float *)compPtr) :
                                          (scalarType == glu::TYPE_DOUBLE) ? (float)*((const double *)compPtr) :
                                          0.0f;

                    src << de::floatToString(compVal, 1);

                    if (rowNdx < rowCount - 1)
                        src << ", ";
                }

                src << ");\n";
            }
            else
            {
                // Non-matrix entry: emit the whole value and index out the column.
                generateValueSrc(src, entry, basePtr, 0);
                src << "[" << colNdx << "];\n";
            }
        }
    }
    else // generateTestSrcMatrixPerElement
    {
        const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);

        for (int colNdx = 0; colNdx < columnCount; colNdx++)
        {
            for (int rowNdx = 0; rowNdx < rowCount; rowNdx++)
            {
                src << "\t" << srcName << "[" << colNdx << "][" << rowNdx << "] = ";
                if (glu::isDataTypeMatrix(entry.type))
                {
                    // Address the exact component: column via matrixStride, row via component size.
                    const uint8_t *elemPtr = (const uint8_t *)basePtr + entry.offset;
                    const size_t compSize  = getDataTypeByteSize(scalarType);
                    const uint8_t *compPtr = elemPtr + (colNdx * entry.matrixStride + rowNdx * compSize);
                    const float compVal = (scalarType == glu::TYPE_FLOAT) ? *((const float *)compPtr) :
                                          (scalarType == glu::TYPE_DOUBLE) ? (float)*((const double *)compPtr) :
                                          0.0f;

                    src << de::floatToString(compVal, 1) << ";\n";
                }
                else
                {
                    generateValueSrc(src, entry, basePtr, 0);
                    src << "[" << colNdx << "][" << rowNdx << "];\n";
                }
            }
        }
    }
}
1376
generateTestSrcMatrixPerVec(std::ostringstream & src,glu::DataType elementType,const std::string & srcName,const void * basePtr,const InterfaceLayoutEntry & entry)1377 void generateTestSrcMatrixPerVec(std::ostringstream &src, glu::DataType elementType, const std::string &srcName,
1378 const void *basePtr, const InterfaceLayoutEntry &entry)
1379 {
1380 switch (elementType)
1381 {
1382 case glu::TYPE_FLOAT_MAT2:
1383 writeMatrixTypeSrc(2, 2, "vec2", src, srcName, basePtr, entry, true);
1384 break;
1385 case glu::TYPE_FLOAT_MAT2X3:
1386 writeMatrixTypeSrc(2, 3, "vec3", src, srcName, basePtr, entry, true);
1387 break;
1388 case glu::TYPE_FLOAT_MAT2X4:
1389 writeMatrixTypeSrc(2, 4, "vec4", src, srcName, basePtr, entry, true);
1390 break;
1391 case glu::TYPE_FLOAT_MAT3X4:
1392 writeMatrixTypeSrc(3, 4, "vec4", src, srcName, basePtr, entry, true);
1393 break;
1394 case glu::TYPE_FLOAT_MAT4:
1395 writeMatrixTypeSrc(4, 4, "vec4", src, srcName, basePtr, entry, true);
1396 break;
1397 case glu::TYPE_FLOAT_MAT4X2:
1398 writeMatrixTypeSrc(4, 2, "vec2", src, srcName, basePtr, entry, true);
1399 break;
1400 case glu::TYPE_FLOAT_MAT4X3:
1401 writeMatrixTypeSrc(4, 3, "vec3", src, srcName, basePtr, entry, true);
1402 break;
1403 default:
1404 DE_ASSERT(false && "Invalid type");
1405 break;
1406 }
1407 }
1408
generateTestSrcMatrixPerElement(std::ostringstream & src,glu::DataType elementType,const std::string & srcName,const void * basePtr,const InterfaceLayoutEntry & entry)1409 void generateTestSrcMatrixPerElement(std::ostringstream &src, glu::DataType elementType, const std::string &srcName,
1410 const void *basePtr, const InterfaceLayoutEntry &entry)
1411 {
1412 std::string type = "float";
1413 switch (elementType)
1414 {
1415 case glu::TYPE_FLOAT_MAT2:
1416 writeMatrixTypeSrc(2, 2, type, src, srcName, basePtr, entry, false);
1417 break;
1418 case glu::TYPE_FLOAT_MAT2X3:
1419 writeMatrixTypeSrc(2, 3, type, src, srcName, basePtr, entry, false);
1420 break;
1421 case glu::TYPE_FLOAT_MAT2X4:
1422 writeMatrixTypeSrc(2, 4, type, src, srcName, basePtr, entry, false);
1423 break;
1424 case glu::TYPE_FLOAT_MAT3X4:
1425 writeMatrixTypeSrc(3, 4, type, src, srcName, basePtr, entry, false);
1426 break;
1427 case glu::TYPE_FLOAT_MAT4:
1428 writeMatrixTypeSrc(4, 4, type, src, srcName, basePtr, entry, false);
1429 break;
1430 case glu::TYPE_FLOAT_MAT4X2:
1431 writeMatrixTypeSrc(4, 2, type, src, srcName, basePtr, entry, false);
1432 break;
1433 case glu::TYPE_FLOAT_MAT4X3:
1434 writeMatrixTypeSrc(4, 3, type, src, srcName, basePtr, entry, false);
1435 break;
1436 default:
1437 DE_ASSERT(false && "Invalid type");
1438 break;
1439 }
1440 }
1441
generateSingleAssignment(std::ostringstream & src,glu::DataType elementType,const std::string & srcName,const void * basePtr,const InterfaceLayoutEntry & entry,MatrixLoadFlags matrixLoadFlag)1442 void generateSingleAssignment(std::ostringstream &src, glu::DataType elementType, const std::string &srcName,
1443 const void *basePtr, const InterfaceLayoutEntry &entry, MatrixLoadFlags matrixLoadFlag)
1444 {
1445 if (matrixLoadFlag == LOAD_FULL_MATRIX)
1446 {
1447 src << "\t" << srcName << " = ";
1448 generateValueSrc(src, entry, basePtr, 0);
1449 src << ";\n";
1450 }
1451 else
1452 {
1453 if (glu::isDataTypeMatrix(elementType))
1454 {
1455 generateTestSrcMatrixPerVec(src, elementType, srcName, basePtr, entry);
1456 generateTestSrcMatrixPerElement(src, elementType, srcName, basePtr, entry);
1457 }
1458 }
1459 }
1460
// Recursively emits assignment statements writing the reference values into the
// shader output variable `srcName`. `apiName` is the matching lookup name used
// by the layout's interface entries (block-name based; arrays postfixed [0]).
// Nested arrays/structs recurse with LOAD_FULL_MATRIX; the caller-supplied
// matrixLoadFlag only applies to the top-level basic value.
void generateAssignment(std::ostringstream &src, const InterfaceLayout &layout, const VarType &type,
                        const std::string &srcName, const std::string &apiName, int blockNdx, const void *basePtr,
                        MatrixLoadFlags matrixLoadFlag)
{
    if (type.isBasicType() || (type.isArrayType() && type.getElementType().isBasicType()))
    {
        // Basic type or array of basic types.
        bool isArray              = type.isArrayType();
        glu::DataType elementType = isArray ? type.getElementType().getBasicType() : type.getBasicType();
        std::string fullApiName = std::string(apiName) + (isArray ? "[0]" : ""); // Arrays are always postfixed with [0]
        int interfaceLayoutNdx  = layout.getInterfaceLayoutIndex(blockNdx, fullApiName);
        const InterfaceLayoutEntry &entry = layout.interfaces[interfaceLayoutNdx];

        if (isArray)
        {
            // One assignment per element; each reads its own slice of the reference data.
            for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
            {
                src << "\t" << srcName << "[" << elemNdx << "] = ";
                generateValueSrc(src, entry, basePtr, elemNdx);
                src << ";\n";
            }
        }
        else
        {
            generateSingleAssignment(src, elementType, srcName, basePtr, entry, matrixLoadFlag);
        }
    }
    else if (type.isArrayType())
    {
        // Array of aggregates: recurse per element.
        const VarType &elementType = type.getElementType();

        for (int elementNdx = 0; elementNdx < type.getArraySize(); elementNdx++)
        {
            const std::string op             = std::string("[") + de::toString(elementNdx) + "]";
            const std::string elementSrcName = std::string(srcName) + op;
            const std::string elementApiName = std::string(apiName) + op;

            generateAssignment(src, layout, elementType, elementSrcName, elementApiName, blockNdx, basePtr,
                               LOAD_FULL_MATRIX);
        }
    }
    else
    {
        DE_ASSERT(type.isStructType());

        // Struct: recurse per member, skipping members that are missing or deliberately unassigned.
        for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end();
             memberIter++)
        {
            const StructMember &member      = *memberIter;
            const std::string op            = std::string(".") + member.getName();
            const std::string memberSrcName = std::string(srcName) + op;
            const std::string memberApiName = std::string(apiName) + op;

            if (0 == (member.getFlags() & (FIELD_UNASSIGNED | FIELD_MISSING)))
                generateAssignment(src, layout, memberIter->getType(), memberSrcName, memberApiName, blockNdx, basePtr,
                                   LOAD_FULL_MATRIX);
        }
    }
}
1520
// Emits assignments for every member of every interface block instance.
// srcName is built from the instance name ("inst[i].member") while apiName uses
// the block name, matching how InterfaceLayoutEntry names are constructed.
// Members flagged FIELD_MISSING or FIELD_UNASSIGNED are skipped entirely.
void generateAssignment(std::ostringstream &src, const InterfaceLayout &layout, const ShaderInterface &shaderInterface,
                        const std::map<int, void *> &blockPointers, MatrixLoadFlags matrixLoadFlag)
{
    for (int blockNdx = 0; blockNdx < shaderInterface.getNumInterfaceBlocks(); blockNdx++)
    {
        const InterfaceBlock &block = shaderInterface.getInterfaceBlock(blockNdx);

        bool hasInstanceName  = block.hasInstanceName();
        bool isArray          = block.isArray();
        int numInstances      = isArray ? block.getArraySize() : 1;
        std::string apiPrefix = hasInstanceName ? block.getBlockName() + "." : std::string("");

        // GLSL requires an instance name for arrays of blocks.
        DE_ASSERT(!isArray || hasInstanceName);

        for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
        {
            std::string instancePostfix =
                isArray ? std::string("[") + de::toString(instanceNdx) + "]" : std::string("");
            std::string blockInstanceName = block.getBlockName() + instancePostfix;
            std::string srcPrefix = hasInstanceName ? block.getInstanceName() + instancePostfix + "." : std::string("");
            int blockLayoutNdx = layout.getBlockLayoutIndex(blockNdx, instanceNdx);
            // Reference data for this particular block instance.
            void *basePtr = blockPointers.find(blockLayoutNdx)->second;

            for (InterfaceBlock::ConstIterator interfaceMemberIter = block.begin(); interfaceMemberIter != block.end();
                 interfaceMemberIter++)
            {
                const InterfaceBlockMember &interfaceMember = *interfaceMemberIter;

                if ((interfaceMember.getFlags() & (FIELD_MISSING | FIELD_UNASSIGNED)) == 0)
                {
                    std::string srcName = srcPrefix + interfaceMember.getName();
                    std::string apiName = apiPrefix + interfaceMember.getName();

                    generateAssignment(src, layout, interfaceMember.getType(), srcName, apiName, blockNdx, basePtr,
                                       matrixLoadFlag);
                }
            }
        }
    }
}
1561
generatePassthroughShader()1562 std::string generatePassthroughShader()
1563 {
1564 std::ostringstream src;
1565
1566 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n";
1567
1568 src << "\n"
1569 "void main (void)\n"
1570 "{\n"
1571 "}\n";
1572
1573 return src.str();
1574 }
1575
// Builds the complete GLSL source for the stage under test: named struct
// declarations, one out-block declaration per interface block, and a main()
// that assigns the reference values to every captured member. Geometry-stage
// shaders additionally declare point I/O and emit a single vertex.
std::string generateTestShader(const ShaderInterface &shaderInterface, const InterfaceLayout &layout,
                               const std::map<int, void *> &blockPointers, MatrixLoadFlags matrixLoadFlag,
                               TestStageFlags testStageFlags, bool shuffleUniformMembers)
{
    std::ostringstream src;
    std::vector<const StructType *> namedStructs;

    src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n\n";

    if (testStageFlags == TEST_STAGE_GEOMETRY)
    {
        src << "layout(points) in;\n"
            << "layout(points, max_vertices = 1) out;\n\n";
    }

    // Named structs must be declared before the blocks that reference them.
    shaderInterface.getNamedStructs(namedStructs);
    for (std::vector<const StructType *>::const_iterator structIter = namedStructs.begin();
         structIter != namedStructs.end(); structIter++)
        generateDeclaration(src, **structIter, 0);

    for (int blockNdx = 0; blockNdx < shaderInterface.getNumInterfaceBlocks(); blockNdx++)
    {
        const InterfaceBlock &block = shaderInterface.getInterfaceBlock(blockNdx);

        generateDeclaration(src, blockNdx, block, layout, shuffleUniformMembers);
    }

    src << "\n"
           "void main (void)\n"
           "{\n";

    generateAssignment(src, layout, shaderInterface, blockPointers, matrixLoadFlag);

    if (testStageFlags == TEST_STAGE_GEOMETRY)
    {
        src << "\n"
            << "\tEmitVertex();\n"
            << "\tEndPrimitive();\n";
    }

    src << "}\n";

    return src.str();
}
1620
// Builds a point-list graphics pipeline with no vertex input bindings or
// attributes; vertex and (optional) geometry modules are forwarded to the
// generic pipeline helper, all other stage slots are left empty.
Move<VkPipeline> makeGraphicsPipeline(const DeviceInterface &vk, const VkDevice device,
                                      const VkPipelineLayout pipelineLayout, const VkRenderPass renderPass,
                                      const VkShaderModule vertexModule, const VkShaderModule geometryModule,
                                      const VkExtent2D renderSize)
{
    const std::vector<VkViewport> viewports(1, makeViewport(renderSize));
    const std::vector<VkRect2D> scissors(1, makeRect2D(renderSize));
    // Empty vertex input state: the test shaders declare no vertex attributes.
    const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType
        DE_NULL,                                                   // const void* pNext
        (VkPipelineVertexInputStateCreateFlags)0,                  // VkPipelineVertexInputStateCreateFlags flags
        0u,                                                        // uint32_t vertexBindingDescriptionCount
        DE_NULL, // const VkVertexInputBindingDescription* pVertexBindingDescriptions
        0u,      // uint32_t vertexAttributeDescriptionCount
        DE_NULL, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions
    };

    return makeGraphicsPipeline(
        vk,                                // const DeviceInterface& vk
        device,                            // const VkDevice device
        pipelineLayout,                    // const VkPipelineLayout pipelineLayout
        vertexModule,                      // const VkShaderModule vertexShaderModule
        DE_NULL,                           // const VkShaderModule tessellationControlModule
        DE_NULL,                           // const VkShaderModule tessellationEvalModule
        geometryModule,                    // const VkShaderModule geometryShaderModule
        DE_NULL,                           // const VkShaderModule m_maxGeometryBlocksShaderModule
        renderPass,                        // const VkRenderPass renderPass
        viewports,                         // const std::vector<VkViewport>& viewports
        scissors,                          // const std::vector<VkRect2D>& scissors
        VK_PRIMITIVE_TOPOLOGY_POINT_LIST,  // const VkPrimitiveTopology topology
        0u,                                // const uint32_t subpass
        0u,                                // const uint32_t patchControlPoints
        &vertexInputStateCreateInfo);      // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
}
1655
1656 // InterfaceBlockCaseInstance
1657
1658 class InterfaceBlockCaseInstance : public vkt::TestInstance
1659 {
1660 public:
1661 InterfaceBlockCaseInstance(Context &context, const InterfaceLayout &layout,
1662 const std::map<int, void *> &blockPointers, const std::vector<uint8_t> &data,
1663 const std::vector<VkDeviceSize> &tfBufBindingOffsets,
1664 const std::vector<VkDeviceSize> &tfBufBindingSizes, const uint32_t locationsRequired,
1665 const TestStageFlags testStageFlags);
1666
1667 virtual ~InterfaceBlockCaseInstance(void);
1668 virtual tcu::TestStatus iterate(void);
1669
1670 private:
1671 Move<VkShaderModule> getGeometryShaderModule(const DeviceInterface &vk, const VkDevice device);
1672
1673 bool usesFloat64(void);
1674 std::string validateValue(const InterfaceLayoutEntry &entry, const void *basePtr0, const void *basePtr,
1675 const void *receivedBasePtr);
1676 std::string validateValues(const void *recievedDataPtr);
1677
1678 typedef de::SharedPtr<vk::Unique<vk::VkBuffer>> VkBufferSp;
1679 typedef de::SharedPtr<vk::Allocation> AllocationSp;
1680
1681 const InterfaceLayout &m_layout;
1682 const std::vector<uint8_t> &m_data;
1683 const DeviceSizeVector &m_tfBufBindingOffsets;
1684 const DeviceSizeVector &m_tfBufBindingSizes;
1685 const std::map<int, void *> &m_blockPointers;
1686 const uint32_t m_locationsRequired;
1687 const TestStageFlags m_testStageFlags;
1688 const VkExtent2D m_imageExtent2D;
1689 };
1690
// Test instance for a single fuzzed transform feedback layout case.
//
// Keeps references to the reference layout, the expected byte image of all xfb
// buffers (data), per-block pointers into that image, and the per-binding
// offset/size arrays computed by the case. The constructor performs every
// support check up front so unsupported configurations throw
// NotSupportedError before any Vulkan objects are created.
InterfaceBlockCaseInstance::InterfaceBlockCaseInstance(Context &ctx, const InterfaceLayout &layout,
                                                       const std::map<int, void *> &blockPointers,
                                                       const std::vector<uint8_t> &data,
                                                       const std::vector<VkDeviceSize> &tfBufBindingOffsets,
                                                       const std::vector<VkDeviceSize> &tfBufBindingSizes,
                                                       const uint32_t locationsRequired,
                                                       const TestStageFlags testStageFlags)
    : vkt::TestInstance(ctx)
    , m_layout(layout)
    , m_data(data)
    , m_tfBufBindingOffsets(tfBufBindingOffsets)
    , m_tfBufBindingSizes(tfBufBindingSizes)
    , m_blockPointers(blockPointers)
    , m_locationsRequired(locationsRequired)
    , m_testStageFlags(testStageFlags)
    , m_imageExtent2D(makeExtent2D(256u, 256u))
{
    const uint32_t componentsPerLocation = 4u;
    const uint32_t componentsRequired =
        m_locationsRequired * componentsPerLocation + 7u; // Add 7 for built-in components
    const InstanceInterface &vki = m_context.getInstanceInterface();
    const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
    const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(vki, physDevice);
    const VkPhysicalDeviceTransformFeedbackFeaturesEXT &transformFeedbackFeatures =
        m_context.getTransformFeedbackFeaturesEXT();
    const VkPhysicalDeviceLimits limits = getPhysicalDeviceProperties(vki, physDevice).limits;
    VkPhysicalDeviceTransformFeedbackPropertiesEXT transformFeedbackProperties;
    VkPhysicalDeviceProperties2 deviceProperties2;

    if (transformFeedbackFeatures.transformFeedback == false)
        TCU_THROW(NotSupportedError, "transformFeedback feature is not supported");

    // Query the EXT transform feedback limits by chaining the properties
    // struct into vkGetPhysicalDeviceProperties2.
    deMemset(&deviceProperties2, 0, sizeof(deviceProperties2));
    deMemset(&transformFeedbackProperties, 0x00, sizeof(transformFeedbackProperties));

    deviceProperties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
    deviceProperties2.pNext = &transformFeedbackProperties;

    transformFeedbackProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT;
    transformFeedbackProperties.pNext = DE_NULL;

    vki.getPhysicalDeviceProperties2(physDevice, &deviceProperties2);

    // One binding per xfb buffer used by the generated layout.
    if (transformFeedbackProperties.maxTransformFeedbackBuffers < tfBufBindingSizes.size())
        TCU_THROW(NotSupportedError, "maxTransformFeedbackBuffers=" +
                                         de::toString(transformFeedbackProperties.maxTransformFeedbackBuffers) +
                                         " is less than required (" + de::toString(tfBufBindingSizes.size()) + ")");

    // m_data holds the concatenated contents of all xfb buffers.
    if (transformFeedbackProperties.maxTransformFeedbackBufferDataSize < m_data.size())
        TCU_THROW(NotSupportedError, "maxTransformFeedbackBufferDataSize=" +
                                         de::toString(transformFeedbackProperties.maxTransformFeedbackBufferDataSize) +
                                         " is less than required (" + de::toString(m_data.size()) + ")");

    // Output-component limits are checked against the stage that actually
    // emits the captured outputs.
    if (m_testStageFlags == TEST_STAGE_VERTEX)
    {
        if (limits.maxVertexOutputComponents < componentsRequired)
            TCU_THROW(NotSupportedError, "maxVertexOutputComponents=" + de::toString(limits.maxVertexOutputComponents) +
                                             " is less than required (" + de::toString(componentsRequired) + ")");
    }

    if (m_testStageFlags == TEST_STAGE_GEOMETRY)
    {
        if (!features.geometryShader)
            TCU_THROW(NotSupportedError, "Missing feature: geometryShader");

        if (limits.maxGeometryOutputComponents < componentsRequired)
            TCU_THROW(NotSupportedError,
                      "maxGeometryOutputComponents=" + de::toString(limits.maxGeometryOutputComponents) +
                          " is less than required (" + de::toString(componentsRequired) + ")");
    }

    // Double-typed interface members need shaderFloat64.
    if (usesFloat64())
        m_context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_FLOAT64);
}
1765
~InterfaceBlockCaseInstance(void)1766 InterfaceBlockCaseInstance::~InterfaceBlockCaseInstance(void)
1767 {
1768 }
1769
usesFloat64(void)1770 bool InterfaceBlockCaseInstance::usesFloat64(void)
1771 {
1772 for (size_t layoutNdx = 0; layoutNdx < m_layout.interfaces.size(); ++layoutNdx)
1773 if (isDataTypeDoubleType(m_layout.interfaces[layoutNdx].type))
1774 return true;
1775
1776 return false;
1777 }
1778
getGeometryShaderModule(const DeviceInterface & vk,const VkDevice device)1779 Move<VkShaderModule> InterfaceBlockCaseInstance::getGeometryShaderModule(const DeviceInterface &vk,
1780 const VkDevice device)
1781 {
1782 if (m_testStageFlags == TEST_STAGE_GEOMETRY)
1783 return createShaderModule(vk, device, m_context.getBinaryCollection().get("geom"), 0u);
1784
1785 return Move<VkShaderModule>();
1786 }
1787
// Runs the test: records a single draw with transform feedback capturing into
// one host-visible buffer (bound once per xfb binding with the precomputed
// offsets/sizes), then reads the buffer back and validates it against the
// reference data. Returns pass/fail accordingly.
tcu::TestStatus InterfaceBlockCaseInstance::iterate(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
    const VkQueue queue = m_context.getUniversalQueue();
    Allocator &allocator = m_context.getDefaultAllocator();

    // Pipeline setup: no color attachment (VK_FORMAT_UNDEFINED render pass);
    // the geometry module is a null handle unless the geometry stage is tested.
    const Move<VkShaderModule> vertModule(
        createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0u));
    const Move<VkShaderModule> geomModule(getGeometryShaderModule(vk, device));
    const Move<VkRenderPass> renderPass(makeRenderPass(vk, device, VK_FORMAT_UNDEFINED));
    const Move<VkFramebuffer> framebuffer(
        makeFramebuffer(vk, device, *renderPass, 0u, DE_NULL, m_imageExtent2D.width, m_imageExtent2D.height));
    const Move<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device));
    const Move<VkPipeline> pipeline(
        makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertModule, *geomModule, m_imageExtent2D));
    const Move<VkCommandPool> cmdPool(
        createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
    const Move<VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

    // Single buffer sized for all xfb buffers; each binding points at the same
    // VkBuffer with its own offset/size.
    const VkBufferCreateInfo tfBufCreateInfo = makeBufferCreateInfo(
        m_data.size(), VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT);
    const Move<VkBuffer> tfBuf = createBuffer(vk, device, &tfBufCreateInfo);
    const de::MovePtr<Allocation> tfBufAllocation =
        allocator.allocate(getBufferMemoryRequirements(vk, device, *tfBuf), MemoryRequirement::HostVisible);
    const uint32_t tfBufBindingCount = static_cast<uint32_t>(m_tfBufBindingOffsets.size());
    const std::vector<VkBuffer> tfBufBindings(tfBufBindingCount, *tfBuf);

    DE_ASSERT(tfBufBindings.size() == tfBufBindingCount);

    VK_CHECK(vk.bindBufferMemory(device, *tfBuf, tfBufAllocation->getMemory(), tfBufAllocation->getOffset()));

    // Zero the capture buffer so stale memory can never match the reference.
    deMemset(tfBufAllocation->getHostPtr(), 0, m_data.size());
    flushMappedMemoryRange(vk, device, tfBufAllocation->getMemory(), tfBufAllocation->getOffset(), VK_WHOLE_SIZE);

    beginCommandBuffer(vk, *cmdBuffer);
    {
        beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(m_imageExtent2D));
        {
            vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);

            vk.cmdBindTransformFeedbackBuffersEXT(*cmdBuffer, 0, tfBufBindingCount, &tfBufBindings[0],
                                                  &m_tfBufBindingOffsets[0], &m_tfBufBindingSizes[0]);

            // Capture exactly one vertex/invocation worth of outputs.
            vk.cmdBeginTransformFeedbackEXT(*cmdBuffer, 0, 0, DE_NULL, DE_NULL);
            {
                vk.cmdDraw(*cmdBuffer, 1u, 1u, 0u, 0u);
            }
            vk.cmdEndTransformFeedbackEXT(*cmdBuffer, 0, 0, DE_NULL, DE_NULL);
        }
        endRenderPass(vk, *cmdBuffer);

        // Make the xfb writes visible to the host read-back below.
        const VkMemoryBarrier tfMemoryBarrier = {
            VK_STRUCTURE_TYPE_MEMORY_BARRIER, // VkStructureType sType;
            DE_NULL, // const void* pNext;
            VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, // VkAccessFlags outputMask;
            VK_ACCESS_HOST_READ_BIT // VkAccessFlags inputMask;
        };
        vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
                              1u, &tfMemoryBarrier, 0u, DE_NULL, 0u, DE_NULL);
    }
    endCommandBuffer(vk, *cmdBuffer);
    submitCommandsAndWait(vk, device, queue, *cmdBuffer);

    invalidateMappedMemoryRange(vk, device, tfBufAllocation->getMemory(), tfBufAllocation->getOffset(), VK_WHOLE_SIZE);

    std::string result = validateValues(tfBufAllocation->getHostPtr());

    if (!result.empty())
        return tcu::TestStatus::fail(result);

    return tcu::TestStatus::pass("Pass");
}
1862
// Validates one interface entry against the captured buffer.
//
// Walks every component of the entry (array element x matrix column x vector
// component), computing its byte address from entry.offset, arrayStride and
// matrixStride relative to basePtr, then compares the expected value there
// with the byte at the same overall offset (relative to basePtr0, the start
// of the whole reference image) in receivedBasePtr.
//
// Returns an empty string on success, or a description of the first mismatch.
std::string InterfaceBlockCaseInstance::validateValue(const InterfaceLayoutEntry &entry, const void *basePtr0,
                                                      const void *basePtr, const void *receivedBasePtr)
{
    const glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
    const int scalarSize = glu::getDataTypeScalarSize(entry.type);
    const bool isMatrix = glu::isDataTypeMatrix(entry.type);
    const int numVecs = isMatrix ? glu::getDataTypeMatrixNumColumns(entry.type) : 1;
    const int vecSize = scalarSize / numVecs; // components per column (or per vector)
    const bool isArray = entry.arraySize > 1;
    const size_t compSize = getDataTypeByteSize(scalarType);
    std::string result;

    DE_ASSERT(scalarSize % numVecs == 0);

    for (int elemNdx = 0; elemNdx < entry.arraySize; elemNdx++)
    {
        // Non-arrays ignore arrayStride (single element at entry.offset).
        uint8_t *elemPtr = (uint8_t *)basePtr + entry.offset + (isArray ? elemNdx * entry.arrayStride : 0);

        for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
        {
            // Non-matrices ignore matrixStride (single column).
            uint8_t *vecPtr = elemPtr + (isMatrix ? vecNdx * entry.matrixStride : 0);

            for (int compNdx = 0; compNdx < vecSize; compNdx++)
            {
                const uint8_t *compPtr = vecPtr + compSize * compNdx;
                // Offset within the whole reference image; the received buffer
                // has identical layout, so the same offset addresses the
                // captured component.
                const size_t offset = compPtr - (uint8_t *)basePtr0;
                const uint8_t *receivedPtr = (uint8_t *)receivedBasePtr + offset;

                switch (scalarType)
                {
                case glu::TYPE_DOUBLE:
                {
                    const double expected = *((double *)compPtr);
                    const double received = *((double *)receivedPtr);

                    // Tolerance-based compare for floating point capture.
                    if (deAbs(received - expected) > 0.05)
                        result = "Mismatch at offset " + de::toString(offset) + " expected " + de::toString(expected) +
                                 " received " + de::toString(received);

                    break;
                }
                case glu::TYPE_FLOAT:
                {
                    const float expected = *((float *)compPtr);
                    const float received = *((float *)receivedPtr);

                    if (deAbs(received - expected) > 0.05)
                        result = "Mismatch at offset " + de::toString(offset) + " expected " + de::toString(expected) +
                                 " received " + de::toString(received);

                    break;
                }
                case glu::TYPE_INT:
                {
                    // Integers must match exactly.
                    const int32_t expected = *((int32_t *)compPtr);
                    const int32_t received = *((int32_t *)receivedPtr);

                    if (received != expected)
                        result = "Mismatch at offset " + de::toString(offset) + " expected " + de::toString(expected) +
                                 " received " + de::toString(received);

                    break;
                }
                case glu::TYPE_UINT:
                {
                    const uint32_t expected = *((uint32_t *)compPtr);
                    const uint32_t received = *((uint32_t *)receivedPtr);

                    if (received != expected)
                        result = "Mismatch at offset " + de::toString(offset) + " expected " + de::toString(expected) +
                                 " received " + de::toString(received);

                    break;
                }
                default:
                    // Only double/float/int/uint scalar types are generated.
                    DE_ASSERT(false);
                }

                if (!result.empty())
                {
                    // Annotate with the indices of the failing component and
                    // stop at the first mismatch.
                    result += " (elemNdx=" + de::toString(elemNdx) + " vecNdx=" + de::toString(vecNdx) +
                              " compNdx=" + de::toString(compNdx) + ")";

                    return result;
                }
            }
        }
    }

    return result;
}
1954
validateValues(const void * recievedDataPtr)1955 std::string InterfaceBlockCaseInstance::validateValues(const void *recievedDataPtr)
1956 {
1957 const int numBlocks = (int)m_layout.blocks.size();
1958
1959 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1960 {
1961 void *basePtr = m_blockPointers.find(blockNdx)->second;
1962 int numEntries = (int)m_layout.blocks[blockNdx].activeInterfaceIndices.size();
1963
1964 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
1965 {
1966 const InterfaceLayoutEntry &entry =
1967 m_layout.interfaces[m_layout.blocks[blockNdx].activeInterfaceIndices[entryNdx]];
1968 const std::string result = entry.validate ? validateValue(entry, &m_data[0], basePtr, recievedDataPtr) : "";
1969
1970 if (!result.empty())
1971 {
1972 tcu::TestLog &log = m_context.getTestContext().getLog();
1973 std::vector<uint8_t> mask = createMask(m_layout, m_blockPointers, &m_data[0], m_data.size());
1974 std::ostringstream str;
1975
1976 str << "Error at entry '" << entry.name << "' block '" << m_layout.blocks[blockNdx].name << "'"
1977 << std::endl;
1978 str << result << std::endl;
1979
1980 str << m_layout;
1981
1982 str << "Xfb buffer offsets: " << m_tfBufBindingOffsets << std::endl;
1983 str << "Xfb buffer sizes: " << m_tfBufBindingSizes << std::endl << std::endl;
1984
1985 dumpBytes(str, "Expected:", &m_data[0], m_data.size(), &mask[0]);
1986 dumpBytes(str, "Retrieved:", recievedDataPtr, m_data.size(), &mask[0]);
1987
1988 dumpBytes(str, "Expected (unfiltered):", &m_data[0], m_data.size());
1989 dumpBytes(str, "Retrieved (unfiltered):", recievedDataPtr, m_data.size());
1990
1991 log << tcu::TestLog::Message << str.str() << tcu::TestLog::EndMessage;
1992
1993 return result;
1994 }
1995 }
1996 }
1997
1998 return std::string();
1999 }
2000
2001 } // namespace
2002
2003 // InterfaceBlockCase.
2004
// Test case constructor: just records the configuration (matrix load style,
// tested shader stage, member shuffling). The actual layout, reference data
// and shader sources are produced later in delayedInit().
InterfaceBlockCase::InterfaceBlockCase(tcu::TestContext &testCtx, const std::string &name,
                                       MatrixLoadFlags matrixLoadFlag, TestStageFlags testStageFlags,
                                       bool shuffleInterfaceMembers)
    : TestCase(testCtx, name)
    , m_matrixLoadFlag(matrixLoadFlag)
    , m_testStageFlags(testStageFlags)
    , m_shuffleInterfaceMembers(shuffleInterfaceMembers)
    , m_locationsRequired(0)
{
}
2015
~InterfaceBlockCase(void)2016 InterfaceBlockCase::~InterfaceBlockCase(void)
2017 {
2018 }
2019
initPrograms(vk::SourceCollections & programCollection) const2020 void InterfaceBlockCase::initPrograms(vk::SourceCollections &programCollection) const
2021 {
2022 DE_ASSERT(!m_vertShaderSource.empty());
2023
2024 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
2025
2026 if (!m_geomShaderSource.empty())
2027 programCollection.glslSources.add("geom") << glu::GeometrySource(m_geomShaderSource);
2028 }
2029
createInstance(Context & context) const2030 TestInstance *InterfaceBlockCase::createInstance(Context &context) const
2031 {
2032 return new InterfaceBlockCaseInstance(context, m_interfaceLayout, m_blockPointers, m_data, m_tfBufBindingOffsets,
2033 m_tfBufBindingSizes, m_locationsRequired, m_testStageFlags);
2034 }
2035
// Deferred initialization: computes the fuzzed xfb layout, sizes and fills the
// reference data image, derives the per-binding offset/size vectors, and
// generates the shader sources for the stage under test.
void InterfaceBlockCase::delayedInit(void)
{
    BufferGeneralMapping xfbBufferSize; // xfb buffer index -> size in bytes
    std::string notSupportedComment;

    // Compute reference layout.
    computeXfbLayout(m_interfaceLayout, m_interface, xfbBufferSize, m_locationsRequired);

    // Assign storage for reference values.
    // m_data contains all xfb buffers starting with all interfaces of first xfb_buffer, then all interfaces of next xfb_buffer
    {
        BufferGeneralMapping xfbBufferOffsets; // xfb buffer index -> start offset within m_data
        int totalSize = 0;
        int maxXfb = 0;

        // Lay the buffers out back-to-back in ascending xfb buffer order
        // (std::map iterates keys in sorted order).
        for (BufferGeneralMapping::const_iterator xfbBuffersIter = xfbBufferSize.begin();
             xfbBuffersIter != xfbBufferSize.end(); xfbBuffersIter++)
        {
            xfbBufferOffsets[xfbBuffersIter->first] = totalSize;
            totalSize += xfbBuffersIter->second;
            maxXfb = std::max(maxXfb, xfbBuffersIter->first);
        }
        m_data.resize(totalSize);

        DE_ASSERT(de::inBounds(maxXfb, 0, 256)); // Not correlated with spec: just make sure vectors won't be huge

        // Binding vectors are indexed directly by xfb buffer number, so they
        // are sized to cover the highest used buffer index.
        m_tfBufBindingSizes.resize(maxXfb + 1);
        for (BufferGeneralMapping::const_iterator xfbBuffersIter = xfbBufferSize.begin();
             xfbBuffersIter != xfbBufferSize.end(); xfbBuffersIter++)
            m_tfBufBindingSizes[xfbBuffersIter->first] = xfbBuffersIter->second;

        m_tfBufBindingOffsets.resize(maxXfb + 1);
        for (BufferGeneralMapping::const_iterator xfbBuffersIter = xfbBufferOffsets.begin();
             xfbBuffersIter != xfbBufferOffsets.end(); xfbBuffersIter++)
            m_tfBufBindingOffsets[xfbBuffersIter->first] = xfbBuffersIter->second;

        // Pointers for each block.
        for (int blockNdx = 0; blockNdx < (int)m_interfaceLayout.blocks.size(); blockNdx++)
        {
            // Block data lives at its buffer's base plus its xfbOffset.
            const int dataXfbBufferStartOffset = xfbBufferOffsets[m_interfaceLayout.blocks[blockNdx].xfbBuffer];
            const int offset = dataXfbBufferStartOffset + m_interfaceLayout.blocks[blockNdx].xfbOffset;

            m_blockPointers[blockNdx] = &m_data[0] + offset;
        }
    }

    // Generate values.
    generateValues(m_interfaceLayout, m_blockPointers, 1 /* seed */);

    // Overlap validation
    {
        // Every byte of m_data must be claimed by at most one entry.
        std::vector<uint8_t> mask = createMask(m_interfaceLayout, m_blockPointers, &m_data[0], m_data.size());

        for (size_t maskNdx = 0; maskNdx < mask.size(); ++maskNdx)
            DE_ASSERT(mask[maskNdx] <= 1);
    }

    // The stage under test gets the generated interface shader; for geometry
    // tests the vertex stage is a simple passthrough.
    if (m_testStageFlags == TEST_STAGE_VERTEX)
    {
        m_vertShaderSource = generateTestShader(m_interface, m_interfaceLayout, m_blockPointers, m_matrixLoadFlag,
                                                m_testStageFlags, m_shuffleInterfaceMembers);
        m_geomShaderSource = "";
    }
    else if (m_testStageFlags == TEST_STAGE_GEOMETRY)
    {
        m_vertShaderSource = generatePassthroughShader();
        m_geomShaderSource = generateTestShader(m_interface, m_interfaceLayout, m_blockPointers, m_matrixLoadFlag,
                                                m_testStageFlags, m_shuffleInterfaceMembers);
    }
    else
    {
        DE_ASSERT(false && "Unknown test stage specified");
    }
}
2110
2111 } // namespace TransformFeedback
2112 } // namespace vkt
2113