1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 Google Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Pipeline barrier tests
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktMemoryPipelineBarrierTests.hpp"
25
26 #include "vktTestCaseUtil.hpp"
27
28 #include "vkDefs.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkCmdUtil.hpp"
36 #include "vkObjUtil.hpp"
37
38 #include "tcuMaybe.hpp"
39 #include "tcuTextureUtil.hpp"
40 #include "tcuTestLog.hpp"
41 #include "tcuResultCollector.hpp"
42 #include "tcuTexture.hpp"
43 #include "tcuImageCompare.hpp"
44
45 #include "deUniquePtr.hpp"
46 #include "deStringUtil.hpp"
47 #include "deRandom.hpp"
48
49 #include "deInt32.h"
50 #include "deMath.h"
51 #include "deMemory.h"
52
53 #include <map>
54 #include <set>
55 #include <sstream>
56 #include <string>
57 #include <vector>
58
59 using tcu::Maybe;
60 using tcu::TestLog;
61
62 using de::MovePtr;
63
64 using std::map;
65 using std::pair;
66 using std::set;
67 using std::string;
68 using std::vector;
69
70 using tcu::ConstPixelBufferAccess;
71 using tcu::IVec2;
72 using tcu::PixelBufferAccess;
73 using tcu::TextureFormat;
74 using tcu::TextureLevel;
75 using tcu::UVec2;
76 using tcu::UVec4;
77 using tcu::Vec4;
78
79 namespace vkt
80 {
81 namespace memory
82 {
83 namespace
84 {
85
// Parenthesized so the macro expands safely inside larger expressions:
// without the parentheses "x / ONE_MEGABYTE" would parse as (x / 1024) * 1024.
#define ONE_MEGABYTE (1024 * 1024)
#define DEFAULT_VERTEX_BUFFER_STRIDE 2
#define ALTERNATIVE_VERTEX_BUFFER_STRIDE 4

// Upper bounds for the resource sizes exercised by these tests.
enum
{
    MAX_UNIFORM_BUFFER_SIZE = 1024,
    MAX_STORAGE_BUFFER_SIZE = (1 << 28),
    MAX_SIZE = (128 * 1024)
};
96
// \todo [mika] Add to utilities
// Integer division rounding the result up whenever there is a remainder.
// Written as quotient + adjustment (rather than (a + b - 1) / b) so the
// intermediate value cannot overflow.
template <typename T>
T divRoundUp(const T &a, const T &b)
{
    const T quotient  = a / b;
    const T remainder = a % b;

    return (remainder == T(0)) ? quotient : quotient + T(1);
}
103
// Bitmask describing how the tested memory object (buffer or image) is used.
// Values are combined with the overloaded operator| / operator& defined
// further below in this file.
enum Usage
{
    // Mapped host read and write
    USAGE_HOST_READ = (0x1u << 0),
    USAGE_HOST_WRITE = (0x1u << 1),

    // Copy and other transfer operations
    USAGE_TRANSFER_SRC = (0x1u << 2),
    USAGE_TRANSFER_DST = (0x1u << 3),

    // Buffer usage flags
    USAGE_INDEX_BUFFER = (0x1u << 4),
    USAGE_VERTEX_BUFFER = (0x1u << 5),

    USAGE_UNIFORM_BUFFER = (0x1u << 6),
    USAGE_STORAGE_BUFFER = (0x1u << 7),

    USAGE_UNIFORM_TEXEL_BUFFER = (0x1u << 8),
    USAGE_STORAGE_TEXEL_BUFFER = (0x1u << 9),

    // \todo [2016-03-09 mika] This is probably almost impossible to do
    USAGE_INDIRECT_BUFFER = (0x1u << 10),

    // Texture usage flags
    USAGE_SAMPLED_IMAGE = (0x1u << 11),
    USAGE_STORAGE_IMAGE = (0x1u << 12),
    USAGE_COLOR_ATTACHMENT = (0x1u << 13),
    USAGE_INPUT_ATTACHMENT = (0x1u << 14),
    USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u << 15),
};
134
supportsDeviceBufferWrites(Usage usage)135 bool supportsDeviceBufferWrites(Usage usage)
136 {
137 if (usage & USAGE_TRANSFER_DST)
138 return true;
139
140 if (usage & USAGE_STORAGE_BUFFER)
141 return true;
142
143 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
144 return true;
145
146 return false;
147 }
148
supportsDeviceImageWrites(Usage usage)149 bool supportsDeviceImageWrites(Usage usage)
150 {
151 if (usage & USAGE_TRANSFER_DST)
152 return true;
153
154 if (usage & USAGE_STORAGE_IMAGE)
155 return true;
156
157 if (usage & USAGE_COLOR_ATTACHMENT)
158 return true;
159
160 return false;
161 }
162
// Sequential access enums
// One contiguous index per VkAccessFlagBits bit (suitable for array
// indexing); accessFlagToAccess() below performs the mapping.
enum Access
{
    ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
    ACCESS_INDEX_READ_BIT,
    ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
    ACCESS_UNIFORM_READ_BIT,
    ACCESS_INPUT_ATTACHMENT_READ_BIT,
    ACCESS_SHADER_READ_BIT,
    ACCESS_SHADER_WRITE_BIT,
    ACCESS_COLOR_ATTACHMENT_READ_BIT,
    ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
    ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
    ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
    ACCESS_TRANSFER_READ_BIT,
    ACCESS_TRANSFER_WRITE_BIT,
    ACCESS_HOST_READ_BIT,
    ACCESS_HOST_WRITE_BIT,
    ACCESS_MEMORY_READ_BIT,
    ACCESS_MEMORY_WRITE_BIT,

    ACCESS_LAST
};
186
// Maps a single VkAccessFlagBits bit to its sequential Access index.
// The argument must contain exactly one known bit; anything else hits
// DE_FATAL and returns ACCESS_LAST.
Access accessFlagToAccess(vk::VkAccessFlagBits flag)
{
    switch (flag)
    {
    case vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
        return ACCESS_INDIRECT_COMMAND_READ_BIT;
    case vk::VK_ACCESS_INDEX_READ_BIT:
        return ACCESS_INDEX_READ_BIT;
    case vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
        return ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    case vk::VK_ACCESS_UNIFORM_READ_BIT:
        return ACCESS_UNIFORM_READ_BIT;
    case vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
        return ACCESS_INPUT_ATTACHMENT_READ_BIT;
    case vk::VK_ACCESS_SHADER_READ_BIT:
        return ACCESS_SHADER_READ_BIT;
    case vk::VK_ACCESS_SHADER_WRITE_BIT:
        return ACCESS_SHADER_WRITE_BIT;
    case vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
        return ACCESS_COLOR_ATTACHMENT_READ_BIT;
    case vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
        return ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    case vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
        return ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
    case vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
        return ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    case vk::VK_ACCESS_TRANSFER_READ_BIT:
        return ACCESS_TRANSFER_READ_BIT;
    case vk::VK_ACCESS_TRANSFER_WRITE_BIT:
        return ACCESS_TRANSFER_WRITE_BIT;
    case vk::VK_ACCESS_HOST_READ_BIT:
        return ACCESS_HOST_READ_BIT;
    case vk::VK_ACCESS_HOST_WRITE_BIT:
        return ACCESS_HOST_WRITE_BIT;
    case vk::VK_ACCESS_MEMORY_READ_BIT:
        return ACCESS_MEMORY_READ_BIT;
    case vk::VK_ACCESS_MEMORY_WRITE_BIT:
        return ACCESS_MEMORY_WRITE_BIT;

    default:
        DE_FATAL("Unknown access flags");
        return ACCESS_LAST;
    }
}
231
// Sequential stage enums
// One contiguous index per VkPipelineStageFlagBits bit (suitable for array
// indexing); pipelineStageFlagToPipelineStage() below performs the mapping.
enum PipelineStage
{
    PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
    PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
    PIPELINESTAGE_DRAW_INDIRECT_BIT,
    PIPELINESTAGE_VERTEX_INPUT_BIT,
    PIPELINESTAGE_VERTEX_SHADER_BIT,
    PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
    PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
    PIPELINESTAGE_GEOMETRY_SHADER_BIT,
    PIPELINESTAGE_FRAGMENT_SHADER_BIT,
    PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
    PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
    PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    PIPELINESTAGE_COMPUTE_SHADER_BIT,
    PIPELINESTAGE_TRANSFER_BIT,
    PIPELINESTAGE_HOST_BIT,

    PIPELINESTAGE_LAST
};
253
// Maps a single VkPipelineStageFlagBits bit to its sequential PipelineStage
// index. The argument must contain exactly one known bit; anything else hits
// DE_FATAL and returns PIPELINESTAGE_LAST.
PipelineStage pipelineStageFlagToPipelineStage(vk::VkPipelineStageFlagBits flag)
{
    switch (flag)
    {
    case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
        return PIPELINESTAGE_TOP_OF_PIPE_BIT;
    case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT:
        return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
    case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT:
        return PIPELINESTAGE_DRAW_INDIRECT_BIT;
    case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT:
        return PIPELINESTAGE_VERTEX_INPUT_BIT;
    case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT:
        return PIPELINESTAGE_VERTEX_SHADER_BIT;
    case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT:
        return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
    case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT:
        return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
    case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT:
        return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
    case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT:
        return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
    case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT:
        return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
    case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT:
        return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
    case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT:
        return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT:
        return PIPELINESTAGE_COMPUTE_SHADER_BIT;
    case vk::VK_PIPELINE_STAGE_TRANSFER_BIT:
        return PIPELINESTAGE_TRANSFER_BIT;
    case vk::VK_PIPELINE_STAGE_HOST_BIT:
        return PIPELINESTAGE_HOST_BIT;

    default:
        DE_FATAL("Unknown pipeline stage flags");
        return PIPELINESTAGE_LAST;
    }
}
294
operator |(Usage a,Usage b)295 Usage operator|(Usage a, Usage b)
296 {
297 return (Usage)((uint32_t)a | (uint32_t)b);
298 }
299
operator &(Usage a,Usage b)300 Usage operator&(Usage a, Usage b)
301 {
302 return (Usage)((uint32_t)a & (uint32_t)b);
303 }
304
usageToName(Usage usage)305 string usageToName(Usage usage)
306 {
307 const struct
308 {
309 Usage usage;
310 const char *const name;
311 } usageNames[] = {
312 {USAGE_HOST_READ, "host_read"},
313 {USAGE_HOST_WRITE, "host_write"},
314
315 {USAGE_TRANSFER_SRC, "transfer_src"},
316 {USAGE_TRANSFER_DST, "transfer_dst"},
317
318 {USAGE_INDEX_BUFFER, "index_buffer"},
319 {USAGE_VERTEX_BUFFER, "vertex_buffer"},
320 {USAGE_UNIFORM_BUFFER, "uniform_buffer"},
321 {USAGE_STORAGE_BUFFER, "storage_buffer"},
322 {USAGE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer"},
323 {USAGE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer"},
324 {USAGE_INDIRECT_BUFFER, "indirect_buffer"},
325 {USAGE_SAMPLED_IMAGE, "image_sampled"},
326 {USAGE_STORAGE_IMAGE, "storage_image"},
327 {USAGE_COLOR_ATTACHMENT, "color_attachment"},
328 {USAGE_INPUT_ATTACHMENT, "input_attachment"},
329 {USAGE_DEPTH_STENCIL_ATTACHMENT, "depth_stencil_attachment"},
330 };
331
332 std::ostringstream stream;
333 bool first = true;
334
335 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
336 {
337 if (usage & usageNames[usageNdx].usage)
338 {
339 if (!first)
340 stream << "_";
341 else
342 first = false;
343
344 stream << usageNames[usageNdx].name;
345 }
346 }
347
348 return stream.str();
349 }
350
usageToBufferUsageFlags(Usage usage)351 vk::VkBufferUsageFlags usageToBufferUsageFlags(Usage usage)
352 {
353 vk::VkBufferUsageFlags flags = 0;
354
355 if (usage & USAGE_TRANSFER_SRC)
356 flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
357
358 if (usage & USAGE_TRANSFER_DST)
359 flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
360
361 if (usage & USAGE_INDEX_BUFFER)
362 flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
363
364 if (usage & USAGE_VERTEX_BUFFER)
365 flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
366
367 if (usage & USAGE_INDIRECT_BUFFER)
368 flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
369
370 if (usage & USAGE_UNIFORM_BUFFER)
371 flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
372
373 if (usage & USAGE_STORAGE_BUFFER)
374 flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
375
376 if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
377 flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
378
379 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
380 flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
381
382 return flags;
383 }
384
usageToImageUsageFlags(Usage usage)385 vk::VkImageUsageFlags usageToImageUsageFlags(Usage usage)
386 {
387 vk::VkImageUsageFlags flags = 0;
388
389 if (usage & USAGE_TRANSFER_SRC)
390 flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
391
392 if (usage & USAGE_TRANSFER_DST)
393 flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
394
395 if (usage & USAGE_SAMPLED_IMAGE)
396 flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
397
398 if (usage & USAGE_STORAGE_IMAGE)
399 flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
400
401 if (usage & USAGE_COLOR_ATTACHMENT)
402 flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
403
404 if (usage & USAGE_INPUT_ATTACHMENT)
405 flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
406
407 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
408 flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
409
410 return flags;
411 }
412
// Returns every pipeline stage in which a resource with the given usage
// flags may be accessed. Shader-visible usages conservatively include the
// vertex, fragment and compute shader stages.
vk::VkPipelineStageFlags usageToStageFlags(Usage usage)
{
    vk::VkPipelineStageFlags flags = 0;

    if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
        flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;

    if (usage & (USAGE_TRANSFER_SRC | USAGE_TRANSFER_DST))
        flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;

    if (usage & (USAGE_VERTEX_BUFFER | USAGE_INDEX_BUFFER))
        flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;

    if (usage & USAGE_INDIRECT_BUFFER)
        flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;

    // Any usage a shader can read or write maps to all supported shader stages.
    if (usage & (USAGE_UNIFORM_BUFFER | USAGE_STORAGE_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER | USAGE_STORAGE_TEXEL_BUFFER |
                 USAGE_SAMPLED_IMAGE | USAGE_STORAGE_IMAGE))
    {
        flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                  vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
    }

    // Input attachments are only readable from the fragment shader.
    if (usage & USAGE_INPUT_ATTACHMENT)
        flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;

    if (usage & USAGE_COLOR_ATTACHMENT)
        flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;

    // Depth/stencil is touched by both early and late fragment tests.
    if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
    {
        flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    }

    return flags;
}
449
// Returns every VkAccessFlags bit that a resource with the given usage flags
// may generate (both read and write bits for read-write usages such as
// storage buffers/images and attachments).
vk::VkAccessFlags usageToAccessFlags(Usage usage)
{
    vk::VkAccessFlags flags = 0;

    if (usage & USAGE_HOST_READ)
        flags |= vk::VK_ACCESS_HOST_READ_BIT;

    if (usage & USAGE_HOST_WRITE)
        flags |= vk::VK_ACCESS_HOST_WRITE_BIT;

    if (usage & USAGE_TRANSFER_SRC)
        flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;

    if (usage & USAGE_TRANSFER_DST)
        flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;

    if (usage & USAGE_INDEX_BUFFER)
        flags |= vk::VK_ACCESS_INDEX_READ_BIT;

    if (usage & USAGE_VERTEX_BUFFER)
        flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;

    if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
        flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;

    if (usage & USAGE_SAMPLED_IMAGE)
        flags |= vk::VK_ACCESS_SHADER_READ_BIT;

    // Storage resources are both readable and writable by shaders.
    if (usage & (USAGE_STORAGE_BUFFER | USAGE_STORAGE_TEXEL_BUFFER | USAGE_STORAGE_IMAGE))
        flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;

    if (usage & USAGE_INDIRECT_BUFFER)
        flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;

    if (usage & USAGE_COLOR_ATTACHMENT)
        flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

    if (usage & USAGE_INPUT_ATTACHMENT)
        flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;

    if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
        flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

    return flags;
}
495
// Parameters for a single pipeline barrier test case.
struct TestConfig
{
    Usage usage;                 // combination of Usage flags to exercise
    uint32_t vertexBufferStride; // vertex buffer stride in bytes — presumably only relevant with USAGE_VERTEX_BUFFER; confirm at use site
    vk::VkDeviceSize size;       // size of the tested memory object in bytes
    vk::VkSharingMode sharing;   // exclusive vs. concurrent queue family ownership
};
503
// Allocates a command buffer of the requested level from the pool and puts it
// into the recording state. Secondary command buffers get a default-valued
// inheritance info (no render pass/framebuffer, occlusion query disabled).
vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer(const vk::DeviceInterface &vkd, vk::VkDevice device,
                                                       vk::VkCommandPool pool, vk::VkCommandBufferLevel level)
{
    const vk::VkCommandBufferInheritanceInfo inheritInfo = {
        vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, DE_NULL, 0, 0, 0, VK_FALSE, 0u, 0u};
    const vk::VkCommandBufferBeginInfo beginInfo = {
        vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        DE_NULL,
        0u,
        // Inheritance info is only meaningful for secondary command buffers.
        (level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo :
                                                          (const vk::VkCommandBufferInheritanceInfo *)DE_NULL),
    };

    vk::Move<vk::VkCommandBuffer> commandBuffer(allocateCommandBuffer(vkd, device, pool, level));

    VK_CHECK(vkd.beginCommandBuffer(*commandBuffer, &beginInfo));

    return commandBuffer;
}
523
// Creates a buffer with the given size, usage and sharing mode. The queue
// family list is passed through for VK_SHARING_MODE_CONCURRENT and ignored
// by Vulkan for VK_SHARING_MODE_EXCLUSIVE.
vk::Move<vk::VkBuffer> createBuffer(const vk::DeviceInterface &vkd, vk::VkDevice device, vk::VkDeviceSize size,
                                    vk::VkBufferUsageFlags usage, vk::VkSharingMode sharingMode,
                                    const vector<uint32_t> &queueFamilies)
{
    const vk::VkBufferCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
                                               DE_NULL,

                                               0, // flags
                                               size,
                                               usage,
                                               sharingMode,
                                               (uint32_t)queueFamilies.size(),
                                               // &queueFamilies[0] is undefined behavior on an empty vector;
                                               // pass null instead (legal whenever the count is 0).
                                               (queueFamilies.empty() ? (const uint32_t *)DE_NULL :
                                                                        &queueFamilies[0])};

    return vk::createBuffer(vkd, device, &createInfo);
}
540
// Allocates "size" bytes of device memory from the given memory type index.
vk::Move<vk::VkDeviceMemory> allocMemory(const vk::DeviceInterface &vkd, vk::VkDevice device, vk::VkDeviceSize size,
                                         uint32_t memoryTypeIndex)
{
    const vk::VkMemoryAllocateInfo allocateInfo = {
        vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
        DE_NULL,                                    // pNext
        size,                                       // allocationSize
        memoryTypeIndex                             // memoryTypeIndex
    };

    return vk::allocateMemory(vkd, device, &allocateInfo);
}
551
// Allocates device memory for the buffer from the first memory type that is
// allowed by the buffer's memory requirements and has all requested property
// flags, then binds it. Out-of-memory errors move on to the next candidate
// type; any other error propagates. Fails the test if no type works.
vk::Move<vk::VkDeviceMemory> bindBufferMemory(const vk::InstanceInterface &vki, const vk::DeviceInterface &vkd,
                                              vk::VkPhysicalDevice physicalDevice, vk::VkDevice device,
                                              vk::VkBuffer buffer, vk::VkMemoryPropertyFlags properties)
{
    const vk::VkMemoryRequirements memoryRequirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
    const vk::VkPhysicalDeviceMemoryProperties memoryProperties =
        vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
    uint32_t memoryTypeIndex;

    // memoryTypeBits has one bit set per memory type usable for this buffer.
    for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
    {
        if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex)) &&
            (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
        {
            try
            {
                const vk::VkMemoryAllocateInfo allocationInfo = {vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, DE_NULL,
                                                                 memoryRequirements.size, memoryTypeIndex};
                vk::Move<vk::VkDeviceMemory> memory(vk::allocateMemory(vkd, device, &allocationInfo));

                VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));

                return memory;
            }
            catch (const vk::Error &error)
            {
                if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY ||
                    error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
                {
                    // Try next memory type/heap if out of memory
                }
                else
                {
                    // Throw all other errors forward
                    throw;
                }
            }
        }
    }

    TCU_FAIL("Failed to allocate memory for buffer");
}
594
// Image counterpart of bindBufferMemory(): allocates memory for the image
// from the first compatible memory type with the requested properties and
// binds it. Out-of-memory errors move on to the next candidate type; any
// other error propagates. Fails the test if no type works.
vk::Move<vk::VkDeviceMemory> bindImageMemory(const vk::InstanceInterface &vki, const vk::DeviceInterface &vkd,
                                             vk::VkPhysicalDevice physicalDevice, vk::VkDevice device,
                                             vk::VkImage image, vk::VkMemoryPropertyFlags properties)
{
    const vk::VkMemoryRequirements memoryRequirements = vk::getImageMemoryRequirements(vkd, device, image);
    const vk::VkPhysicalDeviceMemoryProperties memoryProperties =
        vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
    uint32_t memoryTypeIndex;

    // memoryTypeBits has one bit set per memory type usable for this image.
    for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
    {
        if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex)) &&
            (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
        {
            try
            {
                const vk::VkMemoryAllocateInfo allocationInfo = {vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, DE_NULL,
                                                                 memoryRequirements.size, memoryTypeIndex};
                vk::Move<vk::VkDeviceMemory> memory(vk::allocateMemory(vkd, device, &allocationInfo));

                VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));

                return memory;
            }
            catch (const vk::Error &error)
            {
                if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY ||
                    error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
                {
                    // Try next memory type/heap if out of memory
                }
                else
                {
                    // Throw all other errors forward
                    throw;
                }
            }
        }
    }

    TCU_FAIL("Failed to allocate memory for image");
}
637
mapMemory(const vk::DeviceInterface & vkd,vk::VkDevice device,vk::VkDeviceMemory memory,vk::VkDeviceSize size)638 void *mapMemory(const vk::DeviceInterface &vkd, vk::VkDevice device, vk::VkDeviceMemory memory, vk::VkDeviceSize size)
639 {
640 void *ptr;
641
642 VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
643
644 return ptr;
645 }
646
// Byte-accurate model of the tested memory contents. Tracks, for every byte,
// the expected value (m_data) and whether that byte currently has defined
// contents (one bit per byte, packed into 64-bit words in m_defined).
class ReferenceMemory
{
public:
    ReferenceMemory(size_t size);

    // Write one byte and mark it defined.
    void set(size_t pos, uint8_t val);
    // Read one byte; asserts that the byte is defined.
    uint8_t get(uint64_t pos) const;
    // True if the byte at pos has defined contents.
    bool isDefined(uint64_t pos) const;

    // Bulk updates of the range [offset, offset+size).
    void setDefined(size_t offset, size_t size, const void *data);
    void setUndefined(size_t offset, size_t size);
    void setData(size_t offset, size_t size, const void *data);

    size_t getSize(void) const
    {
        return m_data.size();
    }

private:
    vector<uint8_t> m_data;    // expected byte values
    vector<uint64_t> m_defined; // one "defined" bit per byte of m_data
};
669
// "size" bytes of zero-initialized reference data; m_defined holds one bit
// per byte (rounded up to whole 64-bit words), all initially clear.
ReferenceMemory::ReferenceMemory(size_t size) : m_data(size, 0), m_defined(size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
{
}
673
set(size_t pos,uint8_t val)674 void ReferenceMemory::set(size_t pos, uint8_t val)
675 {
676 DE_ASSERT(pos < m_data.size());
677
678 m_data[pos] = val;
679 m_defined[pos / 64] |= 0x1ull << (pos % 64);
680 }
681
// Copies "size" bytes from data_ into the reference contents starting at
// offset and marks every touched byte as defined.
void ReferenceMemory::setData(size_t offset, size_t size, const void *data_)
{
    const uint8_t *data = (const uint8_t *)data_;

    DE_ASSERT(offset < m_data.size());
    DE_ASSERT(offset + size <= m_data.size());

    // \todo [2016-03-09 mika] Optimize
    for (size_t pos = 0; pos < size; pos++)
    {
        m_data[offset + pos] = data[pos];
        // Set the per-byte "defined" bit in the packed bitfield.
        m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
    }
}
696
// Marks the range [offset, offset+size) after its contents become undefined.
// NOTE(review): despite the name, this SETS the same "defined" bits that
// set()/setData() set, instead of clearing them — the stale m_data values
// remain readable via get(). Presumably intentional (undefined contents are
// treated as "any value is acceptable"), but confirm against how the
// verification code consumes isDefined() before changing it.
void ReferenceMemory::setUndefined(size_t offset, size_t size)
{
    // \todo [2016-03-09 mika] Optimize
    for (size_t pos = 0; pos < size; pos++)
        m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
}
703
// Returns the expected value of the byte at pos; asserts that the byte has
// been marked defined first.
uint8_t ReferenceMemory::get(uint64_t pos) const
{
    DE_ASSERT(pos < m_data.size());
    DE_ASSERT(isDefined(pos));
    return m_data[(size_t)pos];
}
710
isDefined(uint64_t pos) const711 bool ReferenceMemory::isDefined(uint64_t pos) const
712 {
713 DE_ASSERT(pos < m_data.size());
714
715 return (m_defined[(size_t)pos / 64] & (0x1ull << (pos % 64))) != 0;
716 }
717
// Wraps one VkDeviceMemory allocation of a specific memory type together with
// precomputed limits for this allocation: the largest buffer size and the
// largest RGBA8 image dimensions that are usable with this memory type.
class Memory
{
public:
    Memory(const vk::InstanceInterface &vki, const vk::DeviceInterface &vkd, vk::VkPhysicalDevice physicalDevice,
           vk::VkDevice device, vk::VkDeviceSize size, uint32_t memoryTypeIndex, vk::VkDeviceSize maxBufferSize,
           int32_t maxImageWidth, int32_t maxImageHeight);

    vk::VkDeviceSize getSize(void) const
    {
        return m_size;
    }
    vk::VkDeviceSize getMaxBufferSize(void) const
    {
        return m_maxBufferSize;
    }
    // Buffers are only testable if some buffer fits this memory.
    bool getSupportBuffers(void) const
    {
        return m_maxBufferSize > 0;
    }

    int32_t getMaxImageWidth(void) const
    {
        return m_maxImageWidth;
    }
    int32_t getMaxImageHeight(void) const
    {
        return m_maxImageHeight;
    }
    // Images are only testable if some image fits this memory.
    bool getSupportImages(void) const
    {
        return m_maxImageWidth > 0;
    }

    const vk::VkMemoryType &getMemoryType(void) const
    {
        return m_memoryType;
    }
    uint32_t getMemoryTypeIndex(void) const
    {
        return m_memoryTypeIndex;
    }
    vk::VkDeviceMemory getMemory(void) const
    {
        return *m_memory;
    }

private:
    const vk::VkDeviceSize m_size;
    const uint32_t m_memoryTypeIndex;
    const vk::VkMemoryType m_memoryType;
    const vk::Unique<vk::VkDeviceMemory> m_memory;
    const vk::VkDeviceSize m_maxBufferSize;
    const int32_t m_maxImageWidth;
    const int32_t m_maxImageHeight;
};
773
// Looks up the VkMemoryType description for the given memory type index of
// the physical device. The index must be in range.
vk::VkMemoryType getMemoryTypeInfo(const vk::InstanceInterface &vki, vk::VkPhysicalDevice device,
                                   uint32_t memoryTypeIndex)
{
    const vk::VkPhysicalDeviceMemoryProperties properties = vk::getPhysicalDeviceMemoryProperties(vki, device);

    DE_ASSERT(memoryTypeIndex < properties.memoryTypeCount);

    return properties.memoryTypes[memoryTypeIndex];
}
783
// Searches for the largest buffer size whose memory requirements fit in
// memorySize and are compatible with the given memory type. First tries the
// full memorySize; otherwise performs a binary-search-like walk with halving
// steps, keeping the last size that fit.
vk::VkDeviceSize findMaxBufferSize(const vk::DeviceInterface &vkd, vk::VkDevice device,

                                   vk::VkBufferUsageFlags usage, vk::VkSharingMode sharingMode,
                                   const vector<uint32_t> &queueFamilies,

                                   vk::VkDeviceSize memorySize, uint32_t memoryTypeIndex)
{
    vk::VkDeviceSize lastSuccess = 0;
    vk::VkDeviceSize currentSize = memorySize / 2;

    // Fast path: the whole allocation may already be usable as-is.
    {
        const vk::Unique<vk::VkBuffer> buffer(createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
        const vk::VkMemoryRequirements requirements(vk::getBufferMemoryRequirements(vkd, device, *buffer));

        if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
            return memorySize;
    }

    // Each iteration creates a throw-away buffer just to query its
    // requirements, then moves up or down by the current step.
    for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
    {
        const vk::Unique<vk::VkBuffer> buffer(
            createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
        const vk::VkMemoryRequirements requirements(vk::getBufferMemoryRequirements(vkd, device, *buffer));

        if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
        {
            lastSuccess = currentSize;
            currentSize += stepSize;
        }
        else
            currentSize -= stepSize;

        // Checked after the final adjustment so the last probe still counts.
        if (stepSize == 0)
            break;
    }

    return lastSuccess;
}
822
// Round size down maximum W * H * 4, where W and H < 4096
// NOTE(review): bestW is seeded with de::max(maxTexelCount, maxTextureSize),
// so for size > 4 * 4096 the initial candidate has W > 4096, which
// contradicts the comment above — de::min may have been intended. Confirm
// against the callers before changing, since this alters the tested sizes.
vk::VkDeviceSize roundBufferSizeToWxHx4(vk::VkDeviceSize size)
{
    const vk::VkDeviceSize maxTextureSize = 4096;
    vk::VkDeviceSize maxTexelCount = size / 4;
    vk::VkDeviceSize bestW = de::max(maxTexelCount, maxTextureSize);
    vk::VkDeviceSize bestH = maxTexelCount / bestW;

    // \todo [2016-03-09 mika] Could probably be faster?
    for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
    {
        const vk::VkDeviceSize h = maxTexelCount / w;

        // Keep the W x H pair covering the most texels seen so far.
        if (bestW * bestH < w * h)
        {
            bestW = w;
            bestH = h;
        }
    }

    return bestW * bestH * 4;
}
845
846 // Find RGBA8 image size that has exactly "size" of number of bytes.
847 // "size" must be W * H * 4 where W and H < 4096
findImageSizeWxHx4(vk::VkDeviceSize size)848 IVec2 findImageSizeWxHx4(vk::VkDeviceSize size)
849 {
850 const vk::VkDeviceSize maxTextureSize = 4096;
851 vk::VkDeviceSize texelCount = size / 4;
852
853 DE_ASSERT((size % 4) == 0);
854
855 // \todo [2016-03-09 mika] Could probably be faster?
856 for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
857 {
858 const vk::VkDeviceSize h = texelCount / w;
859
860 if ((texelCount % w) == 0 && h < maxTextureSize)
861 return IVec2((int)w, (int)h);
862 }
863
864 DE_FATAL("Invalid size");
865 return IVec2(-1, -1);
866 }
867
// Searches for the largest VK_FORMAT_R8G8B8A8_UNORM 2D optimal-tiling image
// whose memory requirements fit in memorySize and are compatible with the
// given memory type. Starts from a near-square guess and adjusts both edges
// by halving steps, keeping the last size that fit.
// NOTE(review): assumes memorySize >= 4 — with a smaller size the initial
// width is 0 and "texelCount / width" divides by zero; confirm callers.
IVec2 findMaxRGBA8ImageSize(const vk::DeviceInterface &vkd, vk::VkDevice device,

                            vk::VkImageUsageFlags usage, vk::VkSharingMode sharingMode,
                            const vector<uint32_t> &queueFamilies,

                            vk::VkDeviceSize memorySize, uint32_t memoryTypeIndex)
{
    IVec2 lastSuccess(0);
    IVec2 currentSize;

    // Initial guess: near-square dimensions covering all available texels.
    {
        const uint32_t texelCount = (uint32_t)(memorySize / 4);
        const uint32_t width = (uint32_t)deFloatSqrt((float)texelCount);
        const uint32_t height = texelCount / width;

        currentSize[0] = deMaxu32(width, height);
        currentSize[1] = deMinu32(width, height);
    }

    // Each iteration creates a throw-away image just to query its
    // requirements, then grows or shrinks both edges by the current step.
    for (int32_t stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
    {
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  DE_NULL,

                                                  0u,
                                                  vk::VK_IMAGE_TYPE_2D,
                                                  vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                  {
                                                      (uint32_t)currentSize[0],
                                                      (uint32_t)currentSize[1],
                                                      1u,
                                                  },
                                                  1u,
                                                  1u,
                                                  vk::VK_SAMPLE_COUNT_1_BIT,
                                                  vk::VK_IMAGE_TILING_OPTIMAL,
                                                  usage,
                                                  sharingMode,
                                                  (uint32_t)queueFamilies.size(),
                                                  &queueFamilies[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};
        const vk::Unique<vk::VkImage> image(vk::createImage(vkd, device, &createInfo));
        const vk::VkMemoryRequirements requirements(vk::getImageMemoryRequirements(vkd, device, *image));

        if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
        {
            lastSuccess = currentSize;
            currentSize[0] += stepSize;
            currentSize[1] += stepSize;
        }
        else
        {
            currentSize[0] -= stepSize;
            currentSize[1] -= stepSize;
        }

        // Checked after the final adjustment so the last probe still counts.
        if (stepSize == 0)
            break;
    }

    return lastSuccess;
}
930
// Allocates the backing VkDeviceMemory of the requested size/type and records
// the caller-provided buffer and image size limits for this memory type.
Memory::Memory(const vk::InstanceInterface &vki, const vk::DeviceInterface &vkd, vk::VkPhysicalDevice physicalDevice,
               vk::VkDevice device, vk::VkDeviceSize size, uint32_t memoryTypeIndex, vk::VkDeviceSize maxBufferSize,
               int32_t maxImageWidth, int32_t maxImageHeight)
    : m_size(size)
    , m_memoryTypeIndex(memoryTypeIndex)
    , m_memoryType(getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
    , m_memory(allocMemory(vkd, device, size, memoryTypeIndex))
    , m_maxBufferSize(maxBufferSize)
    , m_maxImageWidth(maxImageWidth)
    , m_maxImageHeight(maxImageHeight)
{
}
943
944 class Context
945 {
946 public:
Context(const vk::InstanceInterface & vki,const vk::DeviceInterface & vkd,vk::VkPhysicalDevice physicalDevice,vk::VkDevice device,vk::VkQueue queue,uint32_t queueFamilyIndex,const vector<pair<uint32_t,vk::VkQueue>> & queues,const vk::BinaryCollection & binaryCollection)947 Context(const vk::InstanceInterface &vki, const vk::DeviceInterface &vkd, vk::VkPhysicalDevice physicalDevice,
948 vk::VkDevice device, vk::VkQueue queue, uint32_t queueFamilyIndex,
949 const vector<pair<uint32_t, vk::VkQueue>> &queues, const vk::BinaryCollection &binaryCollection)
950 : m_vki(vki)
951 , m_vkd(vkd)
952 , m_physicalDevice(physicalDevice)
953 , m_device(device)
954 , m_queue(queue)
955 , m_queueFamilyIndex(queueFamilyIndex)
956 , m_queues(queues)
957 , m_commandPool(
958 createCommandPool(vkd, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex))
959 , m_binaryCollection(binaryCollection)
960 {
961 for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
962 m_queueFamilies.push_back(m_queues[queueNdx].first);
963 }
964
getInstanceInterface(void) const965 const vk::InstanceInterface &getInstanceInterface(void) const
966 {
967 return m_vki;
968 }
getPhysicalDevice(void) const969 vk::VkPhysicalDevice getPhysicalDevice(void) const
970 {
971 return m_physicalDevice;
972 }
getDevice(void) const973 vk::VkDevice getDevice(void) const
974 {
975 return m_device;
976 }
getDeviceInterface(void) const977 const vk::DeviceInterface &getDeviceInterface(void) const
978 {
979 return m_vkd;
980 }
getQueue(void) const981 vk::VkQueue getQueue(void) const
982 {
983 return m_queue;
984 }
getQueueFamily(void) const985 uint32_t getQueueFamily(void) const
986 {
987 return m_queueFamilyIndex;
988 }
getQueues(void) const989 const vector<pair<uint32_t, vk::VkQueue>> &getQueues(void) const
990 {
991 return m_queues;
992 }
getQueueFamilies(void) const993 const vector<uint32_t> getQueueFamilies(void) const
994 {
995 return m_queueFamilies;
996 }
getCommandPool(void) const997 vk::VkCommandPool getCommandPool(void) const
998 {
999 return *m_commandPool;
1000 }
getBinaryCollection(void) const1001 const vk::BinaryCollection &getBinaryCollection(void) const
1002 {
1003 return m_binaryCollection;
1004 }
1005
1006 private:
1007 const vk::InstanceInterface &m_vki;
1008 const vk::DeviceInterface &m_vkd;
1009 const vk::VkPhysicalDevice m_physicalDevice;
1010 const vk::VkDevice m_device;
1011 const vk::VkQueue m_queue;
1012 const uint32_t m_queueFamilyIndex;
1013 const vector<pair<uint32_t, vk::VkQueue>> m_queues;
1014 const vk::Unique<vk::VkCommandPool> m_commandPool;
1015 const vk::BinaryCollection &m_binaryCollection;
1016 vector<uint32_t> m_queueFamilies;
1017 };
1018
1019 class PrepareContext
1020 {
1021 public:
PrepareContext(const Context & context,const Memory & memory)1022 PrepareContext(const Context &context, const Memory &memory) : m_context(context), m_memory(memory)
1023 {
1024 }
1025
getMemory(void) const1026 const Memory &getMemory(void) const
1027 {
1028 return m_memory;
1029 }
getContext(void) const1030 const Context &getContext(void) const
1031 {
1032 return m_context;
1033 }
getBinaryCollection(void) const1034 const vk::BinaryCollection &getBinaryCollection(void) const
1035 {
1036 return m_context.getBinaryCollection();
1037 }
1038
setBuffer(vk::Move<vk::VkBuffer> buffer,vk::VkDeviceSize size)1039 void setBuffer(vk::Move<vk::VkBuffer> buffer, vk::VkDeviceSize size)
1040 {
1041 DE_ASSERT(!m_currentImage);
1042 DE_ASSERT(!m_currentBuffer);
1043
1044 m_currentBuffer = buffer;
1045 m_currentBufferSize = size;
1046 }
1047
getBuffer(void) const1048 vk::VkBuffer getBuffer(void) const
1049 {
1050 return *m_currentBuffer;
1051 }
getBufferSize(void) const1052 vk::VkDeviceSize getBufferSize(void) const
1053 {
1054 DE_ASSERT(m_currentBuffer);
1055 return m_currentBufferSize;
1056 }
1057
releaseBuffer(void)1058 void releaseBuffer(void)
1059 {
1060 m_currentBuffer.disown();
1061 }
1062
setImage(vk::Move<vk::VkImage> image,vk::VkImageLayout layout,vk::VkDeviceSize memorySize,int32_t width,int32_t height)1063 void setImage(vk::Move<vk::VkImage> image, vk::VkImageLayout layout, vk::VkDeviceSize memorySize, int32_t width,
1064 int32_t height)
1065 {
1066 DE_ASSERT(!m_currentImage);
1067 DE_ASSERT(!m_currentBuffer);
1068
1069 m_currentImage = image;
1070 m_currentImageMemorySize = memorySize;
1071 m_currentImageLayout = layout;
1072 m_currentImageWidth = width;
1073 m_currentImageHeight = height;
1074 }
1075
setImageLayout(vk::VkImageLayout layout)1076 void setImageLayout(vk::VkImageLayout layout)
1077 {
1078 DE_ASSERT(m_currentImage);
1079 m_currentImageLayout = layout;
1080 }
1081
getImage(void) const1082 vk::VkImage getImage(void) const
1083 {
1084 return *m_currentImage;
1085 }
getImageWidth(void) const1086 int32_t getImageWidth(void) const
1087 {
1088 DE_ASSERT(m_currentImage);
1089 return m_currentImageWidth;
1090 }
getImageHeight(void) const1091 int32_t getImageHeight(void) const
1092 {
1093 DE_ASSERT(m_currentImage);
1094 return m_currentImageHeight;
1095 }
getImageMemorySize(void) const1096 vk::VkDeviceSize getImageMemorySize(void) const
1097 {
1098 DE_ASSERT(m_currentImage);
1099 return m_currentImageMemorySize;
1100 }
1101
releaseImage(void)1102 void releaseImage(void)
1103 {
1104 m_currentImage.disown();
1105 }
1106
getImageLayout(void) const1107 vk::VkImageLayout getImageLayout(void) const
1108 {
1109 DE_ASSERT(m_currentImage);
1110 return m_currentImageLayout;
1111 }
1112
1113 private:
1114 const Context &m_context;
1115 const Memory &m_memory;
1116
1117 vk::Move<vk::VkBuffer> m_currentBuffer;
1118 vk::VkDeviceSize m_currentBufferSize;
1119
1120 vk::Move<vk::VkImage> m_currentImage;
1121 vk::VkDeviceSize m_currentImageMemorySize;
1122 vk::VkImageLayout m_currentImageLayout;
1123 int32_t m_currentImageWidth;
1124 int32_t m_currentImageHeight;
1125 };
1126
1127 class ExecuteContext
1128 {
1129 public:
ExecuteContext(const Context & context)1130 ExecuteContext(const Context &context) : m_context(context)
1131 {
1132 }
1133
getContext(void) const1134 const Context &getContext(void) const
1135 {
1136 return m_context;
1137 }
setMapping(void * ptr)1138 void setMapping(void *ptr)
1139 {
1140 m_mapping = ptr;
1141 }
getMapping(void) const1142 void *getMapping(void) const
1143 {
1144 return m_mapping;
1145 }
1146
1147 private:
1148 const Context &m_context;
1149 void *m_mapping;
1150 };
1151
// VerifyContext carries everything verify() steps need: the test log, the
// result collector, the shared Context, a byte-level reference model of the
// memory contents and a reference image for image operations.
class VerifyContext
{
public:
    VerifyContext(TestLog &log, tcu::ResultCollector &resultCollector, const Context &context, vk::VkDeviceSize size)
        : m_log(log)
        , m_resultCollector(resultCollector)
        , m_context(context)
        , m_reference((size_t)size) // reference model spans the whole allocation
    {
    }

    const Context &getContext(void) const
    {
        return m_context;
    }
    TestLog &getLog(void) const
    {
        return m_log;
    }
    tcu::ResultCollector &getResultCollector(void) const
    {
        return m_resultCollector;
    }

    // Mutable byte-level model of the expected memory contents.
    ReferenceMemory &getReference(void)
    {
        return m_reference;
    }
    // Mutable expected image contents; (re)allocated by CreateImage::verify().
    TextureLevel &getReferenceImage(void)
    {
        return m_referenceImage;
    }

private:
    TestLog &m_log;
    tcu::ResultCollector &m_resultCollector;
    const Context &m_context;
    ReferenceMemory m_reference;
    TextureLevel m_referenceImage;
};
1192
1193 class Command
1194 {
1195 public:
1196 // Constructor should allocate all non-vulkan resources.
~Command(void)1197 virtual ~Command(void)
1198 {
1199 }
1200
1201 // Get name of the command
1202 virtual const char *getName(void) const = 0;
1203
1204 // Log prepare operations
logPrepare(TestLog &,size_t) const1205 virtual void logPrepare(TestLog &, size_t) const
1206 {
1207 }
1208 // Log executed operations
logExecute(TestLog &,size_t) const1209 virtual void logExecute(TestLog &, size_t) const
1210 {
1211 }
1212
1213 // Prepare should allocate all vulkan resources and resources that require
1214 // that buffer or memory has been already allocated. This should build all
1215 // command buffers etc.
prepare(PrepareContext &)1216 virtual void prepare(PrepareContext &)
1217 {
1218 }
1219
1220 // Execute command. Write or read mapped memory, submit commands to queue
1221 // etc.
execute(ExecuteContext &)1222 virtual void execute(ExecuteContext &)
1223 {
1224 }
1225
1226 // Verify that results are correct.
verify(VerifyContext &,size_t)1227 virtual void verify(VerifyContext &, size_t)
1228 {
1229 }
1230
1231 protected:
1232 // Allow only inheritance
Command(void)1233 Command(void)
1234 {
1235 }
1236
1237 private:
1238 // Disallow copying
1239 Command(const Command &);
1240 Command &operator&(const Command &);
1241 };
1242
1243 class Map : public Command
1244 {
1245 public:
Map(void)1246 Map(void)
1247 {
1248 }
~Map(void)1249 ~Map(void)
1250 {
1251 }
getName(void) const1252 const char *getName(void) const
1253 {
1254 return "Map";
1255 }
1256
logExecute(TestLog & log,size_t commandIndex) const1257 void logExecute(TestLog &log, size_t commandIndex) const
1258 {
1259 log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1260 }
1261
prepare(PrepareContext & context)1262 void prepare(PrepareContext &context)
1263 {
1264 m_memory = context.getMemory().getMemory();
1265 m_size = context.getMemory().getSize();
1266 }
1267
execute(ExecuteContext & context)1268 void execute(ExecuteContext &context)
1269 {
1270 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1271 const vk::VkDevice device = context.getContext().getDevice();
1272
1273 context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1274 }
1275
1276 private:
1277 vk::VkDeviceMemory m_memory;
1278 vk::VkDeviceSize m_size;
1279 };
1280
1281 class UnMap : public Command
1282 {
1283 public:
UnMap(void)1284 UnMap(void)
1285 {
1286 }
~UnMap(void)1287 ~UnMap(void)
1288 {
1289 }
getName(void) const1290 const char *getName(void) const
1291 {
1292 return "UnMap";
1293 }
1294
logExecute(TestLog & log,size_t commandIndex) const1295 void logExecute(TestLog &log, size_t commandIndex) const
1296 {
1297 log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1298 }
1299
prepare(PrepareContext & context)1300 void prepare(PrepareContext &context)
1301 {
1302 m_memory = context.getMemory().getMemory();
1303 }
1304
execute(ExecuteContext & context)1305 void execute(ExecuteContext &context)
1306 {
1307 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1308 const vk::VkDevice device = context.getContext().getDevice();
1309
1310 vkd.unmapMemory(device, m_memory);
1311 context.setMapping(DE_NULL);
1312 }
1313
1314 private:
1315 vk::VkDeviceMemory m_memory;
1316 };
1317
1318 class Invalidate : public Command
1319 {
1320 public:
Invalidate(void)1321 Invalidate(void)
1322 {
1323 }
~Invalidate(void)1324 ~Invalidate(void)
1325 {
1326 }
getName(void) const1327 const char *getName(void) const
1328 {
1329 return "Invalidate";
1330 }
1331
logExecute(TestLog & log,size_t commandIndex) const1332 void logExecute(TestLog &log, size_t commandIndex) const
1333 {
1334 log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1335 }
1336
prepare(PrepareContext & context)1337 void prepare(PrepareContext &context)
1338 {
1339 m_memory = context.getMemory().getMemory();
1340 m_size = context.getMemory().getSize();
1341 }
1342
execute(ExecuteContext & context)1343 void execute(ExecuteContext &context)
1344 {
1345 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1346 const vk::VkDevice device = context.getContext().getDevice();
1347
1348 vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, VK_WHOLE_SIZE);
1349 }
1350
1351 private:
1352 vk::VkDeviceMemory m_memory;
1353 vk::VkDeviceSize m_size;
1354 };
1355
1356 class Flush : public Command
1357 {
1358 public:
Flush(void)1359 Flush(void)
1360 {
1361 }
~Flush(void)1362 ~Flush(void)
1363 {
1364 }
getName(void) const1365 const char *getName(void) const
1366 {
1367 return "Flush";
1368 }
1369
logExecute(TestLog & log,size_t commandIndex) const1370 void logExecute(TestLog &log, size_t commandIndex) const
1371 {
1372 log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1373 }
1374
prepare(PrepareContext & context)1375 void prepare(PrepareContext &context)
1376 {
1377 m_memory = context.getMemory().getMemory();
1378 m_size = context.getMemory().getSize();
1379 }
1380
execute(ExecuteContext & context)1381 void execute(ExecuteContext &context)
1382 {
1383 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1384 const vk::VkDevice device = context.getContext().getDevice();
1385
1386 vk::flushMappedMemoryRange(vkd, device, m_memory, 0, VK_WHOLE_SIZE);
1387 }
1388
1389 private:
1390 vk::VkDeviceMemory m_memory;
1391 vk::VkDeviceSize m_size;
1392 };
1393
// Host memory reads and writes
//
// Reads and/or writes the entire mapped allocation through the host mapping.
// Writes are pseudorandom bytes derived from 'seed'. With both read and write
// set, each byte becomes (old value ^ random mask), which lets verify()
// replay the identical RNG stream against the byte-level reference model.
class HostMemoryAccess : public Command
{
public:
    HostMemoryAccess(bool read, bool write, uint32_t seed);
    ~HostMemoryAccess(void)
    {
    }
    const char *getName(void) const
    {
        return "HostMemoryAccess";
    }

    void logExecute(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void execute(ExecuteContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const bool m_read;     // read current contents into m_readData
    const bool m_write;    // overwrite contents with seeded random data
    const uint32_t m_seed; // seed shared by execute() and verify()

    size_t m_size;              // allocation size, set in prepare()
    vector<uint8_t> m_readData; // bytes read in execute(), checked in verify()
};

HostMemoryAccess::HostMemoryAccess(bool read, bool write, uint32_t seed)
    : m_read(read)
    , m_write(write)
    , m_seed(seed)
    , m_size(0)
{
}

void HostMemoryAccess::logExecute(TestLog &log, size_t commandIndex) const
{
    log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "")
        << (m_write ? " write" : "") << ", seed: " << m_seed << TestLog::EndMessage;
}

void HostMemoryAccess::prepare(PrepareContext &context)
{
    m_size = (size_t)context.getMemory().getSize();

    // Reserve the read-back buffer up front so execute() does no allocation.
    if (m_read)
        m_readData.resize(m_size, 0);
}

void HostMemoryAccess::execute(ExecuteContext &context)
{
    if (m_read && m_write)
    {
        de::Random rng(m_seed);
        uint8_t *const ptr = (uint8_t *)context.getMapping();
        // Large allocations: one bulk memcpy then a write pass. Small ones:
        // interleave read and write per byte. Both paths perform the same
        // logical accesses — read old byte, write (old ^ mask) — and consume
        // exactly one RNG byte per position, matching verify().
        if (m_size >= ONE_MEGABYTE)
        {
            deMemcpy(&m_readData[0], ptr, m_size);
            for (size_t pos = 0; pos < m_size; ++pos)
            {
                ptr[pos] = m_readData[pos] ^ rng.getUint8();
            }
        }
        else
        {
            for (size_t pos = 0; pos < m_size; ++pos)
            {
                const uint8_t mask = rng.getUint8();
                const uint8_t value = ptr[pos];

                m_readData[pos] = value;
                ptr[pos] = value ^ mask;
            }
        }
    }
    else if (m_read)
    {
        // Read-only: capture the current contents for verify().
        const uint8_t *const ptr = (uint8_t *)context.getMapping();
        if (m_size >= ONE_MEGABYTE)
        {
            deMemcpy(&m_readData[0], ptr, m_size);
        }
        else
        {
            for (size_t pos = 0; pos < m_size; ++pos)
            {
                m_readData[pos] = ptr[pos];
            }
        }
    }
    else if (m_write)
    {
        // Write-only: fill with the raw RNG byte stream.
        de::Random rng(m_seed);
        uint8_t *const ptr = (uint8_t *)context.getMapping();
        for (size_t pos = 0; pos < m_size; ++pos)
        {
            ptr[pos] = rng.getUint8();
        }
    }
    else
        DE_FATAL("Host memory access without read or write.");
}

void HostMemoryAccess::verify(VerifyContext &context, size_t commandIndex)
{
    tcu::ResultCollector &resultCollector = context.getResultCollector();
    ReferenceMemory &reference = context.getReference();
    // Re-seed with the same seed as execute() so the mask stream replays
    // byte-for-byte.
    de::Random rng(m_seed);

    if (m_read && m_write)
    {
        for (size_t pos = 0; pos < m_size; pos++)
        {
            // Draw the mask unconditionally to keep the RNG stream in sync
            // even over bytes whose reference value is undefined.
            const uint8_t mask = rng.getUint8();
            const uint8_t value = m_readData[pos];

            if (reference.isDefined(pos))
            {
                if (value != reference.get(pos))
                {
                    resultCollector.fail(
                        de::toString(commandIndex) + ":" + getName() +
                        " Result differs from reference, Expected: " + de::toString(tcu::toHex<8>(reference.get(pos))) +
                        ", Got: " + de::toString(tcu::toHex<8>(value)) + ", At offset: " + de::toString(pos));
                    break; // report only the first mismatch
                }

                // Mirror execute(): the byte was rewritten as (old ^ mask).
                reference.set(pos, reference.get(pos) ^ mask);
            }
        }
    }
    else if (m_read)
    {
        // Read-only: compare read-back bytes against defined reference bytes.
        for (size_t pos = 0; pos < m_size; pos++)
        {
            const uint8_t value = m_readData[pos];

            if (reference.isDefined(pos))
            {
                if (value != reference.get(pos))
                {
                    resultCollector.fail(
                        de::toString(commandIndex) + ":" + getName() +
                        " Result differs from reference, Expected: " + de::toString(tcu::toHex<8>(reference.get(pos))) +
                        ", Got: " + de::toString(tcu::toHex<8>(value)) + ", At offset: " + de::toString(pos));
                    break; // report only the first mismatch
                }
            }
        }
    }
    else if (m_write)
    {
        // Write-only: replay the RNG stream into the reference model, which
        // makes every byte defined.
        for (size_t pos = 0; pos < m_size; pos++)
        {
            const uint8_t value = rng.getUint8();

            reference.set(pos, value);
        }
    }
    else
        DE_FATAL("Host memory access without read or write.");
}
1556
1557 class CreateBuffer : public Command
1558 {
1559 public:
1560 CreateBuffer(vk::VkBufferUsageFlags usage, vk::VkSharingMode sharing);
~CreateBuffer(void)1561 ~CreateBuffer(void)
1562 {
1563 }
getName(void) const1564 const char *getName(void) const
1565 {
1566 return "CreateBuffer";
1567 }
1568
1569 void logPrepare(TestLog &log, size_t commandIndex) const;
1570 void prepare(PrepareContext &context);
1571
1572 private:
1573 const vk::VkBufferUsageFlags m_usage;
1574 const vk::VkSharingMode m_sharing;
1575 };
1576
CreateBuffer(vk::VkBufferUsageFlags usage,vk::VkSharingMode sharing)1577 CreateBuffer::CreateBuffer(vk::VkBufferUsageFlags usage, vk::VkSharingMode sharing) : m_usage(usage), m_sharing(sharing)
1578 {
1579 }
1580
logPrepare(TestLog & log,size_t commandIndex) const1581 void CreateBuffer::logPrepare(TestLog &log, size_t commandIndex) const
1582 {
1583 log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing
1584 << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1585 }
1586
prepare(PrepareContext & context)1587 void CreateBuffer::prepare(PrepareContext &context)
1588 {
1589 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1590 const vk::VkDevice device = context.getContext().getDevice();
1591 const vk::VkDeviceSize bufferSize = context.getMemory().getMaxBufferSize();
1592 const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
1593
1594 context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
1595 }
1596
1597 class DestroyBuffer : public Command
1598 {
1599 public:
1600 DestroyBuffer(void);
~DestroyBuffer(void)1601 ~DestroyBuffer(void)
1602 {
1603 }
getName(void) const1604 const char *getName(void) const
1605 {
1606 return "DestroyBuffer";
1607 }
1608
1609 void logExecute(TestLog &log, size_t commandIndex) const;
1610 void prepare(PrepareContext &context);
1611 void execute(ExecuteContext &context);
1612
1613 private:
1614 vk::Move<vk::VkBuffer> m_buffer;
1615 };
1616
DestroyBuffer(void)1617 DestroyBuffer::DestroyBuffer(void)
1618 {
1619 }
1620
prepare(PrepareContext & context)1621 void DestroyBuffer::prepare(PrepareContext &context)
1622 {
1623 m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()),
1624 vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(),
1625 context.getContext().getDevice(), DE_NULL));
1626 context.releaseBuffer();
1627 }
1628
logExecute(TestLog & log,size_t commandIndex) const1629 void DestroyBuffer::logExecute(TestLog &log, size_t commandIndex) const
1630 {
1631 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
1632 }
1633
execute(ExecuteContext & context)1634 void DestroyBuffer::execute(ExecuteContext &context)
1635 {
1636 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1637 const vk::VkDevice device = context.getContext().getDevice();
1638
1639 vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
1640 }
1641
1642 class BindBufferMemory : public Command
1643 {
1644 public:
BindBufferMemory(void)1645 BindBufferMemory(void)
1646 {
1647 }
~BindBufferMemory(void)1648 ~BindBufferMemory(void)
1649 {
1650 }
getName(void) const1651 const char *getName(void) const
1652 {
1653 return "BindBufferMemory";
1654 }
1655
1656 void logPrepare(TestLog &log, size_t commandIndex) const;
1657 void prepare(PrepareContext &context);
1658 };
1659
logPrepare(TestLog & log,size_t commandIndex) const1660 void BindBufferMemory::logPrepare(TestLog &log, size_t commandIndex) const
1661 {
1662 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1663 }
1664
prepare(PrepareContext & context)1665 void BindBufferMemory::prepare(PrepareContext &context)
1666 {
1667 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1668 const vk::VkDevice device = context.getContext().getDevice();
1669
1670 VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
1671 }
1672
// Creates a 2D RGBA8 optimal-tiling image sized to the maximum dimensions
// recorded in Memory (getMaxImageWidth()/getMaxImageHeight()) and hands it
// to the prepare context in VK_IMAGE_LAYOUT_UNDEFINED.
class CreateImage : public Command
{
public:
    CreateImage(vk::VkImageUsageFlags usage, vk::VkSharingMode sharing);
    ~CreateImage(void)
    {
    }
    const char *getName(void) const
    {
        return "CreateImage";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const vk::VkImageUsageFlags m_usage;
    const vk::VkSharingMode m_sharing;
    int32_t m_imageWidth;  // chosen in prepare(), reused by verify()
    int32_t m_imageHeight; // chosen in prepare(), reused by verify()
};

CreateImage::CreateImage(vk::VkImageUsageFlags usage, vk::VkSharingMode sharing)
    : m_usage(usage)
    , m_sharing(sharing)
    , m_imageWidth(0)
    , m_imageHeight(0)
{
}

void CreateImage::logPrepare(TestLog &log, size_t commandIndex) const
{
    log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing
        << ", usage: " << vk::getImageUsageFlagsStr(m_usage) << TestLog::EndMessage;
}

void CreateImage::prepare(PrepareContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();

    m_imageWidth = context.getMemory().getMaxImageWidth();
    m_imageHeight = context.getMemory().getMaxImageHeight();

    {
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  DE_NULL, // pNext
                                                  // flags
                                                  0u,
                                                  vk::VK_IMAGE_TYPE_2D,
                                                  vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                  // extent (width, height, depth)
                                                  {
                                                      (uint32_t)m_imageWidth,
                                                      (uint32_t)m_imageHeight,
                                                      1u,
                                                  },
                                                  1u, // mipLevels
                                                  1u, // arrayLayers
                                                  vk::VK_SAMPLE_COUNT_1_BIT,
                                                  vk::VK_IMAGE_TILING_OPTIMAL,
                                                  m_usage,
                                                  m_sharing,
                                                  // queue family list used with the sharing mode
                                                  (uint32_t)queueFamilies.size(),
                                                  &queueFamilies[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};
        vk::Move<vk::VkImage> image(createImage(vkd, device, &createInfo));
        // Report the image's actual memory footprint so later commands know
        // how many bytes of the allocation the image occupies.
        const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, *image);

        context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
    }
}

void CreateImage::verify(VerifyContext &context, size_t)
{
    // (Re)allocate the reference image at the new size; previous reference
    // image contents are discarded along with the old image.
    context.getReferenceImage() =
        TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
}
1752
1753 class DestroyImage : public Command
1754 {
1755 public:
1756 DestroyImage(void);
~DestroyImage(void)1757 ~DestroyImage(void)
1758 {
1759 }
getName(void) const1760 const char *getName(void) const
1761 {
1762 return "DestroyImage";
1763 }
1764
1765 void logExecute(TestLog &log, size_t commandIndex) const;
1766 void prepare(PrepareContext &context);
1767 void execute(ExecuteContext &context);
1768
1769 private:
1770 vk::Move<vk::VkImage> m_image;
1771 };
1772
DestroyImage(void)1773 DestroyImage::DestroyImage(void)
1774 {
1775 }
1776
prepare(PrepareContext & context)1777 void DestroyImage::prepare(PrepareContext &context)
1778 {
1779 m_image = vk::Move<vk::VkImage>(
1780 vk::check(context.getImage()),
1781 vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1782 context.releaseImage();
1783 }
1784
logExecute(TestLog & log,size_t commandIndex) const1785 void DestroyImage::logExecute(TestLog &log, size_t commandIndex) const
1786 {
1787 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
1788 }
1789
execute(ExecuteContext & context)1790 void DestroyImage::execute(ExecuteContext &context)
1791 {
1792 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1793 const vk::VkDevice device = context.getContext().getDevice();
1794
1795 vkd.destroyImage(device, m_image.disown(), DE_NULL);
1796 }
1797
1798 class BindImageMemory : public Command
1799 {
1800 public:
BindImageMemory(void)1801 BindImageMemory(void)
1802 {
1803 }
~BindImageMemory(void)1804 ~BindImageMemory(void)
1805 {
1806 }
getName(void) const1807 const char *getName(void) const
1808 {
1809 return "BindImageMemory";
1810 }
1811
1812 void logPrepare(TestLog &log, size_t commandIndex) const;
1813 void prepare(PrepareContext &context);
1814 };
1815
logPrepare(TestLog & log,size_t commandIndex) const1816 void BindImageMemory::logPrepare(TestLog &log, size_t commandIndex) const
1817 {
1818 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1819 }
1820
prepare(PrepareContext & context)1821 void BindImageMemory::prepare(PrepareContext &context)
1822 {
1823 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1824 const vk::VkDevice device = context.getContext().getDevice();
1825
1826 VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1827 }
1828
1829 class QueueWaitIdle : public Command
1830 {
1831 public:
QueueWaitIdle(void)1832 QueueWaitIdle(void)
1833 {
1834 }
~QueueWaitIdle(void)1835 ~QueueWaitIdle(void)
1836 {
1837 }
getName(void) const1838 const char *getName(void) const
1839 {
1840 return "QueuetWaitIdle";
1841 }
1842
1843 void logExecute(TestLog &log, size_t commandIndex) const;
1844 void execute(ExecuteContext &context);
1845 };
1846
logExecute(TestLog & log,size_t commandIndex) const1847 void QueueWaitIdle::logExecute(TestLog &log, size_t commandIndex) const
1848 {
1849 log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
1850 }
1851
execute(ExecuteContext & context)1852 void QueueWaitIdle::execute(ExecuteContext &context)
1853 {
1854 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1855 const vk::VkQueue queue = context.getContext().getQueue();
1856
1857 VK_CHECK(vkd.queueWaitIdle(queue));
1858 }
1859
1860 class DeviceWaitIdle : public Command
1861 {
1862 public:
DeviceWaitIdle(void)1863 DeviceWaitIdle(void)
1864 {
1865 }
~DeviceWaitIdle(void)1866 ~DeviceWaitIdle(void)
1867 {
1868 }
getName(void) const1869 const char *getName(void) const
1870 {
1871 return "DeviceWaitIdle";
1872 }
1873
1874 void logExecute(TestLog &log, size_t commandIndex) const;
1875 void execute(ExecuteContext &context);
1876 };
1877
logExecute(TestLog & log,size_t commandIndex) const1878 void DeviceWaitIdle::logExecute(TestLog &log, size_t commandIndex) const
1879 {
1880 log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1881 }
1882
execute(ExecuteContext & context)1883 void DeviceWaitIdle::execute(ExecuteContext &context)
1884 {
1885 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
1886 const vk::VkDevice device = context.getContext().getDevice();
1887
1888 VK_CHECK(vkd.deviceWaitIdle(device));
1889 }
1890
// SubmitContext is handed to CmdCommand::submit(): it exposes the prepare
// state (memory, current buffer/image) plus the command buffer being
// recorded into.
class SubmitContext
{
public:
    SubmitContext(const PrepareContext &context, const vk::VkCommandBuffer commandBuffer)
        : m_context(context)
        , m_commandBuffer(commandBuffer)
    {
    }

    const Memory &getMemory(void) const
    {
        return m_context.getMemory();
    }
    const Context &getContext(void) const
    {
        return m_context.getContext();
    }
    // Command buffer currently being recorded (in the recording state).
    vk::VkCommandBuffer getCommandBuffer(void) const
    {
        return m_commandBuffer;
    }

    vk::VkBuffer getBuffer(void) const
    {
        return m_context.getBuffer();
    }
    vk::VkDeviceSize getBufferSize(void) const
    {
        return m_context.getBufferSize();
    }

    vk::VkImage getImage(void) const
    {
        return m_context.getImage();
    }
    int32_t getImageWidth(void) const
    {
        return m_context.getImageWidth();
    }
    int32_t getImageHeight(void) const
    {
        return m_context.getImageHeight();
    }

private:
    const PrepareContext &m_context;
    const vk::VkCommandBuffer m_commandBuffer;
};
1939
// Base class for operations that run inside a single command buffer
// submission (owned by SubmitCommandBuffer). Mirrors Command, but instead of
// execute() it has submit(), which records into the command buffer provided
// by the SubmitContext.
class CmdCommand
{
public:
    virtual ~CmdCommand(void)
    {
    }
    virtual const char *getName(void) const = 0;

    // Log things that are done during prepare
    virtual void logPrepare(TestLog &, size_t) const
    {
    }
    // Log submitted calls etc.
    virtual void logSubmit(TestLog &, size_t) const
    {
    }

    // Allocate vulkan resources and prepare for submit.
    virtual void prepare(PrepareContext &)
    {
    }

    // Submit commands to command buffer.
    virtual void submit(SubmitContext &)
    {
    }

    // Verify results
    virtual void verify(VerifyContext &, size_t)
    {
    }
};
1972
1973 class SubmitCommandBuffer : public Command
1974 {
1975 public:
1976 SubmitCommandBuffer(const vector<CmdCommand *> &commands);
1977 ~SubmitCommandBuffer(void);
1978
getName(void) const1979 const char *getName(void) const
1980 {
1981 return "SubmitCommandBuffer";
1982 }
1983 void logExecute(TestLog &log, size_t commandIndex) const;
1984 void logPrepare(TestLog &log, size_t commandIndex) const;
1985
1986 // Allocate command buffer and submit commands to command buffer
1987 void prepare(PrepareContext &context);
1988 void execute(ExecuteContext &context);
1989
1990 // Verify that results are correct.
1991 void verify(VerifyContext &context, size_t commandIndex);
1992
1993 private:
1994 vector<CmdCommand *> m_commands;
1995 vk::Move<vk::VkCommandBuffer> m_commandBuffer;
1996 };
1997
SubmitCommandBuffer(const vector<CmdCommand * > & commands)1998 SubmitCommandBuffer::SubmitCommandBuffer(const vector<CmdCommand *> &commands) : m_commands(commands)
1999 {
2000 }
2001
~SubmitCommandBuffer(void)2002 SubmitCommandBuffer::~SubmitCommandBuffer(void)
2003 {
2004 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
2005 delete m_commands[cmdNdx];
2006 }
2007
prepare(PrepareContext & context)2008 void SubmitCommandBuffer::prepare(PrepareContext &context)
2009 {
2010 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
2011 const vk::VkDevice device = context.getContext().getDevice();
2012 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2013
2014 m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2015
2016 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
2017 {
2018 CmdCommand &command = *m_commands[cmdNdx];
2019
2020 command.prepare(context);
2021 }
2022
2023 {
2024 SubmitContext submitContext(context, *m_commandBuffer);
2025
2026 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
2027 {
2028 CmdCommand &command = *m_commands[cmdNdx];
2029
2030 command.submit(submitContext);
2031 }
2032
2033 endCommandBuffer(vkd, *m_commandBuffer);
2034 }
2035 }
2036
execute(ExecuteContext & context)2037 void SubmitCommandBuffer::execute(ExecuteContext &context)
2038 {
2039 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
2040 const vk::VkCommandBuffer cmd = *m_commandBuffer;
2041 const vk::VkQueue queue = context.getContext().getQueue();
2042 const vk::VkSubmitInfo submit = {vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
2043 DE_NULL,
2044
2045 0,
2046 DE_NULL,
2047 (const vk::VkPipelineStageFlags *)DE_NULL,
2048
2049 1,
2050 &cmd,
2051
2052 0,
2053 DE_NULL};
2054
2055 vkd.queueSubmit(queue, 1, &submit, 0);
2056 }
2057
verify(VerifyContext & context,size_t commandIndex)2058 void SubmitCommandBuffer::verify(VerifyContext &context, size_t commandIndex)
2059 {
2060 const string sectionName(de::toString(commandIndex) + ":" + getName());
2061 const tcu::ScopedLogSection section(context.getLog(), sectionName, sectionName);
2062
2063 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
2064 m_commands[cmdNdx]->verify(context, cmdNdx);
2065 }
2066
logPrepare(TestLog & log,size_t commandIndex) const2067 void SubmitCommandBuffer::logPrepare(TestLog &log, size_t commandIndex) const
2068 {
2069 const string sectionName(de::toString(commandIndex) + ":" + getName());
2070 const tcu::ScopedLogSection section(log, sectionName, sectionName);
2071
2072 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
2073 m_commands[cmdNdx]->logPrepare(log, cmdNdx);
2074 }
2075
logExecute(TestLog & log,size_t commandIndex) const2076 void SubmitCommandBuffer::logExecute(TestLog &log, size_t commandIndex) const
2077 {
2078 const string sectionName(de::toString(commandIndex) + ":" + getName());
2079 const tcu::ScopedLogSection section(log, sectionName, sectionName);
2080
2081 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
2082 m_commands[cmdNdx]->logSubmit(log, cmdNdx);
2083 }
2084
// Records a single vkCmdPipelineBarrier with a global, buffer or image
// memory barrier built from the given stage/access masks.
class PipelineBarrier : public CmdCommand
{
public:
    enum Type
    {
        TYPE_GLOBAL = 0,
        TYPE_BUFFER,
        TYPE_IMAGE,
        TYPE_LAST
    };
    // imageLayout is only meaningful for TYPE_IMAGE barriers (old == new layout).
    PipelineBarrier(const vk::VkPipelineStageFlags srcStages, const vk::VkAccessFlags srcAccesses,
                    const vk::VkPipelineStageFlags dstStages, const vk::VkAccessFlags dstAccesses, Type type,
                    const tcu::Maybe<vk::VkImageLayout> imageLayout);
    ~PipelineBarrier(void)
    {
    }
    const char *getName(void) const
    {
        return "PipelineBarrier";
    }

    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);

private:
    const vk::VkPipelineStageFlags m_srcStages;
    const vk::VkAccessFlags m_srcAccesses;
    const vk::VkPipelineStageFlags m_dstStages;
    const vk::VkAccessFlags m_dstAccesses;
    const Type m_type;
    // Set only for TYPE_IMAGE barriers; dereferenced in submit().
    const tcu::Maybe<vk::VkImageLayout> m_imageLayout;
};
2117
// Plain member-wise initialization; validation of imageLayout vs. type
// happens implicitly in submit() (Maybe is dereferenced only for TYPE_IMAGE).
PipelineBarrier::PipelineBarrier(const vk::VkPipelineStageFlags srcStages, const vk::VkAccessFlags srcAccesses,
                                 const vk::VkPipelineStageFlags dstStages, const vk::VkAccessFlags dstAccesses,
                                 Type type, const tcu::Maybe<vk::VkImageLayout> imageLayout)
    : m_srcStages(srcStages)
    , m_srcAccesses(srcAccesses)
    , m_dstStages(dstStages)
    , m_dstAccesses(dstAccesses)
    , m_type(type)
    , m_imageLayout(imageLayout)
{
}
2129
logSubmit(TestLog & log,size_t commandIndex) const2130 void PipelineBarrier::logSubmit(TestLog &log, size_t commandIndex) const
2131 {
2132 log << TestLog::Message << commandIndex << ":" << getName() << " "
2133 << (m_type == TYPE_GLOBAL ? "Global pipeline barrier" :
2134 m_type == TYPE_BUFFER ? "Buffer pipeline barrier" :
2135 "Image pipeline barrier")
2136 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages)
2137 << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2138 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages)
2139 << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
2140 }
2141
// Record one pipeline barrier of the configured type into the current
// command buffer. All three variants share the same stage/access masks.
void PipelineBarrier::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer cmd = context.getCommandBuffer();

    switch (m_type)
    {
    case TYPE_GLOBAL:
    {
        // Global memory barrier: applies to all memory accesses.
        const vk::VkMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER, DE_NULL,

                                             m_srcAccesses, m_dstAccesses};

        vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0,
                               (const vk::VkBufferMemoryBarrier *)DE_NULL, 0,
                               (const vk::VkImageMemoryBarrier *)DE_NULL);
        break;
    }

    case TYPE_BUFFER:
    {
        // Buffer barrier over the whole test buffer; no queue ownership transfer.
        const vk::VkBufferMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                                                   DE_NULL,

                                                   m_srcAccesses,
                                                   m_dstAccesses,

                                                   VK_QUEUE_FAMILY_IGNORED,
                                                   VK_QUEUE_FAMILY_IGNORED,

                                                   context.getBuffer(),
                                                   0,
                                                   VK_WHOLE_SIZE};

        vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0,
                               (const vk::VkMemoryBarrier *)DE_NULL, 1, &barrier, 0,
                               (const vk::VkImageMemoryBarrier *)DE_NULL);
        break;
    }

    case TYPE_IMAGE:
    {
        // Image barrier with identical old/new layout (no transition);
        // m_imageLayout must be set for TYPE_IMAGE — Maybe is dereferenced here.
        // Subresource range: single mip level, single layer, color aspect.
        const vk::VkImageMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                  DE_NULL,

                                                  m_srcAccesses,
                                                  m_dstAccesses,

                                                  *m_imageLayout,
                                                  *m_imageLayout,

                                                  VK_QUEUE_FAMILY_IGNORED,
                                                  VK_QUEUE_FAMILY_IGNORED,

                                                  context.getImage(),
                                                  {vk::VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}};

        vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0,
                               (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL, 1,
                               &barrier);
        break;
    }

    default:
        DE_FATAL("Unknown pipeline barrier type");
    }
}
2209
// Records an image layout transition barrier (srcLayout -> dstLayout) for
// the test image; verify() marks the reference contents undefined since a
// transition may not preserve data.
class ImageTransition : public CmdCommand
{
public:
    ImageTransition(vk::VkPipelineStageFlags srcStages, vk::VkAccessFlags srcAccesses,

                    vk::VkPipelineStageFlags dstStages, vk::VkAccessFlags dstAccesses,

                    vk::VkImageLayout srcLayout, vk::VkImageLayout dstLayout);

    ~ImageTransition(void)
    {
    }
    const char *getName(void) const
    {
        return "ImageTransition";
    }

    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t);

private:
    const vk::VkPipelineStageFlags m_srcStages;
    const vk::VkAccessFlags m_srcAccesses;
    const vk::VkPipelineStageFlags m_dstStages;
    const vk::VkAccessFlags m_dstAccesses;
    const vk::VkImageLayout m_srcLayout;
    const vk::VkImageLayout m_dstLayout;

    // Captured in prepare(); used by verify() to invalidate the reference.
    vk::VkDeviceSize m_imageMemorySize;
};
2242
// Member-wise initialization; m_imageMemorySize is filled in by prepare().
ImageTransition::ImageTransition(vk::VkPipelineStageFlags srcStages, vk::VkAccessFlags srcAccesses,

                                 vk::VkPipelineStageFlags dstStages, vk::VkAccessFlags dstAccesses,

                                 vk::VkImageLayout srcLayout, vk::VkImageLayout dstLayout)
    : m_srcStages(srcStages)
    , m_srcAccesses(srcAccesses)
    , m_dstStages(dstStages)
    , m_dstAccesses(dstAccesses)
    , m_srcLayout(srcLayout)
    , m_dstLayout(dstLayout)
    , m_imageMemorySize(0)
{
}
2257
logSubmit(TestLog & log,size_t commandIndex) const2258 void ImageTransition::logSubmit(TestLog &log, size_t commandIndex) const
2259 {
2260 log << TestLog::Message << commandIndex << ":" << getName() << " Image transition pipeline barrier"
2261 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages)
2262 << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2263 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages)
2264 << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << ", srcLayout: " << m_srcLayout
2265 << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
2266 }
2267
void ImageTransition::prepare(PrepareContext &context)
{
    // The transition is only valid if the image is currently in m_srcLayout,
    // or either side is UNDEFINED (which matches any current contents).
    DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED ||
              m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);

    // Track the new layout for subsequent commands and remember the memory
    // size so verify() can invalidate the whole reference range.
    context.setImageLayout(m_dstLayout);
    m_imageMemorySize = context.getImageMemorySize();
}
2276
// Record the layout-transition image barrier (single mip, single layer,
// color aspect) with the configured stage/access masks.
void ImageTransition::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer cmd = context.getCommandBuffer();
    const vk::VkImageMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                              DE_NULL,

                                              m_srcAccesses,
                                              m_dstAccesses,

                                              m_srcLayout,
                                              m_dstLayout,

                                              VK_QUEUE_FAMILY_IGNORED,
                                              VK_QUEUE_FAMILY_IGNORED,

                                              context.getImage(),
                                              {vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}};

    vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0,
                           (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL, 1,
                           &barrier);
}
2300
void ImageTransition::verify(VerifyContext &context, size_t)
{
    // A layout transition may not preserve image contents, so mark the whole
    // backing memory range as undefined in the reference.
    context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
}
2305
// Records vkCmdFillBuffer with a constant 32-bit value over the test buffer
// (size rounded down to a multiple of 4, as required by the API).
class FillBuffer : public CmdCommand
{
public:
    FillBuffer(uint32_t value) : m_value(value), m_bufferSize(0)
    {
    }
    ~FillBuffer(void)
    {
    }
    const char *getName(void) const
    {
        return "FillBuffer";
    }

    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const uint32_t m_value; // 32-bit fill pattern.
    vk::VkDeviceSize m_bufferSize; // Rounded-down size, set in submit().
};
2328
logSubmit(TestLog & log,size_t commandIndex) const2329 void FillBuffer::logSubmit(TestLog &log, size_t commandIndex) const
2330 {
2331 log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2332 }
2333
submit(SubmitContext & context)2334 void FillBuffer::submit(SubmitContext &context)
2335 {
2336 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
2337 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2338 const vk::VkBuffer buffer = context.getBuffer();
2339 const vk::VkDeviceSize sizeMask = ~(0x3ull); // \note Round down to multiple of 4
2340
2341 m_bufferSize = sizeMask & context.getBufferSize();
2342 vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2343 }
2344
verify(VerifyContext & context,size_t)2345 void FillBuffer::verify(VerifyContext &context, size_t)
2346 {
2347 ReferenceMemory &reference = context.getReference();
2348
2349 for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
2350 {
2351 #if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2352 reference.set(ndx, (uint8_t)(0xffu & (m_value >> (8 * (ndx % 4)))));
2353 #else
2354 reference.set(ndx, (uint8_t)(0xffu & (m_value >> (8 * (3 - (ndx % 4))))));
2355 #endif
2356 }
2357 }
2358
// Records vkCmdUpdateBuffer calls that fill the buffer with pseudo-random
// bytes derived from a seed; verify() replays the same PRNG stream.
class UpdateBuffer : public CmdCommand
{
public:
    UpdateBuffer(uint32_t seed) : m_seed(seed), m_bufferSize(0)
    {
    }
    ~UpdateBuffer(void)
    {
    }
    const char *getName(void) const
    {
        return "UpdateBuffer";
    }

    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const uint32_t m_seed; // PRNG seed shared by submit() and verify().
    vk::VkDeviceSize m_bufferSize; // Captured in submit().
};
2381
logSubmit(TestLog & log,size_t commandIndex) const2382 void UpdateBuffer::logSubmit(TestLog &log, size_t commandIndex) const
2383 {
2384 log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed
2385 << TestLog::EndMessage;
2386 }
2387
// Fill the whole buffer with seeded pseudo-random data using repeated
// vkCmdUpdateBuffer calls of at most 64KiB (the API's per-call limit).
void UpdateBuffer::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer cmd = context.getCommandBuffer();
    const vk::VkBuffer buffer = context.getBuffer();
    const size_t blockSize = 65536; // Max dataSize for vkCmdUpdateBuffer.
    std::vector<uint8_t> data(blockSize, 0);
    de::Random rng(m_seed);

    m_bufferSize = context.getBufferSize();

    for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
    {
        // Always regenerate a full block so the PRNG stream matches verify().
        for (size_t ndx = 0; ndx < data.size(); ndx++)
            data[ndx] = rng.getUint8();

        if (m_bufferSize - updated > blockSize)
            vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const uint32_t *)(&data[0]));
        else
            vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const uint32_t *)(&data[0]));
    }
}
2410
verify(VerifyContext & context,size_t)2411 void UpdateBuffer::verify(VerifyContext &context, size_t)
2412 {
2413 ReferenceMemory &reference = context.getReference();
2414 const size_t blockSize = 65536;
2415 vector<uint8_t> data(blockSize, 0);
2416 de::Random rng(m_seed);
2417
2418 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2419 {
2420 for (size_t ndx = 0; ndx < data.size(); ndx++)
2421 data[ndx] = rng.getUint8();
2422
2423 if (m_bufferSize - updated > blockSize)
2424 reference.setData(updated, blockSize, &data[0]);
2425 else
2426 reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
2427 }
2428 }
2429
// Copies the test buffer into a freshly allocated host-visible destination
// buffer; verify() maps that buffer and compares it against the reference.
class BufferCopyToBuffer : public CmdCommand
{
public:
    BufferCopyToBuffer(void)
    {
    }
    ~BufferCopyToBuffer(void)
    {
    }
    const char *getName(void) const
    {
        return "BufferCopyToBuffer";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    vk::VkDeviceSize m_bufferSize; // Captured in prepare().
    vk::Move<vk::VkBuffer> m_dstBuffer; // Host-visible destination.
    vk::Move<vk::VkDeviceMemory> m_memory;
};
2455
logPrepare(TestLog & log,size_t commandIndex) const2456 void BufferCopyToBuffer::logPrepare(TestLog &log, size_t commandIndex) const
2457 {
2458 log << TestLog::Message << commandIndex << ":" << getName()
2459 << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
2460 }
2461
// Allocate a host-visible destination buffer the same size as the test
// buffer so verify() can read the copy results back on the host.
void BufferCopyToBuffer::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();

    m_bufferSize = context.getBufferSize();

    m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                               vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
    // Host-visible so the result can be mapped and inspected in verify().
    m_memory =
        bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
}
2477
logSubmit(TestLog & log,size_t commandIndex) const2478 void BufferCopyToBuffer::logSubmit(TestLog &log, size_t commandIndex) const
2479 {
2480 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer"
2481 << TestLog::EndMessage;
2482 }
2483
// Record a single whole-buffer copy from the test buffer into m_dstBuffer.
void BufferCopyToBuffer::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
    const vk::VkBufferCopy range = {0, 0, // Offsets
                                    m_bufferSize};

    vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
}
2493
verify(VerifyContext & context,size_t commandIndex)2494 void BufferCopyToBuffer::verify(VerifyContext &context, size_t commandIndex)
2495 {
2496 tcu::ResultCollector &resultCollector(context.getResultCollector());
2497 ReferenceMemory &reference(context.getReference());
2498 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
2499 const vk::VkDevice device = context.getContext().getDevice();
2500 const vk::VkQueue queue = context.getContext().getQueue();
2501 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2502 const vk::Unique<vk::VkCommandBuffer> commandBuffer(
2503 createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2504 const vk::VkBufferMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2505 DE_NULL,
2506
2507 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2508 vk::VK_ACCESS_HOST_READ_BIT,
2509
2510 VK_QUEUE_FAMILY_IGNORED,
2511 VK_QUEUE_FAMILY_IGNORED,
2512 *m_dstBuffer,
2513 0,
2514 VK_WHOLE_SIZE};
2515
2516 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT,
2517 (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 1, &barrier, 0,
2518 (const vk::VkImageMemoryBarrier *)DE_NULL);
2519
2520 endCommandBuffer(vkd, *commandBuffer);
2521 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2522
2523 {
2524 void *const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2525 bool isOk = true;
2526
2527 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
2528
2529 {
2530 const uint8_t *const data = (const uint8_t *)ptr;
2531
2532 for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2533 {
2534 if (reference.isDefined(pos))
2535 {
2536 if (data[pos] != reference.get(pos))
2537 {
2538 resultCollector.fail(de::toString(commandIndex) + ":" + getName() +
2539 " Result differs from reference, Expected: " +
2540 de::toString(tcu::toHex<8>(reference.get(pos))) +
2541 ", Got: " + de::toString(tcu::toHex<8>(data[pos])) +
2542 ", At offset: " + de::toString(pos));
2543 break;
2544 }
2545 }
2546 }
2547 }
2548
2549 vkd.unmapMemory(device, *m_memory);
2550
2551 if (!isOk)
2552 context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed"
2553 << TestLog::EndMessage;
2554 }
2555 }
2556
// Fills a host-visible source buffer with seeded pseudo-random data and
// copies it into the test buffer; verify() replays the same PRNG stream.
class BufferCopyFromBuffer : public CmdCommand
{
public:
    BufferCopyFromBuffer(uint32_t seed) : m_seed(seed), m_bufferSize(0)
    {
    }
    ~BufferCopyFromBuffer(void)
    {
    }
    const char *getName(void) const
    {
        return "BufferCopyFromBuffer";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const uint32_t m_seed; // PRNG seed for the source data.
    vk::VkDeviceSize m_bufferSize; // Captured in prepare().
    vk::Move<vk::VkBuffer> m_srcBuffer; // Host-visible source buffer.
    vk::Move<vk::VkDeviceMemory> m_memory;
};
2583
logPrepare(TestLog & log,size_t commandIndex) const2584 void BufferCopyFromBuffer::logPrepare(TestLog &log, size_t commandIndex) const
2585 {
2586 log << TestLog::Message << commandIndex << ":" << getName()
2587 << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
2588 }
2589
// Allocate a host-visible source buffer and fill it with rng(m_seed) bytes;
// flushed so device reads during the copy see the host writes.
void BufferCopyFromBuffer::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();

    m_bufferSize = context.getBufferSize();
    m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                               vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
    m_memory =
        bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

    {
        void *const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
        de::Random rng(m_seed);

        {
            uint8_t *const data = (uint8_t *)ptr;

            for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
                data[ndx] = rng.getUint8();
        }

        // Flush before unmapping in case the memory is not host-coherent.
        vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
        vkd.unmapMemory(device, *m_memory);
    }
}
2619
logSubmit(TestLog & log,size_t commandIndex) const2620 void BufferCopyFromBuffer::logSubmit(TestLog &log, size_t commandIndex) const
2621 {
2622 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer"
2623 << TestLog::EndMessage;
2624 }
2625
// Record a whole-buffer copy from the prepared source into the test buffer.
void BufferCopyFromBuffer::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
    const vk::VkBufferCopy range = {0, 0, // Offsets
                                    m_bufferSize};

    vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
}
2635
verify(VerifyContext & context,size_t)2636 void BufferCopyFromBuffer::verify(VerifyContext &context, size_t)
2637 {
2638 ReferenceMemory &reference(context.getReference());
2639 de::Random rng(m_seed);
2640
2641 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2642 reference.set(ndx, rng.getUint8());
2643 }
2644
2645 class BufferCopyToImage : public CmdCommand
2646 {
2647 public:
BufferCopyToImage(void)2648 BufferCopyToImage(void)
2649 {
2650 }
~BufferCopyToImage(void)2651 ~BufferCopyToImage(void)
2652 {
2653 }
getName(void) const2654 const char *getName(void) const
2655 {
2656 return "BufferCopyToImage";
2657 }
2658
2659 void logPrepare(TestLog &log, size_t commandIndex) const;
2660 void prepare(PrepareContext &context);
2661 void logSubmit(TestLog &log, size_t commandIndex) const;
2662 void submit(SubmitContext &context);
2663 void verify(VerifyContext &context, size_t commandIndex);
2664
2665 private:
2666 int32_t m_imageWidth;
2667 int32_t m_imageHeight;
2668 vk::Move<vk::VkImage> m_dstImage;
2669 vk::Move<vk::VkDeviceMemory> m_memory;
2670 };
2671
logPrepare(TestLog & log,size_t commandIndex) const2672 void BufferCopyToImage::logPrepare(TestLog &log, size_t commandIndex) const
2673 {
2674 log << TestLog::Message << commandIndex << ":" << getName()
2675 << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
2676 }
2677
// Create an RGBA8 2D image sized so that width*height*4 covers the test
// buffer, bind device memory, and transition it to TRANSFER_DST_OPTIMAL.
void BufferCopyToImage::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
    // Pick W x H such that W*H*4 matches the buffer size (4 bytes/texel).
    const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());

    m_imageWidth = imageSize[0];
    m_imageHeight = imageSize[1];

    {
        // TRANSFER_SRC usage is needed too: verify() reads the image back.
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  DE_NULL,

                                                  0,
                                                  vk::VK_IMAGE_TYPE_2D,
                                                  vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                  {
                                                      (uint32_t)m_imageWidth,
                                                      (uint32_t)m_imageHeight,
                                                      1u,
                                                  },
                                                  1,
                                                  1, // mipLevels, arrayLayers
                                                  vk::VK_SAMPLE_COUNT_1_BIT,

                                                  vk::VK_IMAGE_TILING_OPTIMAL,
                                                  vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                                                      vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                                  vk::VK_SHARING_MODE_EXCLUSIVE,

                                                  (uint32_t)queueFamilies.size(),
                                                  &queueFamilies[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};

        m_dstImage = vk::createImage(vkd, device, &createInfo);
    }

    m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);

    {
        // Transition UNDEFINED -> TRANSFER_DST_OPTIMAL up front so submit()
        // can record the copy without further layout handling.
        const vk::Unique<vk::VkCommandBuffer> commandBuffer(
            createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
        const vk::VkImageMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                  DE_NULL,

                                                  0,
                                                  vk::VK_ACCESS_TRANSFER_WRITE_BIT,

                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED,
                                                  vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

                                                  VK_QUEUE_FAMILY_IGNORED,
                                                  VK_QUEUE_FAMILY_IGNORED,

                                                  *m_dstImage,
                                                  {
                                                      vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                      0, // Mip level
                                                      1, // Mip level count
                                                      0, // Layer
                                                      1  // Layer count
                                                  }};

        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                               vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0,
                               (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL, 1,
                               &barrier);

        endCommandBuffer(vkd, *commandBuffer);
        submitCommandsAndWait(vkd, device, queue, *commandBuffer);
    }
}
2755
logSubmit(TestLog & log,size_t commandIndex) const2756 void BufferCopyToImage::logSubmit(TestLog &log, size_t commandIndex) const
2757 {
2758 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
2759 }
2760
// Record a tightly-packed copy of the test buffer into the full extent of
// the destination image (already in TRANSFER_DST_OPTIMAL from prepare()).
void BufferCopyToImage::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
    // bufferRowLength/bufferImageHeight of 0 mean tightly packed data.
    const vk::VkBufferImageCopy region = {0,
                                          0,
                                          0,
                                          {
                                              vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                              0, // mipLevel
                                              0, // arrayLayer
                                              1  // layerCount
                                          },
                                          {0, 0, 0},
                                          {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};

    vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                             1, &region);
}
2780
// Copy the image back into a temporary host-visible buffer, map it and
// compare every defined byte against the reference memory.
void BufferCopyToImage::verify(VerifyContext &context, size_t commandIndex)
{
    tcu::ResultCollector &resultCollector(context.getResultCollector());
    ReferenceMemory &reference(context.getReference());
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vk::Unique<vk::VkCommandBuffer> commandBuffer(
        createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
    // Readback buffer: 4 bytes per texel (RGBA8).
    const vk::Unique<vk::VkBuffer> dstBuffer(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight,
                                                          vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                                                          vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
    const vk::Unique<vk::VkDeviceMemory> memory(
        bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
    {
        // Transition the image DST_OPTIMAL -> SRC_OPTIMAL for the readback copy.
        const vk::VkImageMemoryBarrier imageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                       DE_NULL,

                                                       vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                       vk::VK_ACCESS_TRANSFER_READ_BIT,

                                                       vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                       vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

                                                       VK_QUEUE_FAMILY_IGNORED,
                                                       VK_QUEUE_FAMILY_IGNORED,

                                                       *m_dstImage,
                                                       {
                                                           vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                           0, // Mip level
                                                           1, // Mip level count
                                                           0, // Layer
                                                           1  // Layer count
                                                       }};
        // Transfer-write -> host-read barrier so the mapped readback is visible.
        const vk::VkBufferMemoryBarrier bufferBarrier = {vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                                                         DE_NULL,

                                                         vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                         vk::VK_ACCESS_HOST_READ_BIT,

                                                         VK_QUEUE_FAMILY_IGNORED,
                                                         VK_QUEUE_FAMILY_IGNORED,
                                                         *dstBuffer,
                                                         0,
                                                         VK_WHOLE_SIZE};

        // Tightly packed whole-image region (bufferRowLength/ImageHeight = 0).
        const vk::VkBufferImageCopy region = {0,
                                              0,
                                              0,
                                              {
                                                  vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                  0, // mipLevel
                                                  0, // arrayLayer
                                                  1  // layerCount
                                              },
                                              {0, 0, 0},
                                              {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};

        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
                               (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &imageBarrier);
        vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1,
                                 &region);
        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 1, &bufferBarrier, 0,
                               (const vk::VkImageMemoryBarrier *)DE_NULL);
    }

    endCommandBuffer(vkd, *commandBuffer);
    submitCommandsAndWait(vkd, device, queue, *commandBuffer);

    {
        void *const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);

        invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);

        {
            const uint8_t *const data = (const uint8_t *)ptr;

            for (size_t pos = 0; pos < (size_t)(4 * m_imageWidth * m_imageHeight); pos++)
            {
                // Only bytes with a defined reference value are comparable.
                if (reference.isDefined(pos))
                {
                    if (data[pos] != reference.get(pos))
                    {
                        resultCollector.fail(de::toString(commandIndex) + ":" + getName() +
                                             " Result differs from reference, Expected: " +
                                             de::toString(tcu::toHex<8>(reference.get(pos))) +
                                             ", Got: " + de::toString(tcu::toHex<8>(data[pos])) +
                                             ", At offset: " + de::toString(pos));
                        break; // Report only the first mismatching byte.
                    }
                }
            }
        }

        vkd.unmapMemory(device, *memory);
    }
}
2885
// CmdCommand that allocates its own transfer-source image, fills it with a
// seeded pseudo-random pattern during prepare(), and copies that image into
// the test buffer when submitted. verify() regenerates the identical byte
// stream from m_seed to update the reference memory.
class BufferCopyFromImage : public CmdCommand
{
public:
    BufferCopyFromImage(uint32_t seed) : m_seed(seed)
    {
    }
    ~BufferCopyFromImage(void)
    {
    }
    const char *getName(void) const
    {
        return "BufferCopyFromImage";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const uint32_t m_seed;                 // seed for the de::Random data pattern
    int32_t m_imageWidth;                  // chosen so width * height * 4 == buffer size
    int32_t m_imageHeight;
    vk::Move<vk::VkImage> m_srcImage;      // source image created in prepare()
    vk::Move<vk::VkDeviceMemory> m_memory; // backing memory of m_srcImage
};
2913
logPrepare(TestLog & log,size_t commandIndex) const2914 void BufferCopyFromImage::logPrepare(TestLog &log, size_t commandIndex) const
2915 {
2916 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy."
2917 << TestLog::EndMessage;
2918 }
2919
// Create the transfer-source image and upload seeded random data into it via
// a temporary host-visible staging buffer. After this, the image is in
// TRANSFER_SRC_OPTIMAL, ready for submit()'s image-to-buffer copy.
void BufferCopyFromImage::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
    // Pick W x H such that W * H * 4 matches the test buffer size.
    const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());

    m_imageWidth = imageSize[0];
    m_imageHeight = imageSize[1];

    {
        // 2D RGBA8 image usable both as transfer destination (for the upload)
        // and transfer source (for submit()'s copy).
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  DE_NULL,

                                                  0,
                                                  vk::VK_IMAGE_TYPE_2D,
                                                  vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                  {
                                                      (uint32_t)m_imageWidth,
                                                      (uint32_t)m_imageHeight,
                                                      1u,
                                                  },
                                                  1,
                                                  1, // mipLevels, arrayLayers
                                                  vk::VK_SAMPLE_COUNT_1_BIT,

                                                  vk::VK_IMAGE_TILING_OPTIMAL,
                                                  vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                                                      vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                                  vk::VK_SHARING_MODE_EXCLUSIVE,

                                                  (uint32_t)queueFamilies.size(),
                                                  &queueFamilies[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};

        m_srcImage = vk::createImage(vkd, device, &createInfo);
    }

    m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

    {
        // Host-visible staging buffer used only for the initial upload.
        const vk::Unique<vk::VkBuffer> srcBuffer(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight,
                                                              vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                                                              vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
        const vk::Unique<vk::VkDeviceMemory> memory(
            bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
        const vk::Unique<vk::VkCommandBuffer> commandBuffer(
            createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
        // UNDEFINED -> TRANSFER_DST_OPTIMAL before the upload copy.
        const vk::VkImageMemoryBarrier preImageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                          DE_NULL,

                                                          0,
                                                          vk::VK_ACCESS_TRANSFER_WRITE_BIT,

                                                          vk::VK_IMAGE_LAYOUT_UNDEFINED,
                                                          vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

                                                          VK_QUEUE_FAMILY_IGNORED,
                                                          VK_QUEUE_FAMILY_IGNORED,

                                                          *m_srcImage,
                                                          {
                                                              vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                              0, // Mip level
                                                              1, // Mip level count
                                                              0, // Layer
                                                              1  // Layer count
                                                          }};
        // TRANSFER_DST -> TRANSFER_SRC_OPTIMAL so submit() can read the image.
        const vk::VkImageMemoryBarrier postImageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                           DE_NULL,

                                                           vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           0,

                                                           vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

                                                           VK_QUEUE_FAMILY_IGNORED,
                                                           VK_QUEUE_FAMILY_IGNORED,

                                                           *m_srcImage,
                                                           {
                                                               vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                               0, // Mip level
                                                               1, // Mip level count
                                                               0, // Layer
                                                               1  // Layer count
                                                           }};
        // Whole-image copy; bufferRowLength/bufferImageHeight == 0 means the
        // buffer data is tightly packed.
        const vk::VkBufferImageCopy region = {0,
                                              0,
                                              0,
                                              {
                                                  vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                  0, // mipLevel
                                                  0, // arrayLayer
                                                  1  // layerCount
                                              },
                                              {0, 0, 0},
                                              {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};

        {
            // Fill the staging buffer with the deterministic pattern that
            // verify() will regenerate from m_seed.
            void *const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
            de::Random rng(m_seed);

            {
                uint8_t *const data = (uint8_t *)ptr;

                for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
                    data[ndx] = rng.getUint8();
            }

            vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
            vkd.unmapMemory(device, *memory);
        }

        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                               vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0,
                               (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL, 1,
                               &preImageBarrier);
        vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                                 &region);
        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
                               (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &postImageBarrier);

        endCommandBuffer(vkd, *commandBuffer);
        submitCommandsAndWait(vkd, device, queue, *commandBuffer);
    }
}
3053
logSubmit(TestLog & log,size_t commandIndex) const3054 void BufferCopyFromImage::logSubmit(TestLog &log, size_t commandIndex) const
3055 {
3056 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image"
3057 << TestLog::EndMessage;
3058 }
3059
submit(SubmitContext & context)3060 void BufferCopyFromImage::submit(SubmitContext &context)
3061 {
3062 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
3063 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3064 const vk::VkBufferImageCopy region = {0,
3065 0,
3066 0,
3067 {
3068 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3069 0, // mipLevel
3070 0, // arrayLayer
3071 1 // layerCount
3072 },
3073 {0, 0, 0},
3074 {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};
3075
3076 vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(),
3077 1, ®ion);
3078 }
3079
verify(VerifyContext & context,size_t)3080 void BufferCopyFromImage::verify(VerifyContext &context, size_t)
3081 {
3082 ReferenceMemory &reference(context.getReference());
3083 de::Random rng(m_seed);
3084
3085 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
3086 reference.set(ndx, rng.getUint8());
3087 }
3088
3089 class ImageCopyToBuffer : public CmdCommand
3090 {
3091 public:
ImageCopyToBuffer(vk::VkImageLayout imageLayout)3092 ImageCopyToBuffer(vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout)
3093 {
3094 }
~ImageCopyToBuffer(void)3095 ~ImageCopyToBuffer(void)
3096 {
3097 }
getName(void) const3098 const char *getName(void) const
3099 {
3100 return "BufferCopyToImage";
3101 }
3102
3103 void logPrepare(TestLog &log, size_t commandIndex) const;
3104 void prepare(PrepareContext &context);
3105 void logSubmit(TestLog &log, size_t commandIndex) const;
3106 void submit(SubmitContext &context);
3107 void verify(VerifyContext &context, size_t commandIndex);
3108
3109 private:
3110 vk::VkImageLayout m_imageLayout;
3111 vk::VkDeviceSize m_bufferSize;
3112 vk::Move<vk::VkBuffer> m_dstBuffer;
3113 vk::Move<vk::VkDeviceMemory> m_memory;
3114 vk::VkDeviceSize m_imageMemorySize;
3115 int32_t m_imageWidth;
3116 int32_t m_imageHeight;
3117 };
3118
logPrepare(TestLog & log,size_t commandIndex) const3119 void ImageCopyToBuffer::logPrepare(TestLog &log, size_t commandIndex) const
3120 {
3121 log << TestLog::Message << commandIndex << ":" << getName()
3122 << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
3123 }
3124
// Allocate a host-visible readback buffer large enough for the whole test
// image (RGBA8 => 4 bytes per texel).
void ImageCopyToBuffer::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();

    m_imageWidth = context.getImageWidth();
    m_imageHeight = context.getImageHeight();
    m_bufferSize = 4 * m_imageWidth * m_imageHeight; // 4 bytes per RGBA8 texel
    m_imageMemorySize = context.getImageMemorySize();
    m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                               vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
    m_memory =
        bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
}
3142
logSubmit(TestLog & log,size_t commandIndex) const3143 void ImageCopyToBuffer::logSubmit(TestLog &log, size_t commandIndex) const
3144 {
3145 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
3146 }
3147
submit(SubmitContext & context)3148 void ImageCopyToBuffer::submit(SubmitContext &context)
3149 {
3150 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
3151 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3152 const vk::VkBufferImageCopy region = {0,
3153 0,
3154 0,
3155 {
3156 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3157 0, // mipLevel
3158 0, // arrayLayer
3159 1 // layerCount
3160 },
3161 {0, 0, 0},
3162 {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};
3163
3164 vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, ®ion);
3165 }
3166
// Make the transfer write to m_dstBuffer visible to the host, then map the
// buffer and compare its contents pixel-by-pixel against the reference image.
void ImageCopyToBuffer::verify(VerifyContext &context, size_t commandIndex)
{
    tcu::ResultCollector &resultCollector(context.getResultCollector());
    ReferenceMemory &reference(context.getReference());
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vk::Unique<vk::VkCommandBuffer> commandBuffer(
        createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
    // Transfer-write -> host-read barrier covering the whole readback buffer.
    const vk::VkBufferMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                                               DE_NULL,

                                               vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                               vk::VK_ACCESS_HOST_READ_BIT,

                                               VK_QUEUE_FAMILY_IGNORED,
                                               VK_QUEUE_FAMILY_IGNORED,
                                               *m_dstBuffer,
                                               0,
                                               VK_WHOLE_SIZE};

    vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT,
                           (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 1, &barrier, 0,
                           (const vk::VkImageMemoryBarrier *)DE_NULL);

    endCommandBuffer(vkd, *commandBuffer);
    submitCommandsAndWait(vkd, device, queue, *commandBuffer);

    // NOTE(review): the byte-level reference for the image backing memory is
    // marked undefined here — presumably because optimal-tiling image contents
    // are not byte-wise predictable; confirm against ReferenceMemory usage.
    reference.setUndefined(0, (size_t)m_imageMemorySize);
    {
        void *const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
        const ConstPixelBufferAccess referenceImage(context.getReferenceImage().getAccess());
        // Interpret the mapped buffer as a tightly packed RGBA8 image.
        const ConstPixelBufferAccess resultImage(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8),
                                                 m_imageWidth, m_imageHeight, 1, ptr);

        vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);

        // Exact (zero-threshold) image comparison.
        if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(),
                                      (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage,
                                      resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
            resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");

        vkd.unmapMemory(device, *m_memory);
    }
}
3213
// CmdCommand that fills a host-visible staging buffer with a seeded
// pseudo-random pattern in prepare() and copies it over the test image when
// submitted; verify() replays the same pattern into the reference image.
class ImageCopyFromBuffer : public CmdCommand
{
public:
    ImageCopyFromBuffer(uint32_t seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout)
    {
    }
    ~ImageCopyFromBuffer(void)
    {
    }
    const char *getName(void) const
    {
        return "ImageCopyFromBuffer";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const uint32_t m_seed;                 // seed for the de::Random data pattern
    const vk::VkImageLayout m_imageLayout; // layout of the test image during the copy
    int32_t m_imageWidth;
    int32_t m_imageHeight;
    vk::VkDeviceSize m_imageMemorySize;
    vk::VkDeviceSize m_bufferSize;         // staging size: width * height * 4 bytes
    vk::Move<vk::VkBuffer> m_srcBuffer;    // staging buffer created in prepare()
    vk::Move<vk::VkDeviceMemory> m_memory; // host-visible memory of m_srcBuffer
};
3244
logPrepare(TestLog & log,size_t commandIndex) const3245 void ImageCopyFromBuffer::logPrepare(TestLog &log, size_t commandIndex) const
3246 {
3247 log << TestLog::Message << commandIndex << ":" << getName()
3248 << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
3249 }
3250
prepare(PrepareContext & context)3251 void ImageCopyFromBuffer::prepare(PrepareContext &context)
3252 {
3253 const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
3254 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
3255 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3256 const vk::VkDevice device = context.getContext().getDevice();
3257 const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
3258
3259 m_imageWidth = context.getImageHeight();
3260 m_imageHeight = context.getImageWidth();
3261 m_imageMemorySize = context.getImageMemorySize();
3262 m_bufferSize = m_imageWidth * m_imageHeight * 4;
3263 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
3264 vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3265 m_memory =
3266 bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3267
3268 {
3269 void *const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3270 de::Random rng(m_seed);
3271
3272 {
3273 uint8_t *const data = (uint8_t *)ptr;
3274
3275 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3276 data[ndx] = rng.getUint8();
3277 }
3278
3279 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
3280 vkd.unmapMemory(device, *m_memory);
3281 }
3282 }
3283
logSubmit(TestLog & log,size_t commandIndex) const3284 void ImageCopyFromBuffer::logSubmit(TestLog &log, size_t commandIndex) const
3285 {
3286 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer"
3287 << TestLog::EndMessage;
3288 }
3289
submit(SubmitContext & context)3290 void ImageCopyFromBuffer::submit(SubmitContext &context)
3291 {
3292 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
3293 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3294 const vk::VkBufferImageCopy region = {0,
3295 0,
3296 0,
3297 {
3298 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3299 0, // mipLevel
3300 0, // arrayLayer
3301 1 // layerCount
3302 },
3303 {0, 0, 0},
3304 {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};
3305
3306 vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, ®ion);
3307 }
3308
verify(VerifyContext & context,size_t)3309 void ImageCopyFromBuffer::verify(VerifyContext &context, size_t)
3310 {
3311 ReferenceMemory &reference(context.getReference());
3312 de::Random rng(m_seed);
3313
3314 reference.setUndefined(0, (size_t)m_imageMemorySize);
3315
3316 {
3317 const PixelBufferAccess &refAccess(context.getReferenceImage().getAccess());
3318
3319 for (int32_t y = 0; y < m_imageHeight; y++)
3320 for (int32_t x = 0; x < m_imageWidth; x++)
3321 {
3322 const uint8_t r8 = rng.getUint8();
3323 const uint8_t g8 = rng.getUint8();
3324 const uint8_t b8 = rng.getUint8();
3325 const uint8_t a8 = rng.getUint8();
3326
3327 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3328 }
3329 }
3330 }
3331
// CmdCommand that allocates its own source image, uploads a seeded
// pseudo-random pattern into it during prepare(), and copies it over the
// test image when submitted; verify() replays the pattern into the
// reference image.
class ImageCopyFromImage : public CmdCommand
{
public:
    ImageCopyFromImage(uint32_t seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout)
    {
    }
    ~ImageCopyFromImage(void)
    {
    }
    const char *getName(void) const
    {
        return "ImageCopyFromImage";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const uint32_t m_seed;                 // seed for the de::Random data pattern
    const vk::VkImageLayout m_imageLayout; // layout of the test image during the copy
    int32_t m_imageWidth;
    int32_t m_imageHeight;
    vk::VkDeviceSize m_imageMemorySize;
    vk::Move<vk::VkImage> m_srcImage;      // source image created in prepare()
    vk::Move<vk::VkDeviceMemory> m_memory; // backing memory of m_srcImage
};
3361
logPrepare(TestLog & log,size_t commandIndex) const3362 void ImageCopyFromImage::logPrepare(TestLog &log, size_t commandIndex) const
3363 {
3364 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy."
3365 << TestLog::EndMessage;
3366 }
3367
// Create a source image matching the test image dimensions and upload seeded
// random data into it via a temporary staging buffer. After this, the image
// is in TRANSFER_SRC_OPTIMAL, ready for submit()'s image-to-image copy.
void ImageCopyFromImage::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();

    m_imageWidth = context.getImageWidth();
    m_imageHeight = context.getImageHeight();
    m_imageMemorySize = context.getImageMemorySize();

    {
        // 2D RGBA8 image usable both as transfer destination (for the upload)
        // and transfer source (for submit()'s copy).
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  DE_NULL,

                                                  0,
                                                  vk::VK_IMAGE_TYPE_2D,
                                                  vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                  {
                                                      (uint32_t)m_imageWidth,
                                                      (uint32_t)m_imageHeight,
                                                      1u,
                                                  },
                                                  1,
                                                  1, // mipLevels, arrayLayers
                                                  vk::VK_SAMPLE_COUNT_1_BIT,

                                                  vk::VK_IMAGE_TILING_OPTIMAL,
                                                  vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                                                      vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                                  vk::VK_SHARING_MODE_EXCLUSIVE,

                                                  (uint32_t)queueFamilies.size(),
                                                  &queueFamilies[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};

        m_srcImage = vk::createImage(vkd, device, &createInfo);
    }

    m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

    {
        // Host-visible staging buffer used only for the initial upload.
        const vk::Unique<vk::VkBuffer> srcBuffer(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight,
                                                              vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                                                              vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
        const vk::Unique<vk::VkDeviceMemory> memory(
            bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
        const vk::Unique<vk::VkCommandBuffer> commandBuffer(
            createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
        // UNDEFINED -> TRANSFER_DST_OPTIMAL before the upload copy.
        const vk::VkImageMemoryBarrier preImageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                          DE_NULL,

                                                          0,
                                                          vk::VK_ACCESS_TRANSFER_WRITE_BIT,

                                                          vk::VK_IMAGE_LAYOUT_UNDEFINED,
                                                          vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

                                                          VK_QUEUE_FAMILY_IGNORED,
                                                          VK_QUEUE_FAMILY_IGNORED,

                                                          *m_srcImage,
                                                          {
                                                              vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                              0, // Mip level
                                                              1, // Mip level count
                                                              0, // Layer
                                                              1  // Layer count
                                                          }};
        // TRANSFER_DST -> TRANSFER_SRC_OPTIMAL so submit() can read the image.
        const vk::VkImageMemoryBarrier postImageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                           DE_NULL,

                                                           vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           0,

                                                           vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

                                                           VK_QUEUE_FAMILY_IGNORED,
                                                           VK_QUEUE_FAMILY_IGNORED,

                                                           *m_srcImage,
                                                           {
                                                               vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                               0, // Mip level
                                                               1, // Mip level count
                                                               0, // Layer
                                                               1  // Layer count
                                                           }};
        // Whole-image copy; bufferRowLength/bufferImageHeight == 0 means the
        // buffer data is tightly packed.
        const vk::VkBufferImageCopy region = {0,
                                              0,
                                              0,
                                              {
                                                  vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                  0, // mipLevel
                                                  0, // arrayLayer
                                                  1  // layerCount
                                              },
                                              {0, 0, 0},
                                              {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};

        {
            // Fill the staging buffer with the deterministic pattern that
            // verify() will regenerate from m_seed.
            void *const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
            de::Random rng(m_seed);

            {
                uint8_t *const data = (uint8_t *)ptr;

                for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
                    data[ndx] = rng.getUint8();
            }

            vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
            vkd.unmapMemory(device, *memory);
        }

        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                               vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0,
                               (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL, 1,
                               &preImageBarrier);
        vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                                 &region);
        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
                               (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &postImageBarrier);

        endCommandBuffer(vkd, *commandBuffer);
        submitCommandsAndWait(vkd, device, queue, *commandBuffer);
    }
}
3501
logSubmit(TestLog & log,size_t commandIndex) const3502 void ImageCopyFromImage::logSubmit(TestLog &log, size_t commandIndex) const
3503 {
3504 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image"
3505 << TestLog::EndMessage;
3506 }
3507
submit(SubmitContext & context)3508 void ImageCopyFromImage::submit(SubmitContext &context)
3509 {
3510 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
3511 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3512 const vk::VkImageCopy region = {{
3513 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3514 0, // mipLevel
3515 0, // arrayLayer
3516 1 // layerCount
3517 },
3518 {0, 0, 0},
3519
3520 {
3521 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3522 0, // mipLevel
3523 0, // arrayLayer
3524 1 // layerCount
3525 },
3526 {0, 0, 0},
3527 {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};
3528
3529 vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(),
3530 m_imageLayout, 1, ®ion);
3531 }
3532
verify(VerifyContext & context,size_t)3533 void ImageCopyFromImage::verify(VerifyContext &context, size_t)
3534 {
3535 ReferenceMemory &reference(context.getReference());
3536 de::Random rng(m_seed);
3537
3538 reference.setUndefined(0, (size_t)m_imageMemorySize);
3539
3540 {
3541 const PixelBufferAccess &refAccess(context.getReferenceImage().getAccess());
3542
3543 for (int32_t y = 0; y < m_imageHeight; y++)
3544 for (int32_t x = 0; x < m_imageWidth; x++)
3545 {
3546 const uint8_t r8 = rng.getUint8();
3547 const uint8_t g8 = rng.getUint8();
3548 const uint8_t b8 = rng.getUint8();
3549 const uint8_t a8 = rng.getUint8();
3550
3551 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3552 }
3553 }
3554 }
3555
// CmdCommand that copies the current test image into a locally owned
// destination image; verify() reads the destination back through a
// host-visible buffer and compares it against the reference image.
class ImageCopyToImage : public CmdCommand
{
public:
    ImageCopyToImage(vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout)
    {
    }
    ~ImageCopyToImage(void)
    {
    }
    const char *getName(void) const
    {
        return "ImageCopyToImage";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const vk::VkImageLayout m_imageLayout; // layout of the test image during the copy
    int32_t m_imageWidth;
    int32_t m_imageHeight;
    vk::VkDeviceSize m_imageMemorySize;
    vk::Move<vk::VkImage> m_dstImage;      // destination image created in prepare()
    vk::Move<vk::VkDeviceMemory> m_memory; // backing memory of m_dstImage
};
3584
logPrepare(TestLog & log,size_t commandIndex) const3585 void ImageCopyToImage::logPrepare(TestLog &log, size_t commandIndex) const
3586 {
3587 log << TestLog::Message << commandIndex << ":" << getName()
3588 << " Allocate destination image for image to image copy." << TestLog::EndMessage;
3589 }
3590
// Create a destination image matching the test image dimensions and
// transition it to TRANSFER_DST_OPTIMAL so submit() can copy into it.
void ImageCopyToImage::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();

    m_imageWidth = context.getImageWidth();
    m_imageHeight = context.getImageHeight();
    m_imageMemorySize = context.getImageMemorySize();

    {
        // 2D RGBA8 image usable as transfer destination (for submit()'s copy)
        // and transfer source (for verify()'s readback).
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  DE_NULL,

                                                  0,
                                                  vk::VK_IMAGE_TYPE_2D,
                                                  vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                  {
                                                      (uint32_t)m_imageWidth,
                                                      (uint32_t)m_imageHeight,
                                                      1u,
                                                  },
                                                  1,
                                                  1, // mipLevels, arrayLayers
                                                  vk::VK_SAMPLE_COUNT_1_BIT,

                                                  vk::VK_IMAGE_TILING_OPTIMAL,
                                                  vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                                                      vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                                  vk::VK_SHARING_MODE_EXCLUSIVE,

                                                  (uint32_t)queueFamilies.size(),
                                                  &queueFamilies[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};

        m_dstImage = vk::createImage(vkd, device, &createInfo);
    }

    m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);

    {
        const vk::Unique<vk::VkCommandBuffer> commandBuffer(
            createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
        // UNDEFINED -> TRANSFER_DST_OPTIMAL layout transition.
        const vk::VkImageMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                  DE_NULL,

                                                  0,
                                                  vk::VK_ACCESS_TRANSFER_WRITE_BIT,

                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED,
                                                  vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

                                                  VK_QUEUE_FAMILY_IGNORED,
                                                  VK_QUEUE_FAMILY_IGNORED,

                                                  *m_dstImage,
                                                  {
                                                      vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                      0, // Mip level
                                                      1, // Mip level count
                                                      0, // Layer
                                                      1  // Layer count
                                                  }};

        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                               vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0,
                               (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL, 1,
                               &barrier);

        endCommandBuffer(vkd, *commandBuffer);
        submitCommandsAndWait(vkd, device, queue, *commandBuffer);
    }
}
3668
logSubmit(TestLog & log,size_t commandIndex) const3669 void ImageCopyToImage::logSubmit(TestLog &log, size_t commandIndex) const
3670 {
3671 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image"
3672 << TestLog::EndMessage;
3673 }
3674
submit(SubmitContext & context)3675 void ImageCopyToImage::submit(SubmitContext &context)
3676 {
3677 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
3678 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3679 const vk::VkImageCopy region = {{
3680 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3681 0, // mipLevel
3682 0, // arrayLayer
3683 1 // layerCount
3684 },
3685 {0, 0, 0},
3686
3687 {
3688 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3689 0, // mipLevel
3690 0, // arrayLayer
3691 1 // layerCount
3692 },
3693 {0, 0, 0},
3694 {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};
3695
3696 vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage,
3697 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
3698 }
3699
// Read the destination image back into a host-visible buffer and compare it
// against the reference image.
void ImageCopyToImage::verify(VerifyContext &context, size_t commandIndex)
{
    tcu::ResultCollector &resultCollector(context.getResultCollector());
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vk::Unique<vk::VkCommandBuffer> commandBuffer(
        createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
    // Temporary host-visible buffer for the readback (4 bytes per RGBA8 texel).
    const vk::Unique<vk::VkBuffer> dstBuffer(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight,
                                                          vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                                                          vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
    const vk::Unique<vk::VkDeviceMemory> memory(
        bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
    {
        // TRANSFER_DST -> TRANSFER_SRC transition of the destination image
        // so it can be read by the readback copy.
        const vk::VkImageMemoryBarrier imageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                       DE_NULL,

                                                       vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                       vk::VK_ACCESS_TRANSFER_READ_BIT,

                                                       vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                       vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

                                                       VK_QUEUE_FAMILY_IGNORED,
                                                       VK_QUEUE_FAMILY_IGNORED,

                                                       *m_dstImage,
                                                       {
                                                           vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                           0, // Mip level
                                                           1, // Mip level count
                                                           0, // Layer
                                                           1  // Layer count
                                                       }};
        // Transfer-write -> host-read barrier for the readback buffer.
        const vk::VkBufferMemoryBarrier bufferBarrier = {vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                                                         DE_NULL,

                                                         vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                         vk::VK_ACCESS_HOST_READ_BIT,

                                                         VK_QUEUE_FAMILY_IGNORED,
                                                         VK_QUEUE_FAMILY_IGNORED,
                                                         *dstBuffer,
                                                         0,
                                                         VK_WHOLE_SIZE};
        // Whole-image, tightly packed copy region.
        const vk::VkBufferImageCopy region = {0,
                                              0,
                                              0,
                                              {
                                                  vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                  0, // mipLevel
                                                  0, // arrayLayer
                                                  1  // layerCount
                                              },
                                              {0, 0, 0},
                                              {(uint32_t)m_imageWidth, (uint32_t)m_imageHeight, 1u}};

        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
                               (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &imageBarrier);
        vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1,
                                 &region);
        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 1, &bufferBarrier, 0,
                               (const vk::VkImageMemoryBarrier *)DE_NULL);
    }

    endCommandBuffer(vkd, *commandBuffer);
    submitCommandsAndWait(vkd, device, queue, *commandBuffer);

    {
        void *const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);

        vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);

        {
            const uint8_t *const data = (const uint8_t *)ptr;
            // Interpret the mapped readback as a tightly packed RGBA8 image.
            const ConstPixelBufferAccess resAccess(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8),
                                                   m_imageWidth, m_imageHeight, 1, data);
            const ConstPixelBufferAccess &refAccess(context.getReferenceImage().getAccess());

            // Exact (zero-threshold) image comparison.
            if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(),
                                          (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess,
                                          UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
                resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
        }

        vkd.unmapMemory(device, *memory);
    }
}
3794
// Scale factor applied by the image blit commands below.
enum BlitScale
{
    BLIT_SCALE_20, // Source and destination differ by a factor of two per axis (2x blit)
    BLIT_SCALE_10, // Source and destination have identical dimensions (1:1 blit)
};
3800
// Command that blits into the test target image from a separately created
// source image. The source contents are pseudo-random data derived from
// m_seed, so verify() can regenerate the expected result with the same seed.
class ImageBlitFromImage : public CmdCommand
{
public:
    // seed:        seed for the random source-image contents
    // scale:       BLIT_SCALE_10 (1:1) or BLIT_SCALE_20 (2x upscale)
    // imageLayout: layout the target image is in when the blit is recorded
    ImageBlitFromImage(uint32_t seed, BlitScale scale, vk::VkImageLayout imageLayout)
        : m_seed(seed)
        , m_scale(scale)
        , m_imageLayout(imageLayout)
    {
    }
    ~ImageBlitFromImage(void)
    {
    }
    const char *getName(void) const
    {
        return "ImageBlitFromImage";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const uint32_t m_seed;                 // Seed for the random source data
    const BlitScale m_scale;               // Blit scale factor
    const vk::VkImageLayout m_imageLayout; // Target image layout during the blit
    int32_t m_imageWidth;                  // Target image width; set in prepare()
    int32_t m_imageHeight;                 // Target image height; set in prepare()
    vk::VkDeviceSize m_imageMemorySize;    // Target image memory size; set in prepare()
    int32_t m_srcImageWidth;               // Source image width (half of target for 2x blits)
    int32_t m_srcImageHeight;              // Source image height (half of target for 2x blits)
    vk::Move<vk::VkImage> m_srcImage;      // Source image created in prepare()
    vk::Move<vk::VkDeviceMemory> m_memory; // Backing memory of the source image
};
3836
logPrepare(TestLog & log,size_t commandIndex) const3837 void ImageBlitFromImage::logPrepare(TestLog &log, size_t commandIndex) const
3838 {
3839 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit."
3840 << TestLog::EndMessage;
3841 }
3842
// Creates the blit source image, fills it with deterministic random data
// (uploaded through a host-visible staging buffer) and transitions it to
// TRANSFER_SRC_OPTIMAL so that submit() can blit from it.
void ImageBlitFromImage::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();

    m_imageWidth = context.getImageWidth();
    m_imageHeight = context.getImageHeight();
    m_imageMemorySize = context.getImageMemorySize();

    // For a 2x blit the source is half the destination size in each
    // dimension, clamped to at least one pixel.
    if (m_scale == BLIT_SCALE_10)
    {
        m_srcImageWidth = m_imageWidth;
        m_srcImageHeight = m_imageHeight;
    }
    else if (m_scale == BLIT_SCALE_20)
    {
        m_srcImageWidth = m_imageWidth == 1 ? 1 : m_imageWidth / 2;
        m_srcImageHeight = m_imageHeight == 1 ? 1 : m_imageHeight / 2;
    }
    else
        DE_FATAL("Unsupported scale");

    {
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  DE_NULL,

                                                  0,
                                                  vk::VK_IMAGE_TYPE_2D,
                                                  vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                  {
                                                      (uint32_t)m_srcImageWidth,
                                                      (uint32_t)m_srcImageHeight,
                                                      1u,
                                                  },
                                                  1,
                                                  1, // mipLevels, arrayLayers
                                                  vk::VK_SAMPLE_COUNT_1_BIT,

                                                  vk::VK_IMAGE_TILING_OPTIMAL,
                                                  // Written by the staging copy, read by the blit
                                                  vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                                                      vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                                  vk::VK_SHARING_MODE_EXCLUSIVE,

                                                  (uint32_t)queueFamilies.size(),
                                                  &queueFamilies[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};

        m_srcImage = vk::createImage(vkd, device, &createInfo);
    }

    m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

    {
        // Host-visible staging buffer for uploading the random pixel data.
        const vk::Unique<vk::VkBuffer> srcBuffer(createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight,
                                                              vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
                                                              vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
        const vk::Unique<vk::VkDeviceMemory> memory(
            bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
        const vk::Unique<vk::VkCommandBuffer> commandBuffer(
            createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
        // UNDEFINED -> TRANSFER_DST_OPTIMAL before the buffer-to-image copy.
        const vk::VkImageMemoryBarrier preImageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                          DE_NULL,

                                                          0,
                                                          vk::VK_ACCESS_TRANSFER_WRITE_BIT,

                                                          vk::VK_IMAGE_LAYOUT_UNDEFINED,
                                                          vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

                                                          VK_QUEUE_FAMILY_IGNORED,
                                                          VK_QUEUE_FAMILY_IGNORED,

                                                          *m_srcImage,
                                                          {
                                                              vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                              0, // Mip level
                                                              1, // Mip level count
                                                              0, // Layer
                                                              1  // Layer count
                                                          }};
        // TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL after the copy so the
        // image is ready to be blitted from in submit().
        const vk::VkImageMemoryBarrier postImageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                           DE_NULL,

                                                           vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                           0,

                                                           vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                           vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

                                                           VK_QUEUE_FAMILY_IGNORED,
                                                           VK_QUEUE_FAMILY_IGNORED,

                                                           *m_srcImage,
                                                           {
                                                               vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                               0, // Mip level
                                                               1, // Mip level count
                                                               0, // Layer
                                                               1  // Layer count
                                                           }};
        // Copy the whole staging buffer onto mip level 0 / layer 0.
        const vk::VkBufferImageCopy region = {0,
                                              0,
                                              0,
                                              {
                                                  vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                  0, // mipLevel
                                                  0, // arrayLayer
                                                  1  // layerCount
                                              },
                                              {0, 0, 0},
                                              {(uint32_t)m_srcImageWidth, (uint32_t)m_srcImageHeight, 1u}};

        {
            // Fill the staging buffer with random bytes; verify() replays the
            // same RNG sequence to reconstruct the expected contents.
            void *const ptr = mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
            de::Random rng(m_seed);

            {
                uint8_t *const data = (uint8_t *)ptr;

                for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
                    data[ndx] = rng.getUint8();
            }

            vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
            vkd.unmapMemory(device, *memory);
        }

        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                               vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0,
                               (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL, 1,
                               &preImageBarrier);
        vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                                 &region);
        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
                               (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &postImageBarrier);

        endCommandBuffer(vkd, *commandBuffer);
        // Blocking submit: the source image is fully initialized before the
        // test command sequence itself runs.
        submitCommandsAndWait(vkd, device, queue, *commandBuffer);
    }
}
3989
logSubmit(TestLog & log,size_t commandIndex) const3990 void ImageBlitFromImage::logSubmit(TestLog &log, size_t commandIndex) const
3991 {
3992 log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image"
3993 << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
3994 }
3995
// Records a blit of the whole source image onto the whole target image.
// With BLIT_SCALE_20 the destination is twice the source size; NEAREST
// filtering keeps the result exactly reproducible for verify().
void ImageBlitFromImage::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
    const vk::VkImageBlit region = {// Src
                                    {
                                        vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                        0, // mipLevel
                                        0, // arrayLayer
                                        1  // layerCount
                                    },
                                    {
                                        {0, 0, 0},
                                        {m_srcImageWidth, m_srcImageHeight, 1},
                                    },

                                    // Dst
                                    {
                                        vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                        0, // mipLevel
                                        0, // arrayLayer
                                        1  // layerCount
                                    },
                                    {{0, 0, 0}, {m_imageWidth, m_imageHeight, 1u}}};
    vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(),
                     m_imageLayout, 1, &region, vk::VK_FILTER_NEAREST);
}
4023
// Updates the reference image to the expected post-blit contents by
// replaying the same RNG sequence used in prepare() to build the source.
void ImageBlitFromImage::verify(VerifyContext &context, size_t)
{
    ReferenceMemory &reference(context.getReference());
    de::Random rng(m_seed);

    // The blit overwrote the target image, so its raw backing memory is no
    // longer byte-exactly known; only the reference image tracks it now.
    reference.setUndefined(0, (size_t)m_imageMemorySize);

    {
        const PixelBufferAccess &refAccess(context.getReferenceImage().getAccess());

        if (m_scale == BLIT_SCALE_10)
        {
            // 1:1 blit: the random source data lands directly in the target.
            // Consume RNG bytes in the same R,G,B,A order as prepare().
            for (int32_t y = 0; y < m_imageHeight; y++)
                for (int32_t x = 0; x < m_imageWidth; x++)
                {
                    const uint8_t r8 = rng.getUint8();
                    const uint8_t g8 = rng.getUint8();
                    const uint8_t b8 = rng.getUint8();
                    const uint8_t a8 = rng.getUint8();

                    refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
                }
        }
        else if (m_scale == BLIT_SCALE_20)
        {
            // 2x blit: rebuild the half-size source, then emulate a NEAREST
            // filtered upscale by sampling at destination pixel centers.
            tcu::TextureLevel source(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth,
                                     m_srcImageHeight);
            const float xscale = ((float)m_srcImageWidth) / (float)m_imageWidth;
            const float yscale = ((float)m_srcImageHeight) / (float)m_imageHeight;

            for (int32_t y = 0; y < m_srcImageHeight; y++)
                for (int32_t x = 0; x < m_srcImageWidth; x++)
                {
                    const uint8_t r8 = rng.getUint8();
                    const uint8_t g8 = rng.getUint8();
                    const uint8_t b8 = rng.getUint8();
                    const uint8_t a8 = rng.getUint8();

                    source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
                }

            for (int32_t y = 0; y < m_imageHeight; y++)
                for (int32_t x = 0; x < m_imageWidth; x++)
                    refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale),
                                                                      int((float(y) + 0.5f) * yscale)),
                                       x, y);
        }
        else
            DE_FATAL("Unsupported scale");
    }
}
4075
// Command that blits the test target image into a separately created
// destination image; verify() then reads the destination back and compares
// it against the (possibly 2x upscaled) reference image.
class ImageBlitToImage : public CmdCommand
{
public:
    // scale:       BLIT_SCALE_10 (1:1) or BLIT_SCALE_20 (2x upscale)
    // imageLayout: layout the source (target) image is in when the blit runs
    ImageBlitToImage(BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout)
    {
    }
    ~ImageBlitToImage(void)
    {
    }
    const char *getName(void) const
    {
        return "ImageBlitToImage";
    }

    void logPrepare(TestLog &log, size_t commandIndex) const;
    void prepare(PrepareContext &context);
    void logSubmit(TestLog &log, size_t commandIndex) const;
    void submit(SubmitContext &context);
    void verify(VerifyContext &context, size_t commandIndex);

private:
    const BlitScale m_scale;               // Blit scale factor
    const vk::VkImageLayout m_imageLayout; // Source image layout during the blit
    int32_t m_imageWidth;                  // Source (target) image width; set in prepare()
    int32_t m_imageHeight;                 // Source (target) image height; set in prepare()
    vk::VkDeviceSize m_imageMemorySize;    // Source image memory size; set in prepare()
    int32_t m_dstImageWidth;               // Destination image width (double for 2x blits)
    int32_t m_dstImageHeight;              // Destination image height (double for 2x blits)
    vk::Move<vk::VkImage> m_dstImage;      // Destination image created in prepare()
    vk::Move<vk::VkDeviceMemory> m_memory; // Backing memory of the destination image
};
4107
logPrepare(TestLog & log,size_t commandIndex) const4108 void ImageBlitToImage::logPrepare(TestLog &log, size_t commandIndex) const
4109 {
4110 log << TestLog::Message << commandIndex << ":" << getName()
4111 << " Allocate destination image for image to image blit." << TestLog::EndMessage;
4112 }
4113
prepare(PrepareContext & context)4114 void ImageBlitToImage::prepare(PrepareContext &context)
4115 {
4116 const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
4117 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
4118 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4119 const vk::VkDevice device = context.getContext().getDevice();
4120 const vk::VkQueue queue = context.getContext().getQueue();
4121 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4122 const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
4123
4124 m_imageWidth = context.getImageWidth();
4125 m_imageHeight = context.getImageHeight();
4126 m_imageMemorySize = context.getImageMemorySize();
4127
4128 if (m_scale == BLIT_SCALE_10)
4129 {
4130 m_dstImageWidth = context.getImageWidth();
4131 m_dstImageHeight = context.getImageHeight();
4132 }
4133 else if (m_scale == BLIT_SCALE_20)
4134 {
4135 m_dstImageWidth = context.getImageWidth() * 2;
4136 m_dstImageHeight = context.getImageHeight() * 2;
4137 }
4138 else
4139 DE_FATAL("Unsupportd blit scale");
4140
4141 {
4142 const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4143 DE_NULL,
4144
4145 0,
4146 vk::VK_IMAGE_TYPE_2D,
4147 vk::VK_FORMAT_R8G8B8A8_UNORM,
4148 {
4149 (uint32_t)m_dstImageWidth,
4150 (uint32_t)m_dstImageHeight,
4151 1u,
4152 },
4153 1,
4154 1, // mipLevels, arrayLayers
4155 vk::VK_SAMPLE_COUNT_1_BIT,
4156
4157 vk::VK_IMAGE_TILING_OPTIMAL,
4158 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT |
4159 vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
4160 vk::VK_SHARING_MODE_EXCLUSIVE,
4161
4162 (uint32_t)queueFamilies.size(),
4163 &queueFamilies[0],
4164 vk::VK_IMAGE_LAYOUT_UNDEFINED};
4165
4166 m_dstImage = vk::createImage(vkd, device, &createInfo);
4167 }
4168
4169 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
4170
4171 {
4172 const vk::Unique<vk::VkCommandBuffer> commandBuffer(
4173 createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4174 const vk::VkImageMemoryBarrier barrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4175 DE_NULL,
4176
4177 0,
4178 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4179
4180 vk::VK_IMAGE_LAYOUT_UNDEFINED,
4181 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4182
4183 VK_QUEUE_FAMILY_IGNORED,
4184 VK_QUEUE_FAMILY_IGNORED,
4185
4186 *m_dstImage,
4187 {
4188 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4189 0, // Mip level
4190 1, // Mip level count
4191 0, // Layer
4192 1 // Layer count
4193 }};
4194
4195 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
4196 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0,
4197 (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL, 1,
4198 &barrier);
4199
4200 endCommandBuffer(vkd, *commandBuffer);
4201 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
4202 }
4203 }
4204
logSubmit(TestLog & log,size_t commandIndex) const4205 void ImageBlitToImage::logSubmit(TestLog &log, size_t commandIndex) const
4206 {
4207 log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image"
4208 << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
4209 }
4210
// Records a blit of the whole target image onto the whole destination image.
// With BLIT_SCALE_20 the destination is twice the source size; NEAREST
// filtering keeps the result exactly reproducible for verify().
void ImageBlitToImage::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
    const vk::VkImageBlit region = {// Src
                                    {
                                        vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                        0, // mipLevel
                                        0, // arrayLayer
                                        1  // layerCount
                                    },
                                    {
                                        {0, 0, 0},
                                        {m_imageWidth, m_imageHeight, 1},
                                    },

                                    // Dst
                                    {
                                        vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                        0, // mipLevel
                                        0, // arrayLayer
                                        1  // layerCount
                                    },
                                    {{0, 0, 0}, {m_dstImageWidth, m_dstImageHeight, 1u}}};
    vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage,
                     vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, vk::VK_FILTER_NEAREST);
}
4238
// Reads the destination image back into a host-visible buffer and compares
// it against the reference image (nearest-upscaled for the 2x case).
void ImageBlitToImage::verify(VerifyContext &context, size_t commandIndex)
{
    tcu::ResultCollector &resultCollector(context.getResultCollector());
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkQueue queue = context.getContext().getQueue();
    const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
    const vk::Unique<vk::VkCommandBuffer> commandBuffer(
        createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
    // Readback buffer: 4 bytes per RGBA8 pixel.
    const vk::Unique<vk::VkBuffer> dstBuffer(createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight,
                                                          vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
                                                          vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
    const vk::Unique<vk::VkDeviceMemory> memory(
        bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
    {
        // Make the blit result visible to the copy and move the image to
        // TRANSFER_SRC_OPTIMAL for the image-to-buffer copy.
        const vk::VkImageMemoryBarrier imageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
                                                       DE_NULL,

                                                       vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                       vk::VK_ACCESS_TRANSFER_READ_BIT,

                                                       vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                       vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

                                                       VK_QUEUE_FAMILY_IGNORED,
                                                       VK_QUEUE_FAMILY_IGNORED,

                                                       *m_dstImage,
                                                       {
                                                           vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                           0, // Mip level
                                                           1, // Mip level count
                                                           0, // Layer
                                                           1  // Layer count
                                                       }};
        // Make the copied data visible to host reads after the submit.
        const vk::VkBufferMemoryBarrier bufferBarrier = {vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
                                                         DE_NULL,

                                                         vk::VK_ACCESS_TRANSFER_WRITE_BIT,
                                                         vk::VK_ACCESS_HOST_READ_BIT,

                                                         VK_QUEUE_FAMILY_IGNORED,
                                                         VK_QUEUE_FAMILY_IGNORED,
                                                         *dstBuffer,
                                                         0,
                                                         VK_WHOLE_SIZE};
        const vk::VkBufferImageCopy region = {0,
                                              0,
                                              0,
                                              {
                                                  vk::VK_IMAGE_ASPECT_COLOR_BIT,
                                                  0, // mipLevel
                                                  0, // arrayLayer
                                                  1  // layerCount
                                              },
                                              {0, 0, 0},
                                              {(uint32_t)m_dstImageWidth, (uint32_t)m_dstImageHeight, 1}};

        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 0,
                               (const vk::VkBufferMemoryBarrier *)DE_NULL, 1, &imageBarrier);
        vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1,
                                 &region);
        vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT,
                               (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 1, &bufferBarrier, 0,
                               (const vk::VkImageMemoryBarrier *)DE_NULL);
    }

    endCommandBuffer(vkd, *commandBuffer);
    submitCommandsAndWait(vkd, device, queue, *commandBuffer);

    {
        void *const ptr = mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);

        vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);

        if (m_scale == BLIT_SCALE_10)
        {
            // 1:1 blit: result must match the reference image exactly.
            const uint8_t *const data = (const uint8_t *)ptr;
            const ConstPixelBufferAccess resAccess(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8),
                                                   m_dstImageWidth, m_dstImageHeight, 1, data);
            const ConstPixelBufferAccess &refAccess(context.getReferenceImage().getAccess());

            if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(),
                                          (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess,
                                          UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
                resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
        }
        else if (m_scale == BLIT_SCALE_20)
        {
            // 2x blit: build the expected image by nearest-duplicating each
            // reference pixel into a 2x2 block, then compare.
            const uint8_t *const data = (const uint8_t *)ptr;
            const ConstPixelBufferAccess resAccess(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8),
                                                   m_dstImageWidth, m_dstImageHeight, 1, data);
            tcu::TextureLevel reference(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth,
                                        m_dstImageHeight, 1);

            {
                const ConstPixelBufferAccess &refAccess(context.getReferenceImage().getAccess());

                for (int32_t y = 0; y < m_dstImageHeight; y++)
                    for (int32_t x = 0; x < m_dstImageWidth; x++)
                    {
                        reference.getAccess().setPixel(refAccess.getPixel(x / 2, y / 2), x, y);
                    }
            }

            if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(),
                                          (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(),
                                          resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
                resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
        }
        else
            DE_FATAL("Unknown scale");

        vkd.unmapMemory(device, *memory);
    }
}
4359
// Context handed to RenderPassCommand::prepare(). Wraps the per-test
// PrepareContext and adds the render pass, framebuffer and render target
// dimensions that render-pass commands need when building pipelines.
class PrepareRenderPassContext
{
public:
    PrepareRenderPassContext(PrepareContext &context, vk::VkRenderPass renderPass, vk::VkFramebuffer framebuffer,
                             int32_t targetWidth, int32_t targetHeight)
        : m_context(context)
        , m_renderPass(renderPass)
        , m_framebuffer(framebuffer)
        , m_targetWidth(targetWidth)
        , m_targetHeight(targetHeight)
    {
    }

    const Memory &getMemory(void) const
    {
        return m_context.getMemory();
    }
    const Context &getContext(void) const
    {
        return m_context.getContext();
    }
    const vk::BinaryCollection &getBinaryCollection(void) const
    {
        return m_context.getBinaryCollection();
    }

    vk::VkBuffer getBuffer(void) const
    {
        return m_context.getBuffer();
    }
    vk::VkDeviceSize getBufferSize(void) const
    {
        return m_context.getBufferSize();
    }

    vk::VkImage getImage(void) const
    {
        return m_context.getImage();
    }
    int32_t getImageWidth(void) const
    {
        return m_context.getImageWidth();
    }
    int32_t getImageHeight(void) const
    {
        return m_context.getImageHeight();
    }
    vk::VkImageLayout getImageLayout(void) const
    {
        return m_context.getImageLayout();
    }

    // Dimensions of the render pass color target (not the test image).
    int32_t getTargetWidth(void) const
    {
        return m_targetWidth;
    }
    int32_t getTargetHeight(void) const
    {
        return m_targetHeight;
    }

    vk::VkRenderPass getRenderPass(void) const
    {
        return m_renderPass;
    }

private:
    PrepareContext &m_context;
    const vk::VkRenderPass m_renderPass;
    // NOTE(review): stored but no accessor is visible here — presumably kept
    // for future use; confirm before removing.
    const vk::VkFramebuffer m_framebuffer;
    const int32_t m_targetWidth;
    const int32_t m_targetHeight;
};
4433
// Context handed to RenderPassCommand::verify(). Wraps the per-test
// VerifyContext and owns an RGBA8 reference render target the commands
// update with their expected rendering results.
class VerifyRenderPassContext
{
public:
    VerifyRenderPassContext(VerifyContext &context, int32_t targetWidth, int32_t targetHeight)
        : m_context(context)
        , m_referenceTarget(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
    {
    }

    const Context &getContext(void) const
    {
        return m_context.getContext();
    }
    TestLog &getLog(void) const
    {
        return m_context.getLog();
    }
    tcu::ResultCollector &getResultCollector(void) const
    {
        return m_context.getResultCollector();
    }

    // Expected contents of the render pass color target.
    TextureLevel &getReferenceTarget(void)
    {
        return m_referenceTarget;
    }

    ReferenceMemory &getReference(void)
    {
        return m_context.getReference();
    }
    TextureLevel &getReferenceImage(void)
    {
        return m_context.getReferenceImage();
    }

private:
    VerifyContext &m_context;
    TextureLevel m_referenceTarget; // Reference color target owned by this context
};
4474
// Interface for commands recorded inside a render pass instance. Mirrors
// CmdCommand, but prepare() and verify() receive render-pass specific
// contexts. All hooks default to no-ops so subclasses override only what
// they need.
class RenderPassCommand
{
public:
    virtual ~RenderPassCommand(void)
    {
    }
    // Name used in log messages and result descriptions.
    virtual const char *getName(void) const = 0;

    // Log things that are done during prepare
    virtual void logPrepare(TestLog &, size_t) const
    {
    }
    // Log submitted calls etc.
    virtual void logSubmit(TestLog &, size_t) const
    {
    }

    // Allocate vulkan resources and prepare for submit.
    virtual void prepare(PrepareRenderPassContext &)
    {
    }

    // Submit commands to command buffer.
    virtual void submit(SubmitContext &)
    {
    }

    // Verify results
    virtual void verify(VerifyRenderPassContext &, size_t)
    {
    }
};
4507
// Command that creates a color render target and render pass, then runs a
// sequence of RenderPassCommands inside a single render pass instance.
// Takes ownership of the commands and deletes them in the destructor.
class SubmitRenderPass : public CmdCommand
{
public:
    SubmitRenderPass(const vector<RenderPassCommand *> &commands);
    ~SubmitRenderPass(void);
    const char *getName(void) const
    {
        return "SubmitRenderPass";
    }

    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;

    void prepare(PrepareContext &);
    void submit(SubmitContext &);

    void verify(VerifyContext &, size_t);

private:
    const int32_t m_targetWidth;                    // Color target width (fixed at construction)
    const int32_t m_targetHeight;                   // Color target height (fixed at construction)
    vk::Move<vk::VkRenderPass> m_renderPass;        // Created in prepare()
    vk::Move<vk::VkDeviceMemory> m_colorTargetMemory;
    de::MovePtr<vk::Allocation> m_colorTargetMemory2;
    vk::Move<vk::VkImage> m_colorTarget;            // Color attachment image
    vk::Move<vk::VkImageView> m_colorTargetView;
    vk::Move<vk::VkFramebuffer> m_framebuffer;
    vector<RenderPassCommand *> m_commands;         // Owned; deleted in the destructor
};
4537
// Takes ownership of the given commands; the render target size is fixed.
SubmitRenderPass::SubmitRenderPass(const vector<RenderPassCommand *> &commands)
    : m_targetWidth(256)
    , m_targetHeight(256)
    , m_commands(commands)
{
}
4544
~SubmitRenderPass()4545 SubmitRenderPass::~SubmitRenderPass()
4546 {
4547 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4548 delete m_commands[cmdNdx];
4549 }
4550
logPrepare(TestLog & log,size_t commandIndex) const4551 void SubmitRenderPass::logPrepare(TestLog &log, size_t commandIndex) const
4552 {
4553 const string sectionName(de::toString(commandIndex) + ":" + getName());
4554 const tcu::ScopedLogSection section(log, sectionName, sectionName);
4555
4556 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4557 {
4558 RenderPassCommand &command = *m_commands[cmdNdx];
4559 command.logPrepare(log, cmdNdx);
4560 }
4561 }
4562
logSubmit(TestLog & log,size_t commandIndex) const4563 void SubmitRenderPass::logSubmit(TestLog &log, size_t commandIndex) const
4564 {
4565 const string sectionName(de::toString(commandIndex) + ":" + getName());
4566 const tcu::ScopedLogSection section(log, sectionName, sectionName);
4567
4568 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4569 {
4570 RenderPassCommand &command = *m_commands[cmdNdx];
4571 command.logSubmit(log, cmdNdx);
4572 }
4573 }
4574
// Creates the color render target (image, memory, view), the render pass and
// framebuffer, then lets every sub-command allocate its own resources.
void SubmitRenderPass::prepare(PrepareContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::VkDevice device = context.getContext().getDevice();
    const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();

    {
        const vk::VkImageCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                                  DE_NULL,
                                                  0u,

                                                  vk::VK_IMAGE_TYPE_2D,
                                                  vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                  {(uint32_t)m_targetWidth, (uint32_t)m_targetHeight, 1u},
                                                  1u,
                                                  1u,
                                                  vk::VK_SAMPLE_COUNT_1_BIT,
                                                  vk::VK_IMAGE_TILING_OPTIMAL,
                                                  // Rendered to by the pass, read back in verify()
                                                  vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                                      vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                                  vk::VK_SHARING_MODE_EXCLUSIVE,
                                                  (uint32_t)queueFamilies.size(),
                                                  &queueFamilies[0],
                                                  vk::VK_IMAGE_LAYOUT_UNDEFINED};

        m_colorTarget = vk::createImage(vkd, device, &createInfo);
    }

    m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);

    {
        const vk::VkImageViewCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                                                      DE_NULL,

                                                      0u,
                                                      *m_colorTarget,
                                                      vk::VK_IMAGE_VIEW_TYPE_2D,
                                                      vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                      {vk::VK_COMPONENT_SWIZZLE_R, vk::VK_COMPONENT_SWIZZLE_G,
                                                       vk::VK_COMPONENT_SWIZZLE_B, vk::VK_COMPONENT_SWIZZLE_A},
                                                      {vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}};

        m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
    }

    // Single color attachment, cleared on load, left in TRANSFER_SRC_OPTIMAL
    // so verify() can copy it out without another transition.
    m_renderPass = vk::makeRenderPass(vkd, device, vk::VK_FORMAT_R8G8B8A8_UNORM, vk::VK_FORMAT_UNDEFINED,
                                      vk::VK_ATTACHMENT_LOAD_OP_CLEAR, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

    {
        const vk::VkImageView imageViews[] = {*m_colorTargetView};
        const vk::VkFramebufferCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
                                                        DE_NULL,
                                                        0u,

                                                        *m_renderPass,
                                                        DE_LENGTH_OF_ARRAY(imageViews),
                                                        imageViews,
                                                        (uint32_t)m_targetWidth,
                                                        (uint32_t)m_targetHeight,
                                                        1u};

        m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
    }

    {
        // Let each sub-command create its pipelines/descriptors against the
        // freshly created render pass.
        PrepareRenderPassContext renderpassContext(context, *m_renderPass, *m_framebuffer, m_targetWidth,
                                                   m_targetHeight);

        for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
        {
            RenderPassCommand &command = *m_commands[cmdNdx];
            command.prepare(renderpassContext);
        }
    }
}
4652
submit(SubmitContext & context)4653 void SubmitRenderPass::submit(SubmitContext &context)
4654 {
4655 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
4656 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4657
4658 beginRenderPass(vkd, commandBuffer, *m_renderPass, *m_framebuffer,
4659 vk::makeRect2D(0, 0, m_targetWidth, m_targetHeight), tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4660
4661 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4662 {
4663 RenderPassCommand &command = *m_commands[cmdNdx];
4664
4665 command.submit(context);
4666 }
4667
4668 endRenderPass(vkd, commandBuffer);
4669 }
4670
verify(VerifyContext & context,size_t commandIndex)4671 void SubmitRenderPass::verify(VerifyContext &context, size_t commandIndex)
4672 {
4673 TestLog &log(context.getLog());
4674 tcu::ResultCollector &resultCollector(context.getResultCollector());
4675 const string sectionName(de::toString(commandIndex) + ":" + getName());
4676 const tcu::ScopedLogSection section(log, sectionName, sectionName);
4677 VerifyRenderPassContext verifyContext(context, m_targetWidth, m_targetHeight);
4678
4679 tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4680
4681 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4682 {
4683 RenderPassCommand &command = *m_commands[cmdNdx];
4684 command.verify(verifyContext, cmdNdx);
4685 }
4686
4687 {
4688 const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
4689 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
4690 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4691 const vk::VkDevice device = context.getContext().getDevice();
4692 const vk::VkQueue queue = context.getContext().getQueue();
4693 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4694 const vk::Unique<vk::VkCommandBuffer> commandBuffer(
4695 createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4696 const vector<uint32_t> &queueFamilies = context.getContext().getQueueFamilies();
4697 const vk::Unique<vk::VkBuffer> dstBuffer(createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight,
4698 vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,
4699 vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4700 const vk::Unique<vk::VkDeviceMemory> memory(
4701 bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
4702 {
4703 const vk::VkImageMemoryBarrier imageBarrier = {vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4704 DE_NULL,
4705
4706 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
4707 vk::VK_ACCESS_TRANSFER_READ_BIT,
4708
4709 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4710 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4711
4712 VK_QUEUE_FAMILY_IGNORED,
4713 VK_QUEUE_FAMILY_IGNORED,
4714
4715 *m_colorTarget,
4716 {
4717 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4718 0, // Mip level
4719 1, // Mip level count
4720 0, // Layer
4721 1 // Layer count
4722 }};
4723 const vk::VkBufferMemoryBarrier bufferBarrier = {vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4724 DE_NULL,
4725
4726 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4727 vk::VK_ACCESS_HOST_READ_BIT,
4728
4729 VK_QUEUE_FAMILY_IGNORED,
4730 VK_QUEUE_FAMILY_IGNORED,
4731 *dstBuffer,
4732 0,
4733 VK_WHOLE_SIZE};
4734 const vk::VkBufferImageCopy region = {0,
4735 0,
4736 0,
4737 {
4738 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4739 0, // mipLevel
4740 0, // arrayLayer
4741 1 // layerCount
4742 },
4743 {0, 0, 0},
4744 {(uint32_t)m_targetWidth, (uint32_t)m_targetHeight, 1u}};
4745
4746 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
4747 vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0,
4748 (const vk::VkMemoryBarrier *)DE_NULL, 0, (const vk::VkBufferMemoryBarrier *)DE_NULL,
4749 1, &imageBarrier);
4750 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4751 *dstBuffer, 1, ®ion);
4752 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT,
4753 (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier *)DE_NULL, 1, &bufferBarrier,
4754 0, (const vk::VkImageMemoryBarrier *)DE_NULL);
4755 }
4756
4757 endCommandBuffer(vkd, *commandBuffer);
4758 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
4759
4760 {
4761 void *const ptr = mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);
4762
4763 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
4764
4765 {
4766 const uint8_t *const data = (const uint8_t *)ptr;
4767 const ConstPixelBufferAccess resAccess(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8),
4768 m_targetWidth, m_targetHeight, 1, data);
4769 const ConstPixelBufferAccess &refAccess(verifyContext.getReferenceTarget().getAccess());
4770
4771 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(),
4772 (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess,
4773 resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4774 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4775 }
4776
4777 vkd.unmapMemory(device, *memory);
4778 }
4779 }
4780 }
4781
// CmdCommand that records its child commands into a secondary command buffer
// in prepare() and executes that buffer from the primary one in submit().
class ExecuteSecondaryCommandBuffer : public CmdCommand
{
public:
    // Takes ownership of 'commands'; they are deleted by the destructor.
    ExecuteSecondaryCommandBuffer(const vector<CmdCommand *> &commands);
    ~ExecuteSecondaryCommandBuffer(void);
    const char *getName(void) const
    {
        return "ExecuteSecondaryCommandBuffer";
    }

    // logPrepare/logSubmit/prepare/verify fan out to the child commands;
    // submit() executes the recorded secondary command buffer.
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;

    void prepare(PrepareContext &);
    void submit(SubmitContext &);

    void verify(VerifyContext &, size_t);

private:
    // Secondary command buffer recorded in prepare().
    vk::Move<vk::VkCommandBuffer> m_commandBuffer;
    // NOTE(review): the color-target members below are not referenced by any
    // of this class's out-of-line definitions visible in this file.
    vk::Move<vk::VkDeviceMemory> m_colorTargetMemory;
    de::MovePtr<vk::Allocation> m_colorTargetMemory2;
    vk::Move<vk::VkImage> m_colorTarget;
    vk::Move<vk::VkImageView> m_colorTargetView;
    vk::Move<vk::VkFramebuffer> m_framebuffer;
    // Owned child commands (deleted in the destructor).
    vector<CmdCommand *> m_commands;
};
4809
// Takes ownership of the given commands; they are deleted in the destructor.
ExecuteSecondaryCommandBuffer::ExecuteSecondaryCommandBuffer(const vector<CmdCommand *> &commands)
    : m_commands(commands)
{
}
4814
~ExecuteSecondaryCommandBuffer(void)4815 ExecuteSecondaryCommandBuffer::~ExecuteSecondaryCommandBuffer(void)
4816 {
4817 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4818 delete m_commands[cmdNdx];
4819 }
4820
logPrepare(TestLog & log,size_t commandIndex) const4821 void ExecuteSecondaryCommandBuffer::logPrepare(TestLog &log, size_t commandIndex) const
4822 {
4823 const string sectionName(de::toString(commandIndex) + ":" + getName());
4824 const tcu::ScopedLogSection section(log, sectionName, sectionName);
4825
4826 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4827 {
4828 CmdCommand &command = *m_commands[cmdNdx];
4829 command.logPrepare(log, cmdNdx);
4830 }
4831 }
4832
logSubmit(TestLog & log,size_t commandIndex) const4833 void ExecuteSecondaryCommandBuffer::logSubmit(TestLog &log, size_t commandIndex) const
4834 {
4835 const string sectionName(de::toString(commandIndex) + ":" + getName());
4836 const tcu::ScopedLogSection section(log, sectionName, sectionName);
4837
4838 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4839 {
4840 CmdCommand &command = *m_commands[cmdNdx];
4841 command.logSubmit(log, cmdNdx);
4842 }
4843 }
4844
prepare(PrepareContext & context)4845 void ExecuteSecondaryCommandBuffer::prepare(PrepareContext &context)
4846 {
4847 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
4848 const vk::VkDevice device = context.getContext().getDevice();
4849 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4850
4851 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4852 {
4853 CmdCommand &command = *m_commands[cmdNdx];
4854
4855 command.prepare(context);
4856 }
4857
4858 m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY);
4859 {
4860 SubmitContext submitContext(context, *m_commandBuffer);
4861
4862 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4863 {
4864 CmdCommand &command = *m_commands[cmdNdx];
4865
4866 command.submit(submitContext);
4867 }
4868
4869 endCommandBuffer(vkd, *m_commandBuffer);
4870 }
4871 }
4872
submit(SubmitContext & context)4873 void ExecuteSecondaryCommandBuffer::submit(SubmitContext &context)
4874 {
4875 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
4876 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4877
4878 {
4879 vkd.cmdExecuteCommands(commandBuffer, 1, &m_commandBuffer.get());
4880 }
4881 }
4882
verify(VerifyContext & context,size_t commandIndex)4883 void ExecuteSecondaryCommandBuffer::verify(VerifyContext &context, size_t commandIndex)
4884 {
4885 const string sectionName(de::toString(commandIndex) + ":" + getName());
4886 const tcu::ScopedLogSection section(context.getLog(), sectionName, sectionName);
4887
4888 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4889 m_commands[cmdNdx]->verify(context, cmdNdx);
4890 }
4891
// Objects created together by createPipelineWithResources(); owned by the
// render command that uses the pipeline.
struct PipelineResources
{
    vk::Move<vk::VkPipeline> pipeline;
    vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout; // null when no descriptor bindings were requested
    vk::Move<vk::VkPipelineLayout> pipelineLayout;
};
4898
// Create a graphics pipeline plus its layout objects and store them in
// 'resources'. A descriptor set layout is created only when 'bindings' is
// non-empty; vertex input state is built from the given binding/attribute
// descriptions (both may be empty).
void createPipelineWithResources(const vk::DeviceInterface &vkd, const vk::VkDevice device,
                                 const vk::VkRenderPass renderPass, const uint32_t subpass,
                                 const vk::VkShaderModule &vertexShaderModule,
                                 const vk::VkShaderModule &fragmentShaderModule, const uint32_t viewPortWidth,
                                 const uint32_t viewPortHeight,
                                 const vector<vk::VkVertexInputBindingDescription> &vertexBindingDescriptions,
                                 const vector<vk::VkVertexInputAttributeDescription> &vertexAttributeDescriptions,
                                 const vector<vk::VkDescriptorSetLayoutBinding> &bindings,
                                 const vk::VkPrimitiveTopology topology, uint32_t pushConstantRangeCount,
                                 const vk::VkPushConstantRange *pushConstantRanges, PipelineResources &resources)
{
    // Optional descriptor set layout; left null when there are no bindings.
    if (!bindings.empty())
    {
        const vk::VkDescriptorSetLayoutCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, DE_NULL,

            0u, (uint32_t)bindings.size(), bindings.empty() ? DE_NULL : &bindings[0]};

        resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
    }

    // Pipeline layout referencing the (possibly absent) descriptor set layout
    // and the caller-supplied push constant ranges.
    {
        const vk::VkDescriptorSetLayout descriptorSetLayout_ = *resources.descriptorSetLayout;
        const vk::VkPipelineLayoutCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
                                                           DE_NULL,
                                                           0,

                                                           resources.descriptorSetLayout ? 1u : 0u,
                                                           resources.descriptorSetLayout ? &descriptorSetLayout_ :
                                                                                           DE_NULL,

                                                           pushConstantRangeCount,
                                                           pushConstantRanges};

        resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
    }

    // Graphics pipeline: vertex + fragment stages only, fixed viewport/scissor
    // covering the full viewPortWidth x viewPortHeight area.
    {
        const std::vector<vk::VkViewport> viewports(
            1, vk::makeViewport(0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f));
        const std::vector<vk::VkRect2D> scissors(1, vk::makeRect2D(0, 0, viewPortWidth, viewPortHeight));

        const vk::VkPipelineVertexInputStateCreateInfo vertexInputState = {
            vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
            DE_NULL,
            0u,

            (uint32_t)vertexBindingDescriptions.size(),
            vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],

            (uint32_t)vertexAttributeDescriptions.size(),
            vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]};

        resources.pipeline = vk::makeGraphicsPipeline(
            vkd,                       // const DeviceInterface&            vk
            device,                    // const VkDevice                    device
            *resources.pipelineLayout, // const VkPipelineLayout            pipelineLayout
            vertexShaderModule,        // const VkShaderModule              vertexShaderModule
            DE_NULL,                   // const VkShaderModule              tessellationControlModule
            DE_NULL,                   // const VkShaderModule              tessellationEvalModule
            DE_NULL,                   // const VkShaderModule              geometryShaderModule
            fragmentShaderModule,      // const VkShaderModule              fragmentShaderModule
            renderPass,                // const VkRenderPass                renderPass
            viewports,                 // const std::vector<VkViewport>&    viewports
            scissors,                  // const std::vector<VkRect2D>&      scissors
            topology,                  // const VkPrimitiveTopology         topology
            subpass,                   // const uint32_t                    subpass
            0u,                        // const uint32_t                    patchControlPoints
            &vertexInputState);        // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
    }
}
4970
// RenderPassCommand that binds the test buffer as a 16-bit index buffer and
// draws one point per index. verify() mirrors this by marking, for each byte
// pair in the reference buffer, the pixel at (byte0, byte1) as white
// (presumably matching what "index-buffer.vert" does with each index —
// see prepare()).
class RenderIndexBuffer : public RenderPassCommand
{
public:
    RenderIndexBuffer(void)
    {
    }
    ~RenderIndexBuffer(void)
    {
    }

    const char *getName(void) const
    {
        return "RenderIndexBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    PipelineResources m_resources;
    // Buffer size captured in prepare(), used by verify().
    vk::VkDeviceSize m_bufferSize;
};
4995
logPrepare(TestLog & log,size_t commandIndex) const4996 void RenderIndexBuffer::logPrepare(TestLog &log, size_t commandIndex) const
4997 {
4998 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer."
4999 << TestLog::EndMessage;
5000 }
5001
logSubmit(TestLog & log,size_t commandIndex) const5002 void RenderIndexBuffer::logSubmit(TestLog &log, size_t commandIndex) const
5003 {
5004 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer."
5005 << TestLog::EndMessage;
5006 }
5007
prepare(PrepareRenderPassContext & context)5008 void RenderIndexBuffer::prepare(PrepareRenderPassContext &context)
5009 {
5010 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5011 const vk::VkDevice device = context.getContext().getDevice();
5012 const vk::VkRenderPass renderPass = context.getRenderPass();
5013 const uint32_t subpass = 0;
5014 const vk::Unique<vk::VkShaderModule> vertexShaderModule(
5015 vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
5016 const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
5017 vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5018
5019 createPipelineWithResources(
5020 vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(),
5021 context.getTargetHeight(), vector<vk::VkVertexInputBindingDescription>(),
5022 vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(),
5023 vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5024 m_bufferSize = context.getBufferSize();
5025 }
5026
submit(SubmitContext & context)5027 void RenderIndexBuffer::submit(SubmitContext &context)
5028 {
5029 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5030 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5031
5032 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5033 vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
5034 vkd.cmdDrawIndexed(commandBuffer, (uint32_t)(context.getBufferSize() / 2), 1, 0, 0, 0);
5035 }
5036
verify(VerifyRenderPassContext & context,size_t)5037 void RenderIndexBuffer::verify(VerifyRenderPassContext &context, size_t)
5038 {
5039 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
5040 {
5041 const uint8_t x = context.getReference().get(pos * 2);
5042 const uint8_t y = context.getReference().get((pos * 2) + 1);
5043
5044 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5045 }
5046 }
5047
// RenderPassCommand that binds the test buffer as a vertex buffer with a
// configurable stride and an R8G8 attribute, drawing one point per vertex.
// verify() marks pixel (byte0, byte1) of each stride-sized element white.
class RenderVertexBuffer : public RenderPassCommand
{
public:
    // 'stride' is the distance in bytes between consecutive vertices; only
    // the first two bytes of each vertex are consumed (VK_FORMAT_R8G8_UNORM).
    RenderVertexBuffer(uint32_t stride)
        : m_stride(stride)
        , m_name("RenderVertexBuffer" + de::toString(stride))
        , m_bufferSize(0)
    {
    }
    ~RenderVertexBuffer(void)
    {
    }

    const char *getName(void) const
    {
        return m_name.c_str();
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    const uint32_t m_stride;
    // Name includes the stride so different configurations log distinctly.
    const std::string m_name;
    PipelineResources m_resources;
    // Buffer size captured in prepare(), used by verify().
    vk::VkDeviceSize m_bufferSize;
};
5077
logPrepare(TestLog & log,size_t commandIndex) const5078 void RenderVertexBuffer::logPrepare(TestLog &log, size_t commandIndex) const
5079 {
5080 log << TestLog::Message << commandIndex << ":" << getName()
5081 << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
5082 }
5083
logSubmit(TestLog & log,size_t commandIndex) const5084 void RenderVertexBuffer::logSubmit(TestLog &log, size_t commandIndex) const
5085 {
5086 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer."
5087 << TestLog::EndMessage;
5088 }
5089
prepare(PrepareRenderPassContext & context)5090 void RenderVertexBuffer::prepare(PrepareRenderPassContext &context)
5091 {
5092 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5093 const vk::VkDevice device = context.getContext().getDevice();
5094 const vk::VkRenderPass renderPass = context.getRenderPass();
5095 const uint32_t subpass = 0;
5096 const vk::Unique<vk::VkShaderModule> vertexShaderModule(
5097 vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
5098 const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
5099 vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5100
5101 vector<vk::VkVertexInputAttributeDescription> vertexAttributeDescriptions;
5102 vector<vk::VkVertexInputBindingDescription> vertexBindingDescriptions;
5103
5104 {
5105 const vk::VkVertexInputBindingDescription vertexBindingDescription = {0, m_stride,
5106 vk::VK_VERTEX_INPUT_RATE_VERTEX};
5107
5108 vertexBindingDescriptions.push_back(vertexBindingDescription);
5109 }
5110 {
5111 const vk::VkVertexInputAttributeDescription vertexAttributeDescription = {0, 0, vk::VK_FORMAT_R8G8_UNORM, 0};
5112
5113 vertexAttributeDescriptions.push_back(vertexAttributeDescription);
5114 }
5115 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
5116 context.getTargetWidth(), context.getTargetHeight(), vertexBindingDescriptions,
5117 vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(),
5118 vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5119
5120 m_bufferSize = context.getBufferSize();
5121 }
5122
submit(SubmitContext & context)5123 void RenderVertexBuffer::submit(SubmitContext &context)
5124 {
5125 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5126 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5127 const vk::VkDeviceSize offset = 0;
5128 const vk::VkBuffer buffer = context.getBuffer();
5129
5130 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5131 vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
5132 vkd.cmdDraw(commandBuffer, (uint32_t)(context.getBufferSize() / m_stride), 1, 0, 0);
5133 }
5134
verify(VerifyRenderPassContext & context,size_t)5135 void RenderVertexBuffer::verify(VerifyRenderPassContext &context, size_t)
5136 {
5137 for (size_t pos = 0; pos < (size_t)m_bufferSize / m_stride; pos++)
5138 {
5139 const uint8_t x = context.getReference().get(pos * m_stride);
5140 const uint8_t y = context.getReference().get((pos * m_stride) + 1);
5141
5142 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5143 }
5144 }
5145
// RenderPassCommand that binds the test buffer as one or more uniform
// buffers. The buffer is split into MAX_UNIFORM_BUFFER_SIZE sized slices,
// one descriptor set per slice, and one draw is issued per set.
class RenderVertexUniformBuffer : public RenderPassCommand
{
public:
    RenderVertexUniformBuffer(void)
    {
    }
    ~RenderVertexUniformBuffer(void);

    const char *getName(void) const
    {
        return "RenderVertexUniformBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

protected:
    // Size in bytes of the buffer slice bound to the given descriptor set
    // (MAX_UNIFORM_BUFFER_SIZE except possibly a smaller final slice).
    uint32_t calculateBufferPartSize(size_t descriptorSetNdx) const;

private:
    PipelineResources m_resources;
    vk::Move<vk::VkDescriptorPool> m_descriptorPool;
    // One descriptor set per buffer slice; allocated in prepare().
    vector<vk::VkDescriptorSet> m_descriptorSets;

    // Buffer size captured in prepare(), rounded down to a multiple of 16.
    vk::VkDeviceSize m_bufferSize;
};
5174
// Nothing to release by hand; vk::Move members are destroyed automatically
// and the descriptor sets go away with the pool.
RenderVertexUniformBuffer::~RenderVertexUniformBuffer(void)
{
}
5178
logPrepare(TestLog & log,size_t commandIndex) const5179 void RenderVertexUniformBuffer::logPrepare(TestLog &log, size_t commandIndex) const
5180 {
5181 log << TestLog::Message << commandIndex << ":" << getName()
5182 << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5183 }
5184
logSubmit(TestLog & log,size_t commandIndex) const5185 void RenderVertexUniformBuffer::logSubmit(TestLog &log, size_t commandIndex) const
5186 {
5187 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer."
5188 << TestLog::EndMessage;
5189 }
5190
// Create the pipeline and descriptor sets needed to use the test buffer as
// uniform buffer(s): one descriptor set per MAX_UNIFORM_BUFFER_SIZE sized
// slice of the buffer, each pointing at its own range.
void RenderVertexUniformBuffer::prepare(PrepareRenderPassContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    // make sure buffer size is multiple of 16 (in glsl we use uvec4 to store 16 values)
    m_bufferSize = context.getBufferSize();
    m_bufferSize = static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;

    // Single uniform-buffer binding at slot 0, visible to the vertex stage.
    {
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1,
                                                          vk::VK_SHADER_STAGE_VERTEX_BIT, DE_NULL};

        bindings.push_back(binding);
    }

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

    // One descriptor set per MAX_UNIFORM_BUFFER_SIZE sized slice of the buffer.
    {
        const uint32_t descriptorCount =
            (uint32_t)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, descriptorCount};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            descriptorCount,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
        m_descriptorSets.resize(descriptorCount);
    }

    // Allocate each set and point it at its slice of the buffer.
    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        // disown(): raw handles are kept; they are freed when the pool is destroyed.
        m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

        {
            const vk::VkDescriptorBufferInfo bufferInfo = {
                context.getBuffer(), (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
                calculateBufferPartSize(descriptorSetNdx)};
            const vk::VkWriteDescriptorSet write = {
                vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                DE_NULL,
                m_descriptorSets[descriptorSetNdx],
                0u,
                0u,
                1u,
                vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                DE_NULL,
                &bufferInfo,
                DE_NULL,
            };

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
5269
submit(SubmitContext & context)5270 void RenderVertexUniformBuffer::submit(SubmitContext &context)
5271 {
5272 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5273 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5274
5275 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5276
5277 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5278 {
5279 const size_t size = calculateBufferPartSize(descriptorSetNdx);
5280 const uint32_t count = (uint32_t)(size / 2);
5281
5282 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u,
5283 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5284 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5285 }
5286 }
5287
verify(VerifyRenderPassContext & context,size_t)5288 void RenderVertexUniformBuffer::verify(VerifyRenderPassContext &context, size_t)
5289 {
5290 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5291 {
5292 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5293 const size_t size = calculateBufferPartSize(descriptorSetNdx);
5294 const size_t count = size / 2;
5295
5296 for (size_t pos = 0; pos < count; pos++)
5297 {
5298 const uint8_t x = context.getReference().get(offset + pos * 2);
5299 const uint8_t y = context.getReference().get(offset + (pos * 2) + 1);
5300
5301 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5302 }
5303 }
5304 }
5305
calculateBufferPartSize(size_t descriptorSetNdx) const5306 uint32_t RenderVertexUniformBuffer::calculateBufferPartSize(size_t descriptorSetNdx) const
5307 {
5308 uint32_t size =
5309 static_cast<uint32_t>(m_bufferSize) - static_cast<uint32_t>(descriptorSetNdx) * MAX_UNIFORM_BUFFER_SIZE;
5310 if (size < MAX_UNIFORM_BUFFER_SIZE)
5311 return size;
5312 return MAX_UNIFORM_BUFFER_SIZE;
5313 }
5314
// RenderPassCommand that binds the test buffer as one or more R16_UINT
// uniform texel buffers. The buffer is split into slices of at most
// maxTexelBufferElements texels (2 bytes each), with one buffer view and one
// descriptor set per slice.
class RenderVertexUniformTexelBuffer : public RenderPassCommand
{
public:
    RenderVertexUniformTexelBuffer(void)
    {
    }
    // Destroys the manually created buffer views.
    ~RenderVertexUniformTexelBuffer(void);

    const char *getName(void) const
    {
        return "RenderVertexUniformTexelBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    PipelineResources m_resources;
    vk::Move<vk::VkDescriptorPool> m_descriptorPool;
    vector<vk::VkDescriptorSet> m_descriptorSets;
    // Raw handles (not vk::Move); destroyed by hand in the destructor.
    vector<vk::VkBufferView> m_bufferViews;

    // Cached in prepare() so the destructor can destroy the buffer views.
    const vk::DeviceInterface *m_vkd;
    vk::VkDevice m_device;
    vk::VkDeviceSize m_bufferSize;
    // Device limit maxTexelBufferElements, queried in prepare().
    uint32_t m_maxUniformTexelCount;
};
5344
~RenderVertexUniformTexelBuffer(void)5345 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer(void)
5346 {
5347 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5348 {
5349 if (!!m_bufferViews[bufferViewNdx])
5350 {
5351 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5352 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5353 }
5354 }
5355 }
5356
logPrepare(TestLog & log,size_t commandIndex) const5357 void RenderVertexUniformTexelBuffer::logPrepare(TestLog &log, size_t commandIndex) const
5358 {
5359 log << TestLog::Message << commandIndex << ":" << getName()
5360 << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5361 }
5362
logSubmit(TestLog & log,size_t commandIndex) const5363 void RenderVertexUniformTexelBuffer::logSubmit(TestLog &log, size_t commandIndex) const
5364 {
5365 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer."
5366 << TestLog::EndMessage;
5367 }
5368
// Create the pipeline, buffer views and descriptor sets needed to use the
// test buffer as R16_UINT uniform texel buffer(s). The buffer is split into
// slices of at most maxTexelBufferElements texels (2 bytes per texel), one
// view + descriptor set per slice.
void RenderVertexUniformTexelBuffer::prepare(PrepareRenderPassContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    // Cache device handles for the destructor (buffer views are destroyed by hand).
    m_device = device;
    m_vkd = &vkd;
    m_bufferSize = context.getBufferSize();
    m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;

    // Single uniform-texel-buffer binding at slot 0, visible to the vertex stage.
    {
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1,
                                                          vk::VK_SHADER_STAGE_VERTEX_BIT, DE_NULL};

        bindings.push_back(binding);
    }

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

    // One descriptor set per slice of m_maxUniformTexelCount texels (2 bytes each).
    {
        const uint32_t descriptorCount =
            (uint32_t)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, descriptorCount};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            descriptorCount,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
        m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
        m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
    }

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        // Number of texels in this slice: a full m_maxUniformTexelCount except
        // possibly a smaller final slice.
        const uint32_t count = (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2 ?
                                              m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2 :
                                              m_maxUniformTexelCount * 2) /
                               2;
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        // disown(): raw handles are kept; they are freed when the pool is destroyed.
        m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

        // R16_UINT view over this slice's byte range.
        {
            const vk::VkBufferViewCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
                                                           DE_NULL,
                                                           0u,

                                                           context.getBuffer(),
                                                           vk::VK_FORMAT_R16_UINT,
                                                           descriptorSetNdx * m_maxUniformTexelCount * 2,
                                                           count * 2};

            VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
        }

        {
            const vk::VkWriteDescriptorSet write = {vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                                    DE_NULL,
                                                    m_descriptorSets[descriptorSetNdx],
                                                    0u,
                                                    0u,
                                                    1u,
                                                    vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
                                                    DE_NULL,
                                                    DE_NULL,
                                                    &m_bufferViews[descriptorSetNdx]};

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
5463
submit(SubmitContext & context)5464 void RenderVertexUniformTexelBuffer::submit(SubmitContext &context)
5465 {
5466 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5467 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5468
5469 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5470
5471 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5472 {
5473 const uint32_t count = (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2 ?
5474 m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2 :
5475 m_maxUniformTexelCount * 2) /
5476 2;
5477
5478 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u,
5479 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5480 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5481 }
5482 }
5483
verify(VerifyRenderPassContext & context,size_t)5484 void RenderVertexUniformTexelBuffer::verify(VerifyRenderPassContext &context, size_t)
5485 {
5486 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5487 {
5488 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 2;
5489 const uint32_t count = (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2 ?
5490 m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2 :
5491 m_maxUniformTexelCount * 2) /
5492 2;
5493
5494 for (size_t pos = 0; pos < (size_t)count; pos++)
5495 {
5496 const uint8_t x = context.getReference().get(offset + pos * 2);
5497 const uint8_t y = context.getReference().get(offset + (pos * 2) + 1);
5498
5499 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5500 }
5501 }
5502 }
5503
// Render pass command that draws points whose coordinates are fetched from the
// test buffer bound through storage-buffer descriptors. The buffer is split
// into chunks in prepare(), one descriptor set per chunk.
class RenderVertexStorageBuffer : public RenderPassCommand
{
public:
    RenderVertexStorageBuffer(void)
    {
    }
    ~RenderVertexStorageBuffer(void);

    const char *getName(void) const
    {
        return "RenderVertexStorageBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    PipelineResources m_resources;
    vk::Move<vk::VkDescriptorPool> m_descriptorPool;
    // Raw handles; sets are disowned at allocation and released with the pool.
    vector<vk::VkDescriptorSet> m_descriptorSets;

    vk::VkDeviceSize m_bufferSize; // size of the source buffer, cached in prepare()
};
5529
// Nothing to release by hand: the descriptor sets are freed when
// m_descriptorPool (a vk::Move) is destroyed.
RenderVertexStorageBuffer::~RenderVertexStorageBuffer(void)
{
}
5533
logPrepare(TestLog & log,size_t commandIndex) const5534 void RenderVertexStorageBuffer::logPrepare(TestLog &log, size_t commandIndex) const
5535 {
5536 log << TestLog::Message << commandIndex << ":" << getName()
5537 << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5538 }
5539
logSubmit(TestLog & log,size_t commandIndex) const5540 void RenderVertexStorageBuffer::logSubmit(TestLog &log, size_t commandIndex) const
5541 {
5542 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer."
5543 << TestLog::EndMessage;
5544 }
5545
// Builds the graphics pipeline that reads point coordinates from the buffer
// via a vertex-stage storage buffer, then splits the buffer into
// MAX_STORAGE_BUFFER_SIZE chunks and binds each chunk through its own
// descriptor set.
void RenderVertexStorageBuffer::prepare(PrepareRenderPassContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    m_bufferSize = context.getBufferSize();

    {
        // Single storage-buffer binding visible to the vertex shader only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1,
                                                          vk::VK_SHADER_STAGE_VERTEX_BIT, DE_NULL};

        bindings.push_back(binding);
    }

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

    {
        // One descriptor set per MAX_STORAGE_BUFFER_SIZE chunk of the buffer.
        const uint32_t descriptorCount =
            (uint32_t)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, descriptorCount};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            descriptorCount,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
        m_descriptorSets.resize(descriptorCount);
    }

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        // disown(): raw handle is kept in m_descriptorSets; the pool frees it on destruction.
        m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

        {
            // Point this set at its chunk; the final chunk may be smaller than
            // MAX_STORAGE_BUFFER_SIZE, hence the de::min on the range.
            const vk::VkDescriptorBufferInfo bufferInfo = {
                context.getBuffer(), descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
                de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
                        (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)};
            const vk::VkWriteDescriptorSet write = {
                vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                DE_NULL,
                m_descriptorSets[descriptorSetNdx],
                0u,
                0u,
                1u,
                vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                DE_NULL,
                &bufferInfo,
                DE_NULL,
            };

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
5623
submit(SubmitContext & context)5624 void RenderVertexStorageBuffer::submit(SubmitContext &context)
5625 {
5626 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5627 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5628
5629 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5630
5631 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5632 {
5633 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE ?
5634 (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE) :
5635 (size_t)(MAX_STORAGE_BUFFER_SIZE);
5636
5637 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u,
5638 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5639 vkd.cmdDraw(commandBuffer, (uint32_t)(size / 2), 1, 0, 0);
5640 }
5641 }
5642
verify(VerifyRenderPassContext & context,size_t)5643 void RenderVertexStorageBuffer::verify(VerifyRenderPassContext &context, size_t)
5644 {
5645 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5646 {
5647 const size_t offset = descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5648 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE ?
5649 (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE) :
5650 (size_t)(MAX_STORAGE_BUFFER_SIZE);
5651
5652 for (size_t pos = 0; pos < size / 2; pos++)
5653 {
5654 const uint8_t x = context.getReference().get(offset + pos * 2);
5655 const uint8_t y = context.getReference().get(offset + (pos * 2) + 1);
5656
5657 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5658 }
5659 }
5660 }
5661
// Render pass command that draws points whose coordinates come from the test
// buffer bound through storage texel buffer views (R32_UINT texels, see
// prepare()).
class RenderVertexStorageTexelBuffer : public RenderPassCommand
{
public:
    RenderVertexStorageTexelBuffer(void)
    {
    }
    ~RenderVertexStorageTexelBuffer(void);

    const char *getName(void) const
    {
        return "RenderVertexStorageTexelBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    PipelineResources m_resources;
    vk::Move<vk::VkDescriptorPool> m_descriptorPool;
    vector<vk::VkDescriptorSet> m_descriptorSets;
    // Raw buffer-view handles; destroyed explicitly in the destructor.
    vector<vk::VkBufferView> m_bufferViews;

    // Cached in prepare() so the destructor can destroy the buffer views.
    const vk::DeviceInterface *m_vkd;
    vk::VkDevice m_device;
    vk::VkDeviceSize m_bufferSize;
    uint32_t m_maxStorageTexelCount; // device limit maxTexelBufferElements
};
5691
~RenderVertexStorageTexelBuffer(void)5692 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer(void)
5693 {
5694 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5695 {
5696 if (!!m_bufferViews[bufferViewNdx])
5697 {
5698 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5699 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5700 }
5701 }
5702 }
5703
logPrepare(TestLog & log,size_t commandIndex) const5704 void RenderVertexStorageTexelBuffer::logPrepare(TestLog &log, size_t commandIndex) const
5705 {
5706 log << TestLog::Message << commandIndex << ":" << getName()
5707 << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5708 }
5709
logSubmit(TestLog & log,size_t commandIndex) const5710 void RenderVertexStorageTexelBuffer::logSubmit(TestLog &log, size_t commandIndex) const
5711 {
5712 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer."
5713 << TestLog::EndMessage;
5714 }
5715
// Builds the graphics pipeline that reads point coordinates from the buffer
// through R32_UINT storage texel buffer views. The buffer is split into
// chunks of m_maxStorageTexelCount texels (4 bytes each); each chunk gets its
// own buffer view and descriptor set.
void RenderVertexStorageTexelBuffer::prepare(PrepareRenderPassContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    // Cache device handles so the destructor can destroy the raw buffer views.
    m_device = device;
    m_vkd = &vkd;
    m_bufferSize = context.getBufferSize();
    m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;

    {
        // Single storage-texel-buffer binding visible to the vertex shader only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1,
                                                          vk::VK_SHADER_STAGE_VERTEX_BIT, DE_NULL};

        bindings.push_back(binding);
    }

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

    {
        // One descriptor set per chunk of m_maxStorageTexelCount 4-byte texels.
        const uint32_t descriptorCount =
            (uint32_t)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * (uint64_t)(4)));
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, descriptorCount};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            descriptorCount,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
        m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
        m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
    }

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        // disown(): raw handle is kept in m_descriptorSets; the pool frees it on destruction.
        m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

        {
            // View over this chunk; the last chunk's range is clamped to the
            // remaining buffer bytes.
            // NOTE(review): the chunk offset is assumed to satisfy the device's
            // minTexelBufferOffsetAlignment — confirm for small alignments.
            const vk::VkBufferViewCreateInfo createInfo = {
                vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
                DE_NULL,
                0u,

                context.getBuffer(),
                vk::VK_FORMAT_R32_UINT,
                descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4),
                (uint32_t)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * (uint64_t)(4),
                                                    m_bufferSize -
                                                        descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4))};

            VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
        }

        {
            const vk::VkWriteDescriptorSet write = {vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                                    DE_NULL,
                                                    m_descriptorSets[descriptorSetNdx],
                                                    0u,
                                                    0u,
                                                    1u,
                                                    vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
                                                    DE_NULL,
                                                    DE_NULL,
                                                    &m_bufferViews[descriptorSetNdx]};

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
5809
submit(SubmitContext & context)5810 void RenderVertexStorageTexelBuffer::submit(SubmitContext &context)
5811 {
5812 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5813 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5814
5815 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5816
5817 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5818 {
5819 const uint32_t count =
5820 (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * (uint64_t)(4) ?
5821 m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4) :
5822 m_maxStorageTexelCount * (uint64_t)(4)) /
5823 2;
5824
5825 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u,
5826 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5827 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5828 }
5829 }
5830
verify(VerifyRenderPassContext & context,size_t)5831 void RenderVertexStorageTexelBuffer::verify(VerifyRenderPassContext &context, size_t)
5832 {
5833 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5834 {
5835 const uint64_t offset = descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4);
5836 const uint32_t count =
5837 (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * (uint64_t)(4) ?
5838 m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4) :
5839 m_maxStorageTexelCount * (uint64_t)(4)) /
5840 2;
5841
5842 DE_ASSERT(context.getReference().getSize() <= (uint64_t)(4) * m_maxStorageTexelCount * m_descriptorSets.size());
5843 DE_ASSERT(context.getReference().getSize() > offset);
5844 DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5845
5846 for (size_t pos = 0; pos < (size_t)count; pos++)
5847 {
5848 const uint8_t x = context.getReference().get(offset + pos * 2);
5849 const uint8_t y = context.getReference().get(offset + (pos * 2) + 1);
5850
5851 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5852 }
5853 }
5854 }
5855
// Render pass command that draws points whose coordinates are loaded from the
// test image bound as a storage image (one descriptor set, one image view).
class RenderVertexStorageImage : public RenderPassCommand
{
public:
    RenderVertexStorageImage(void)
    {
    }
    ~RenderVertexStorageImage(void);

    const char *getName(void) const
    {
        return "RenderVertexStorageImage";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    // All handles RAII-owned via vk::Move; created in prepare().
    PipelineResources m_resources;
    vk::Move<vk::VkDescriptorPool> m_descriptorPool;
    vk::Move<vk::VkDescriptorSet> m_descriptorSet;
    vk::Move<vk::VkImageView> m_imageView;
};
5880
// Empty: every member is RAII-owned (vk::Move), so no manual cleanup is needed.
RenderVertexStorageImage::~RenderVertexStorageImage(void)
{
}
5884
logPrepare(TestLog & log,size_t commandIndex) const5885 void RenderVertexStorageImage::logPrepare(TestLog &log, size_t commandIndex) const
5886 {
5887 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image."
5888 << TestLog::EndMessage;
5889 }
5890
logSubmit(TestLog & log,size_t commandIndex) const5891 void RenderVertexStorageImage::logSubmit(TestLog &log, size_t commandIndex) const
5892 {
5893 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image."
5894 << TestLog::EndMessage;
5895 }
5896
// Builds the graphics pipeline that loads point coordinates from the image
// bound as a storage image, and sets up the single descriptor set / image
// view it reads through.
void RenderVertexStorageImage::prepare(PrepareRenderPassContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    {
        // Single storage-image binding visible to the vertex shader only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1,
                                                          vk::VK_SHADER_STAGE_VERTEX_BIT, DE_NULL};

        bindings.push_back(binding);
    }

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

    {
        // Pool with room for exactly one storage-image descriptor set.
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            1u,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
    }

    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

        {
            // 2D RGBA8 view over the whole image (single mip level / array layer).
            const vk::VkImageViewCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                                                          DE_NULL,
                                                          0u,

                                                          context.getImage(),
                                                          vk::VK_IMAGE_VIEW_TYPE_2D,
                                                          vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                          vk::makeComponentMappingRGBA(),
                                                          {vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}};

            m_imageView = vk::createImageView(vkd, device, &createInfo);
        }

        {
            // No sampler needed for a storage image, hence the null first field.
            const vk::VkDescriptorImageInfo imageInfo = {0, *m_imageView, context.getImageLayout()};
            const vk::VkWriteDescriptorSet write = {
                vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, DE_NULL, *m_descriptorSet, 0u, 0u, 1u,
                vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &imageInfo, DE_NULL, DE_NULL,
            };

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
5971
submit(SubmitContext & context)5972 void RenderVertexStorageImage::submit(SubmitContext &context)
5973 {
5974 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
5975 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5976
5977 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5978
5979 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u,
5980 &(*m_descriptorSet), 0u, DE_NULL);
5981 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5982 }
5983
verify(VerifyRenderPassContext & context,size_t)5984 void RenderVertexStorageImage::verify(VerifyRenderPassContext &context, size_t)
5985 {
5986 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2);
5987 pos++)
5988 {
5989 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
5990 const tcu::UVec4 pixel =
5991 context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5992
5993 if (pos % 2 == 0)
5994 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5995 else
5996 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
5997 }
5998 }
5999
// Render pass command that draws points whose coordinates are sampled from
// the test image through a combined image sampler (nearest filtering).
class RenderVertexSampledImage : public RenderPassCommand
{
public:
    RenderVertexSampledImage(void)
    {
    }
    ~RenderVertexSampledImage(void);

    const char *getName(void) const
    {
        return "RenderVertexSampledImage";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    // All handles RAII-owned via vk::Move; created in prepare().
    PipelineResources m_resources;
    vk::Move<vk::VkDescriptorPool> m_descriptorPool;
    vk::Move<vk::VkDescriptorSet> m_descriptorSet;
    vk::Move<vk::VkImageView> m_imageView;
    vk::Move<vk::VkSampler> m_sampler;
};
6025
// Empty: every member is RAII-owned (vk::Move), so no manual cleanup is needed.
RenderVertexSampledImage::~RenderVertexSampledImage(void)
{
}
6029
logPrepare(TestLog & log,size_t commandIndex) const6030 void RenderVertexSampledImage::logPrepare(TestLog &log, size_t commandIndex) const
6031 {
6032 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image."
6033 << TestLog::EndMessage;
6034 }
6035
logSubmit(TestLog & log,size_t commandIndex) const6036 void RenderVertexSampledImage::logSubmit(TestLog &log, size_t commandIndex) const
6037 {
6038 log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image."
6039 << TestLog::EndMessage;
6040 }
6041
// Builds the graphics pipeline that samples point coordinates from the image
// via a combined image sampler, and sets up the descriptor set, image view
// and (nearest-filter) sampler it reads through.
void RenderVertexSampledImage::prepare(PrepareRenderPassContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    {
        // Single combined-image-sampler binding visible to the vertex shader only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1,
                                                          vk::VK_SHADER_STAGE_VERTEX_BIT, DE_NULL};

        bindings.push_back(binding);
    }

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

    {
        // Pool with room for exactly one combined-image-sampler descriptor set.
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            1u,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
    }

    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

        {
            // 2D RGBA8 view over the whole image (single mip level / array layer).
            const vk::VkImageViewCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                                                          DE_NULL,
                                                          0u,

                                                          context.getImage(),
                                                          vk::VK_IMAGE_VIEW_TYPE_2D,
                                                          vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                          vk::makeComponentMappingRGBA(),
                                                          {vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}};

            m_imageView = vk::createImageView(vkd, device, &createInfo);
        }

        {
            // Nearest filtering and clamp-to-edge addressing: the shader fetches
            // exact texel values, not interpolated ones.
            const vk::VkSamplerCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
                                                        DE_NULL,
                                                        0u,

                                                        vk::VK_FILTER_NEAREST,
                                                        vk::VK_FILTER_NEAREST,

                                                        vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
                                                        vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                                                        vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                                                        vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                                                        0.0f,
                                                        VK_FALSE,
                                                        1.0f,
                                                        VK_FALSE,
                                                        vk::VK_COMPARE_OP_ALWAYS,
                                                        0.0f,
                                                        0.0f,
                                                        vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
                                                        VK_FALSE};

            m_sampler = vk::createSampler(vkd, device, &createInfo);
        }

        {
            const vk::VkDescriptorImageInfo imageInfo = {*m_sampler, *m_imageView, context.getImageLayout()};
            const vk::VkWriteDescriptorSet write = {
                vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, DE_NULL, *m_descriptorSet, 0u, 0u, 1u,
                vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageInfo, DE_NULL, DE_NULL,
            };

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
6141
submit(SubmitContext & context)6142 void RenderVertexSampledImage::submit(SubmitContext &context)
6143 {
6144 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
6145 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6146
6147 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6148
6149 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u,
6150 &(*m_descriptorSet), 0u, DE_NULL);
6151 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
6152 }
6153
verify(VerifyRenderPassContext & context,size_t)6154 void RenderVertexSampledImage::verify(VerifyRenderPassContext &context, size_t)
6155 {
6156 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2);
6157 pos++)
6158 {
6159 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
6160 const tcu::UVec4 pixel =
6161 context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
6162
6163 if (pos % 2 == 0)
6164 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
6165 else
6166 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
6167 }
6168 }
6169
// Render pass command that renders a full-target quad and reads the test
// buffer from the fragment shader through uniform-buffer descriptors
// (one per buffer chunk; see prepare()).
class RenderFragmentUniformBuffer : public RenderPassCommand
{
public:
    RenderFragmentUniformBuffer(void)
    {
    }
    ~RenderFragmentUniformBuffer(void);

    const char *getName(void) const
    {
        return "RenderFragmentUniformBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

protected:
    // Size in bytes of the buffer range bound through the given descriptor set.
    uint32_t calculateBufferPartSize(size_t descriptorSetNdx) const;

private:
    PipelineResources m_resources;
    vk::Move<vk::VkDescriptorPool> m_descriptorPool;
    // Raw handles; sets are disowned at allocation and released with the pool.
    vector<vk::VkDescriptorSet> m_descriptorSets;

    vk::VkDeviceSize m_bufferSize;  // clamped/rounded buffer size, set in prepare()
    size_t m_targetWidth;           // render target dimensions, set in prepare()
    size_t m_targetHeight;
    uint32_t m_valuesPerPixel;      // derived in prepare() from buffer vs. target size
};
6201
// Nothing to release by hand: the descriptor sets are freed when
// m_descriptorPool (a vk::Move) is destroyed.
RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer(void)
{
}
6205
logPrepare(TestLog & log,size_t commandIndex) const6206 void RenderFragmentUniformBuffer::logPrepare(TestLog &log, size_t commandIndex) const
6207 {
6208 log << TestLog::Message << commandIndex << ":" << getName()
6209 << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6210 }
6211
logSubmit(TestLog & log,size_t commandIndex) const6212 void RenderFragmentUniformBuffer::logSubmit(TestLog &log, size_t commandIndex) const
6213 {
6214 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer."
6215 << TestLog::EndMessage;
6216 }
6217
// Create the graphics pipeline, descriptor pool and descriptor sets used to
// read the test buffer as uniform buffers from the fragment shader. The buffer
// is split into MAX_UNIFORM_BUFFER_SIZE sized parts, one descriptor set per part.
void RenderFragmentUniformBuffer::prepare(PrepareRenderPassContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    // make sure buffer is smaller then MAX_SIZE and is multiple of 16 (in glsl we use uvec4 to store 16 values)
    m_bufferSize = de::min(context.getBufferSize(), (vk::VkDeviceSize)MAX_SIZE);
    m_bufferSize = static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;
    m_targetWidth = context.getTargetWidth();
    m_targetHeight = context.getTargetHeight();

    {
        // Single uniform-buffer binding, visible to the fragment stage only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1,
                                                          vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL};

        bindings.push_back(binding);
    }
    // 12 bytes of push constants: callId, valuesPerPixel, bufferSize (see submit()).
    const vk::VkPushConstantRange pushConstantRange = {vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, 12u};

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

    {
        // One descriptor set per MAX_UNIFORM_BUFFER_SIZE sized chunk of the buffer.
        const uint32_t descriptorCount =
            (uint32_t)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, descriptorCount};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            descriptorCount,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
        m_descriptorSets.resize(descriptorCount);

        // How many buffer values each pixel must chain through so that the whole
        // accessible buffer range is covered by the render target.
        m_valuesPerPixel = (uint32_t)divRoundUp<size_t>(
            descriptorCount * de::min<size_t>((size_t)m_bufferSize / 4, MAX_UNIFORM_BUFFER_SIZE / 4),
            m_targetWidth * m_targetHeight);
    }

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        // disown(): raw handles are kept; the sets die with the pool.
        m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

        {
            // Each set maps its own MAX_UNIFORM_BUFFER_SIZE slice of the buffer.
            const vk::VkDescriptorBufferInfo bufferInfo = {
                context.getBuffer(), (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
                calculateBufferPartSize(descriptorSetNdx)};
            const vk::VkWriteDescriptorSet write = {
                vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                DE_NULL,
                m_descriptorSets[descriptorSetNdx],
                0u,
                0u,
                1u,
                vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                DE_NULL,
                &bufferInfo,
                DE_NULL,
            };

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
6303
// Record one full-screen quad draw (6 vertices) per descriptor set, passing
// per-draw parameters to the fragment shader via push constants.
void RenderFragmentUniformBuffer::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();

    vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        // Must match the 12-byte push-constant range declared in prepare()
        // and the push-constant block in uniform-buffer.frag.
        const struct
        {
            const uint32_t callId;         // index of this draw / descriptor set
            const uint32_t valuesPerPixel; // number of chained lookups per pixel
            const uint32_t bufferSize;     // part size in uvec4 units (16 bytes each)
        } callParams = {(uint32_t)descriptorSetNdx, m_valuesPerPixel, calculateBufferPartSize(descriptorSetNdx) / 16u};

        vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u,
                                  1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
        vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u,
                             (uint32_t)sizeof(callParams), &callParams);
        vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
    }
}
6327
// Build the expected rendered image into the reference target by mirroring,
// on the CPU, the value-chaining the fragment shader performs over the buffer.
void RenderFragmentUniformBuffer::verify(VerifyRenderPassContext &context, size_t)
{
    // Number of 32-bit values held by one full uniform-buffer part.
    const size_t arrayIntSize = MAX_UNIFORM_BUFFER_SIZE / sizeof(uint32_t);

    for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
        for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
        {
            // Pixel id; assumes a 256-pixel-wide render target — TODO confirm against target size.
            const uint32_t id = (uint32_t)y * 256u + (uint32_t)x;
            const size_t firstDescriptorSetNdx =
                de::min<size_t>(id / (arrayIntSize / m_valuesPerPixel), m_descriptorSets.size() - 1);

            for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size();
                 descriptorSetNdx++)
            {
                // Byte offset of this descriptor set's buffer part.
                const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
                const uint32_t callId = (uint32_t)descriptorSetNdx;
                // Part size in uvec4 units, matching the bufferSize push constant.
                const uint32_t count = calculateBufferPartSize(descriptorSetNdx) / 16u;

                // Skip draws that did not cover this pixel.
                if (id < callId * (arrayIntSize / m_valuesPerPixel))
                    continue;
                else
                {
                    uint32_t value = id;

                    for (uint32_t i = 0; i < m_valuesPerPixel; i++)
                    {
                        // in shader UBO has up to 64 items of uvec4, each uvec4 contains 16 values
                        size_t index = offset + size_t((value % count) * 16u) + size_t((value % 4u) * 4u);
                        // Reassemble a little-endian 32-bit value from reference bytes.
                        value = (((uint32_t)context.getReference().get(index + 0))) |
                                (((uint32_t)context.getReference().get(index + 1)) << 8u) |
                                (((uint32_t)context.getReference().get(index + 2)) << 16u) |
                                (((uint32_t)context.getReference().get(index + 3)) << 24u);
                    }
                    // Unpack the final value into RGBA8 and store as the expected pixel.
                    const UVec4 vec((value >> 0u) & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu,
                                    (value >> 24u) & 0xFFu);

                    context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
                }
            }
        }
}
6369
calculateBufferPartSize(size_t descriptorSetNdx) const6370 uint32_t RenderFragmentUniformBuffer::calculateBufferPartSize(size_t descriptorSetNdx) const
6371 {
6372 uint32_t size =
6373 static_cast<uint32_t>(m_bufferSize) - static_cast<uint32_t>(descriptorSetNdx) * MAX_UNIFORM_BUFFER_SIZE;
6374 if (size < MAX_UNIFORM_BUFFER_SIZE)
6375 return size;
6376 return MAX_UNIFORM_BUFFER_SIZE;
6377 }
6378
// Render-pass command that binds the whole test buffer as a single storage
// buffer and reads it from the fragment shader while drawing a quad.
class RenderFragmentStorageBuffer : public RenderPassCommand
{
public:
    RenderFragmentStorageBuffer(void)
    {
    }
    ~RenderFragmentStorageBuffer(void);

    const char *getName(void) const
    {
        return "RenderFragmentStorageBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    PipelineResources m_resources;                   // pipeline + layouts built in prepare()
    vk::Move<vk::VkDescriptorPool> m_descriptorPool; // pool for the single descriptor set
    vk::Move<vk::VkDescriptorSet> m_descriptorSet;   // one set covering the whole buffer

    vk::VkDeviceSize m_bufferSize; // buffer size rounded down to a multiple of 16
    size_t m_targetWidth;          // render-target dimensions cached from context
    size_t m_targetHeight;
};
6406
~RenderFragmentStorageBuffer(void)6407 RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer(void)
6408 {
6409 }
6410
logPrepare(TestLog & log,size_t commandIndex) const6411 void RenderFragmentStorageBuffer::logPrepare(TestLog &log, size_t commandIndex) const
6412 {
6413 log << TestLog::Message << commandIndex << ":" << getName()
6414 << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
6415 }
6416
logSubmit(TestLog & log,size_t commandIndex) const6417 void RenderFragmentStorageBuffer::logSubmit(TestLog &log, size_t commandIndex) const
6418 {
6419 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer."
6420 << TestLog::EndMessage;
6421 }
6422
// Create the graphics pipeline and the single descriptor set that exposes the
// whole test buffer to the fragment shader as a storage buffer.
void RenderFragmentStorageBuffer::prepare(PrepareRenderPassContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    // make sure buffer size is multiple of 16 (in glsl we use uvec4 to store 16 values)
    m_bufferSize = context.getBufferSize();
    m_bufferSize = static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;
    m_targetWidth = context.getTargetWidth();
    m_targetHeight = context.getTargetHeight();

    {
        // Single storage-buffer binding, visible to the fragment stage only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1,
                                                          vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL};

        bindings.push_back(binding);
    }
    // 12 bytes of push constants: valuesPerPixel and bufferSize (see submit()).
    const vk::VkPushConstantRange pushConstantRange = {vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, 12u};

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

    {
        // One descriptor is enough: the whole buffer fits a single storage buffer.
        const uint32_t descriptorCount = 1;
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, descriptorCount};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            descriptorCount,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
    }

    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

        {
            // Bind the whole (rounded-down) buffer range at offset 0.
            const vk::VkDescriptorBufferInfo bufferInfo = {context.getBuffer(), 0u, m_bufferSize};
            const vk::VkWriteDescriptorSet write = {
                vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, DE_NULL, m_descriptorSet.get(), 0u, 0u, 1u,
                vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, DE_NULL, &bufferInfo, DE_NULL,
            };

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
6491
// Record a single full-screen quad draw (6 vertices) reading the storage buffer,
// with shader parameters supplied via push constants.
void RenderFragmentStorageBuffer::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();

    vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);

    // Must match the push-constant block in storage-buffer.frag.
    const struct
    {
        const uint32_t valuesPerPixel; // chained lookups per pixel to cover the buffer
        const uint32_t bufferSize;     // bound buffer size in bytes
    } callParams = {(uint32_t)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
                    (uint32_t)m_bufferSize};

    vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u,
                              &m_descriptorSet.get(), 0u, DE_NULL);
    vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u,
                         (uint32_t)sizeof(callParams), &callParams);
    vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
}
6512
// Build the expected image by replaying the fragment shader's chained lookups
// over the reference buffer contents.
void RenderFragmentStorageBuffer::verify(VerifyRenderPassContext &context, size_t)
{
    // Same valuesPerPixel as pushed to the shader in submit().
    const uint32_t valuesPerPixel =
        (uint32_t)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);

    for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
        for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
        {
            // Pixel id; assumes a 256-pixel-wide render target — TODO confirm against target size.
            const uint32_t id = (uint32_t)y * 256u + (uint32_t)x;

            uint32_t value = id;

            for (uint32_t i = 0; i < valuesPerPixel; i++)
            {
                // Read the (value mod element-count)-th 32-bit value, little-endian.
                value =
                    (((uint32_t)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(uint32_t))) * 4 + 0))
                     << 0u) |
                    (((uint32_t)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(uint32_t))) * 4 + 1))
                     << 8u) |
                    (((uint32_t)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(uint32_t))) * 4 + 2))
                     << 16u) |
                    (((uint32_t)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(uint32_t))) * 4 + 3))
                     << 24u);
            }
            // Unpack the final value into RGBA8 and store as the expected pixel.
            const UVec4 vec((value >> 0u) & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu,
                            (value >> 24u) & 0xFFu);

            context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
        }
}
6543
// Render-pass command that reads the test buffer through uniform texel buffer
// views from the fragment shader, one view/descriptor set per
// maxTexelBufferElements-sized part of the buffer.
class RenderFragmentUniformTexelBuffer : public RenderPassCommand
{
public:
    RenderFragmentUniformTexelBuffer(void)
    {
    }
    ~RenderFragmentUniformTexelBuffer(void);

    const char *getName(void) const
    {
        return "RenderFragmentUniformTexelBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    PipelineResources m_resources;                   // pipeline + layouts built in prepare()
    vk::Move<vk::VkDescriptorPool> m_descriptorPool; // pool holding one set per buffer part
    vector<vk::VkDescriptorSet> m_descriptorSets;    // raw handles; freed with the pool
    vector<vk::VkBufferView> m_bufferViews;          // raw handles; destroyed in the destructor

    const vk::DeviceInterface *m_vkd; // cached for destructor-time cleanup
    vk::VkDevice m_device;            // cached for destructor-time cleanup
    vk::VkDeviceSize m_bufferSize;
    uint32_t m_maxUniformTexelCount; // device limit maxTexelBufferElements
    size_t m_targetWidth;
    size_t m_targetHeight;
};
6575
~RenderFragmentUniformTexelBuffer(void)6576 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer(void)
6577 {
6578 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6579 {
6580 if (!!m_bufferViews[bufferViewNdx])
6581 {
6582 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6583 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6584 }
6585 }
6586 }
6587
logPrepare(TestLog & log,size_t commandIndex) const6588 void RenderFragmentUniformTexelBuffer::logPrepare(TestLog &log, size_t commandIndex) const
6589 {
6590 log << TestLog::Message << commandIndex << ":" << getName()
6591 << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6592 }
6593
logSubmit(TestLog & log,size_t commandIndex) const6594 void RenderFragmentUniformTexelBuffer::logSubmit(TestLog &log, size_t commandIndex) const
6595 {
6596 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer."
6597 << TestLog::EndMessage;
6598 }
6599
// Create the pipeline, descriptor sets and R32_UINT buffer views that expose
// the test buffer to the fragment shader as uniform texel buffers. The buffer
// is split into parts of at most maxTexelBufferElements texels each.
void RenderFragmentUniformTexelBuffer::prepare(PrepareRenderPassContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    // Cache device/interface for raw buffer-view cleanup in the destructor.
    m_device = device;
    m_vkd = &vkd;
    m_bufferSize = context.getBufferSize();
    m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
    m_targetWidth = context.getTargetWidth();
    m_targetHeight = context.getTargetHeight();

    {
        // Single uniform-texel-buffer binding, visible to the fragment stage only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1,
                                                          vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL};

        bindings.push_back(binding);
    }
    // 12 bytes of push constants: callId, valuesPerPixel, maxUniformTexelCount (see submit()).
    const vk::VkPushConstantRange pushConstantRange = {vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, 12u};

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

    {
        // One descriptor set per maxTexelBufferElements-sized (in 4-byte texels) part.
        const uint32_t descriptorCount =
            (uint32_t)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, descriptorCount};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            descriptorCount,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
        m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
        m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
    }

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        // Number of 4-byte texels in this part (last part may be smaller).
        const uint32_t count = (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4 ?
                                              m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4 :
                                              m_maxUniformTexelCount * 4) /
                               4;
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        // disown(): raw handles are kept; the sets die with the pool.
        m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

        {
            // R32_UINT view over this part of the buffer.
            const vk::VkBufferViewCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
                                                           DE_NULL,
                                                           0u,

                                                           context.getBuffer(),
                                                           vk::VK_FORMAT_R32_UINT,
                                                           descriptorSetNdx * m_maxUniformTexelCount * 4,
                                                           count * 4};

            VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
        }

        {
            const vk::VkWriteDescriptorSet write = {vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                                    DE_NULL,
                                                    m_descriptorSets[descriptorSetNdx],
                                                    0u,
                                                    0u,
                                                    1u,
                                                    vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
                                                    DE_NULL,
                                                    DE_NULL,
                                                    &m_bufferViews[descriptorSetNdx]};

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
6697
// Record one full-screen quad draw (6 vertices) per descriptor set/buffer view,
// with per-draw parameters passed via push constants.
void RenderFragmentUniformTexelBuffer::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();

    vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        // Must match the 12-byte push-constant block in uniform-texel-buffer.frag.
        const struct
        {
            const uint32_t callId;               // index of this draw / descriptor set
            const uint32_t valuesPerPixel;       // chained lookups per pixel
            const uint32_t maxUniformTexelCount; // device texel-buffer limit
        } callParams = {(uint32_t)descriptorSetNdx,
                        (uint32_t)divRoundUp<size_t>(
                            m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount),
                            m_targetWidth * m_targetHeight),
                        m_maxUniformTexelCount};

        vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u,
                                  1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
        vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u,
                             (uint32_t)sizeof(callParams), &callParams);
        vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
    }
}
6725
// Build the expected image by replaying, per draw, the fragment shader's
// chained texel lookups over the reference buffer contents.
void RenderFragmentUniformTexelBuffer::verify(VerifyRenderPassContext &context, size_t)
{
    // Same valuesPerPixel as pushed to the shader in submit().
    const uint32_t valuesPerPixel = (uint32_t)divRoundUp<size_t>(
        m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount),
        m_targetWidth * m_targetHeight);

    for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
        for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
        {
            // First draw that could have written this pixel; assumes a
            // 256-pixel-wide render target — TODO confirm against target size.
            const size_t firstDescriptorSetNdx = de::min<size_t>(
                (y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);

            for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size();
                 descriptorSetNdx++)
            {
                // Byte offset of this draw's buffer part.
                const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 4;
                const uint32_t callId = (uint32_t)descriptorSetNdx;

                const uint32_t id = (uint32_t)y * 256u + (uint32_t)x;
                // Number of 4-byte texels in this part (last part may be smaller).
                const uint32_t count = (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4 ?
                                                      m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4 :
                                                      m_maxUniformTexelCount * 4) /
                                       4;

                // Skip draws that did not cover this pixel.
                if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
                    continue;
                else
                {
                    uint32_t value = id;

                    for (uint32_t i = 0; i < valuesPerPixel; i++)
                    {
                        // Read the (value mod count)-th texel, little-endian.
                        value = ((uint32_t)context.getReference().get(offset + (value % count) * 4 + 0)) |
                                (((uint32_t)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u) |
                                (((uint32_t)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u) |
                                (((uint32_t)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
                    }
                    // Unpack the final value into RGBA8 and store as the expected pixel.
                    const UVec4 vec((value >> 0u) & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu,
                                    (value >> 24u) & 0xFFu);

                    context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
                }
            }
        }
}
6771
// Render-pass command that reads the test buffer through storage texel buffer
// views from the fragment shader, one view/descriptor set per
// maxTexelBufferElements-sized part of the buffer.
class RenderFragmentStorageTexelBuffer : public RenderPassCommand
{
public:
    RenderFragmentStorageTexelBuffer(void)
    {
    }
    ~RenderFragmentStorageTexelBuffer(void);

    const char *getName(void) const
    {
        return "RenderFragmentStorageTexelBuffer";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    PipelineResources m_resources;                   // pipeline + layouts built in prepare()
    vk::Move<vk::VkDescriptorPool> m_descriptorPool; // pool holding one set per buffer part
    vector<vk::VkDescriptorSet> m_descriptorSets;    // raw handles; freed with the pool
    vector<vk::VkBufferView> m_bufferViews;          // raw handles; destroyed in the destructor

    const vk::DeviceInterface *m_vkd; // cached for destructor-time cleanup
    vk::VkDevice m_device;            // cached for destructor-time cleanup
    vk::VkDeviceSize m_bufferSize;
    uint32_t m_maxStorageTexelCount; // device limit maxTexelBufferElements
    size_t m_targetWidth;
    size_t m_targetHeight;
};
6803
~RenderFragmentStorageTexelBuffer(void)6804 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer(void)
6805 {
6806 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6807 {
6808 if (!!m_bufferViews[bufferViewNdx])
6809 {
6810 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6811 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6812 }
6813 }
6814 }
6815
logPrepare(TestLog & log,size_t commandIndex) const6816 void RenderFragmentStorageTexelBuffer::logPrepare(TestLog &log, size_t commandIndex) const
6817 {
6818 log << TestLog::Message << commandIndex << ":" << getName()
6819 << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
6820 }
6821
logSubmit(TestLog & log,size_t commandIndex) const6822 void RenderFragmentStorageTexelBuffer::logSubmit(TestLog &log, size_t commandIndex) const
6823 {
6824 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer."
6825 << TestLog::EndMessage;
6826 }
6827
// Create the pipeline, descriptor sets and R32_UINT buffer views that expose
// the test buffer to the fragment shader as storage texel buffers. The buffer
// is split into parts of at most maxTexelBufferElements texels each.
void RenderFragmentStorageTexelBuffer::prepare(PrepareRenderPassContext &context)
{
    const vk::InstanceInterface &vki = context.getContext().getInstanceInterface();
    const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    // Cache device/interface for raw buffer-view cleanup in the destructor.
    m_device = device;
    m_vkd = &vkd;
    m_bufferSize = context.getBufferSize();
    m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
    m_targetWidth = context.getTargetWidth();
    m_targetHeight = context.getTargetHeight();

    {
        // Single storage-texel-buffer binding, visible to the fragment stage only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1,
                                                          vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL};

        bindings.push_back(binding);
    }
    // 16 bytes of push constants: callId, valuesPerPixel, maxStorageTexelCount, width (see submit()).
    const vk::VkPushConstantRange pushConstantRange = {vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, 16u};

    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

    {
        // One descriptor set per maxTexelBufferElements-sized (in 4-byte texels) part;
        // 64-bit math avoids overflow for large limits.
        const uint32_t descriptorCount =
            (uint32_t)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * (uint64_t)(4)));
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, descriptorCount};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            descriptorCount,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
        m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
        m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
    }

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        // Number of 4-byte texels in this part (last part may be smaller).
        const uint32_t count =
            (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * (uint64_t)(4) ?
                           m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4) :
                           m_maxStorageTexelCount * (uint64_t)(4)) /
            4;
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        // disown(): raw handles are kept; the sets die with the pool.
        m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

        {
            // R32_UINT view over this part of the buffer.
            const vk::VkBufferViewCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
                                                           DE_NULL,
                                                           0u,

                                                           context.getBuffer(),
                                                           vk::VK_FORMAT_R32_UINT,
                                                           descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4),
                                                           count * 4};

            VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
        }

        {
            const vk::VkWriteDescriptorSet write = {vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                                    DE_NULL,
                                                    m_descriptorSets[descriptorSetNdx],
                                                    0u,
                                                    0u,
                                                    1u,
                                                    vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
                                                    DE_NULL,
                                                    DE_NULL,
                                                    &m_bufferViews[descriptorSetNdx]};

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
6926
// Record one full-screen quad draw (6 vertices) per descriptor set/buffer view,
// with per-draw parameters passed via push constants.
void RenderFragmentStorageTexelBuffer::submit(SubmitContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();

    vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);

    for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
    {
        // Must match the 16-byte push-constant block in storage-texel-buffer.frag.
        const struct
        {
            const uint32_t callId;               // index of this draw / descriptor set
            const uint32_t valuesPerPixel;       // chained lookups per pixel
            const uint32_t maxStorageTexelCount; // device texel-buffer limit
            const uint32_t width;                // number of texels in this part
        } callParams = {(uint32_t)descriptorSetNdx,
                        (uint32_t)divRoundUp<size_t>(
                            m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4),
                            m_targetWidth * m_targetHeight),
                        m_maxStorageTexelCount,
                        (uint32_t)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * (uint64_t)(4) ?
                                       m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4) :
                                       m_maxStorageTexelCount * (uint64_t)(4)) /
                            4u};

        vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u,
                                  1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
        vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u,
                             (uint32_t)sizeof(callParams), &callParams);
        vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
    }
}
6959
// Build the expected image by replaying, per draw, the fragment shader's
// chained texel lookups over the reference buffer contents.
void RenderFragmentStorageTexelBuffer::verify(VerifyRenderPassContext &context, size_t)
{
    // Same valuesPerPixel as pushed to the shader in submit().
    const uint32_t valuesPerPixel = (uint32_t)divRoundUp<size_t>(
        m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4),
        m_targetWidth * m_targetHeight);

    for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
        for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
        {
            // First draw that could have written this pixel; assumes a
            // 256-pixel-wide render target — TODO confirm against target size.
            const size_t firstDescriptorSetNdx = de::min<size_t>(
                (y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);

            for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size();
                 descriptorSetNdx++)
            {
                // Byte offset of this draw's buffer part (64-bit to avoid overflow).
                const uint64_t offset = descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4);
                const uint32_t callId = (uint32_t)descriptorSetNdx;

                const uint32_t id = (uint32_t)y * 256u + (uint32_t)x;
                // Number of 4-byte texels in this part (last part may be smaller).
                const uint32_t count =
                    (uint32_t)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * (uint64_t)(4) ?
                                   m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * (uint64_t)(4) :
                                   m_maxStorageTexelCount * (uint64_t)(4)) /
                    4;

                // Skip draws that did not cover this pixel.
                if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
                    continue;
                else
                {
                    uint32_t value = id;

                    for (uint32_t i = 0; i < valuesPerPixel; i++)
                    {
                        // Read the (value mod count)-th texel, little-endian.
                        value = ((uint32_t)context.getReference().get(offset + (value % count) * 4 + 0)) |
                                (((uint32_t)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u) |
                                (((uint32_t)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u) |
                                (((uint32_t)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
                    }
                    // Unpack the final value into RGBA8 and store as the expected pixel.
                    const UVec4 vec((value >> 0u) & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu,
                                    (value >> 24u) & 0xFFu);

                    context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
                }
            }
        }
}
7006
// Render-pass command that reads the test image as a storage image from the
// fragment shader while drawing a quad.
class RenderFragmentStorageImage : public RenderPassCommand
{
public:
    RenderFragmentStorageImage(void)
    {
    }
    ~RenderFragmentStorageImage(void);

    const char *getName(void) const
    {
        return "RenderFragmentStorageImage";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    PipelineResources m_resources;                   // pipeline + layouts built in prepare()
    vk::Move<vk::VkDescriptorPool> m_descriptorPool; // pool for the single descriptor set
    vk::Move<vk::VkDescriptorSet> m_descriptorSet;   // one set with the storage image
    vk::Move<vk::VkImageView> m_imageView;           // view over the test image
};
7031
~RenderFragmentStorageImage(void)7032 RenderFragmentStorageImage::~RenderFragmentStorageImage(void)
7033 {
7034 }
7035
logPrepare(TestLog & log,size_t commandIndex) const7036 void RenderFragmentStorageImage::logPrepare(TestLog &log, size_t commandIndex) const
7037 {
7038 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image."
7039 << TestLog::EndMessage;
7040 }
7041
logSubmit(TestLog & log,size_t commandIndex) const7042 void RenderFragmentStorageImage::logSubmit(TestLog &log, size_t commandIndex) const
7043 {
7044 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image."
7045 << TestLog::EndMessage;
7046 }
7047
// Creates everything the draw needs: a graphics pipeline with one
// fragment-stage storage-image binding, a descriptor pool and set, an image
// view of the image under test, and the descriptor write tying them together.
void RenderFragmentStorageImage::prepare(PrepareRenderPassContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    {
        // Single storage image at binding 0, visible to the fragment shader.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1,
                                                          vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL};

        bindings.push_back(binding);
    }

    // No vertex input; the quad is produced by the vertex shader.
    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);

    {
        // Pool sized for exactly one set holding one storage image.
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            1u,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
    }

    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

        {
            // 2D RGBA8 view of the whole image (single mip level / layer).
            const vk::VkImageViewCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                                                          DE_NULL,
                                                          0u,

                                                          context.getImage(),
                                                          vk::VK_IMAGE_VIEW_TYPE_2D,
                                                          vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                          vk::makeComponentMappingRGBA(),
                                                          {vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}};

            m_imageView = vk::createImageView(vkd, device, &createInfo);
        }

        {
            // sampler is 0: not used for storage image descriptors.
            const vk::VkDescriptorImageInfo imageInfo = {0, *m_imageView, context.getImageLayout()};
            const vk::VkWriteDescriptorSet write = {
                vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, DE_NULL, *m_descriptorSet, 0u, 0u, 1u,
                vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &imageInfo, DE_NULL, DE_NULL,
            };

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
7122
submit(SubmitContext & context)7123 void RenderFragmentStorageImage::submit(SubmitContext &context)
7124 {
7125 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
7126 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7127
7128 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7129
7130 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u,
7131 &(*m_descriptorSet), 0u, DE_NULL);
7132 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
7133 }
7134
// Computes the expected render target contents on the CPU: each pixel starts
// from its own coordinate and is repeatedly replaced by the reference-image
// texel it indexes, mirroring the dependent reads the fragment shader does.
void RenderFragmentStorageImage::verify(VerifyRenderPassContext &context, size_t)
{
    const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
    // Number of dependent fetches per pixel scales with image area.
    const uint32_t valuesPerPixel = de::max<uint32_t>(1u, (size.x() * size.y()) / (256u * 256u));

    for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
        for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
        {
            UVec4 value = UVec4(x, y, 0u, 0u);

            for (uint32_t i = 0; i < valuesPerPixel; i++)
            {
                // Derive the next texel coordinate from the previous value
                // and wrap it into the image bounds.
                const UVec2 pos =
                    UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
                const Vec4 floatValue =
                    context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());

                // Convert the normalized texel back to 0..255 components.
                value = UVec4((uint32_t)round(floatValue.x() * 255.0f), (uint32_t)round(floatValue.y() * 255.0f),
                              (uint32_t)round(floatValue.z() * 255.0f), (uint32_t)round(floatValue.w() * 255.0f));
            }
            context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
        }
}
7158
// Render pass command that draws a full-screen quad whose fragment shader
// reads the image under test through a VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
// binding (same scheme as RenderFragmentStorageImage, plus a sampler).
class RenderFragmentSampledImage : public RenderPassCommand
{
public:
    RenderFragmentSampledImage(void)
    {
    }
    ~RenderFragmentSampledImage(void);

    const char *getName(void) const
    {
        return "RenderFragmentSampledImage";
    }
    void logPrepare(TestLog &, size_t) const;
    void logSubmit(TestLog &, size_t) const;
    void prepare(PrepareRenderPassContext &);
    void submit(SubmitContext &context);
    void verify(VerifyRenderPassContext &, size_t);

private:
    // Pipeline, layouts and descriptor set layout created in prepare().
    PipelineResources m_resources;
    vk::Move<vk::VkDescriptorPool> m_descriptorPool;
    vk::Move<vk::VkDescriptorSet> m_descriptorSet;
    // View of the image under test plus the sampler used to fetch it.
    vk::Move<vk::VkImageView> m_imageView;
    vk::Move<vk::VkSampler> m_sampler;
};
7184
~RenderFragmentSampledImage(void)7185 RenderFragmentSampledImage::~RenderFragmentSampledImage(void)
7186 {
7187 }
7188
logPrepare(TestLog & log,size_t commandIndex) const7189 void RenderFragmentSampledImage::logPrepare(TestLog &log, size_t commandIndex) const
7190 {
7191 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image."
7192 << TestLog::EndMessage;
7193 }
7194
logSubmit(TestLog & log,size_t commandIndex) const7195 void RenderFragmentSampledImage::logSubmit(TestLog &log, size_t commandIndex) const
7196 {
7197 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image."
7198 << TestLog::EndMessage;
7199 }
7200
// Creates everything the draw needs: a graphics pipeline with one
// fragment-stage combined-image-sampler binding, a descriptor pool and set,
// an image view of the image under test, a nearest-filtering sampler, and the
// descriptor write tying them together.
void RenderFragmentSampledImage::prepare(PrepareRenderPassContext &context)
{
    const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
    const vk::VkDevice device = context.getContext().getDevice();
    const vk::VkRenderPass renderPass = context.getRenderPass();
    const uint32_t subpass = 0;
    const vk::Unique<vk::VkShaderModule> vertexShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
    const vk::Unique<vk::VkShaderModule> fragmentShaderModule(
        vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
    vector<vk::VkDescriptorSetLayoutBinding> bindings;

    {
        // Single combined image sampler at binding 0, fragment stage only.
        const vk::VkDescriptorSetLayoutBinding binding = {0u, vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1,
                                                          vk::VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL};

        bindings.push_back(binding);
    }

    // No vertex input; the quad is produced by the vertex shader.
    createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule,
                                context.getTargetWidth(), context.getTargetHeight(),
                                vector<vk::VkVertexInputBindingDescription>(),
                                vector<vk::VkVertexInputAttributeDescription>(), bindings,
                                vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);

    {
        // Pool sized for exactly one set holding one combined image sampler.
        const vk::VkDescriptorPoolSize poolSizes = {vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1};
        const vk::VkDescriptorPoolCreateInfo createInfo = {
            vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
            DE_NULL,
            vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

            1u,
            1u,
            &poolSizes,
        };

        m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
    }

    {
        const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
        const vk::VkDescriptorSetAllocateInfo allocateInfo = {vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                                                              DE_NULL,

                                                              *m_descriptorPool, 1, &layout};

        m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

        {
            // 2D RGBA8 view of the whole image (single mip level / layer).
            const vk::VkImageViewCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                                                          DE_NULL,
                                                          0u,

                                                          context.getImage(),
                                                          vk::VK_IMAGE_VIEW_TYPE_2D,
                                                          vk::VK_FORMAT_R8G8B8A8_UNORM,
                                                          vk::makeComponentMappingRGBA(),
                                                          {vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}};

            m_imageView = vk::createImageView(vkd, device, &createInfo);
        }

        {
            // Nearest filtering, clamp-to-edge addressing, no anisotropy and
            // no compare, so texel fetches are exact and reproducible.
            const vk::VkSamplerCreateInfo createInfo = {vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
                                                        DE_NULL,
                                                        0u,

                                                        vk::VK_FILTER_NEAREST,
                                                        vk::VK_FILTER_NEAREST,

                                                        vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
                                                        vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                                                        vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                                                        vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                                                        0.0f,
                                                        VK_FALSE,
                                                        1.0f,
                                                        VK_FALSE,
                                                        vk::VK_COMPARE_OP_ALWAYS,
                                                        0.0f,
                                                        0.0f,
                                                        vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
                                                        VK_FALSE};

            m_sampler = vk::createSampler(vkd, device, &createInfo);
        }

        {
            const vk::VkDescriptorImageInfo imageInfo = {*m_sampler, *m_imageView, context.getImageLayout()};
            const vk::VkWriteDescriptorSet write = {
                vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, DE_NULL, *m_descriptorSet, 0u, 0u, 1u,
                vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageInfo, DE_NULL, DE_NULL,
            };

            vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
        }
    }
}
7300
submit(SubmitContext & context)7301 void RenderFragmentSampledImage::submit(SubmitContext &context)
7302 {
7303 const vk::DeviceInterface &vkd = context.getContext().getDeviceInterface();
7304 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7305
7306 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7307
7308 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u,
7309 &(*m_descriptorSet), 0u, DE_NULL);
7310 vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
7311 }
7312
// Computes the expected render target contents on the CPU; identical scheme
// to RenderFragmentStorageImage::verify — each pixel starts from its own
// coordinate and is repeatedly replaced by the reference-image texel it
// indexes, mirroring the dependent sampled reads the fragment shader does.
void RenderFragmentSampledImage::verify(VerifyRenderPassContext &context, size_t)
{
    const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
    // Number of dependent fetches per pixel scales with image area.
    const uint32_t valuesPerPixel = de::max<uint32_t>(1u, (size.x() * size.y()) / (256u * 256u));

    for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
        for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
        {
            UVec4 value = UVec4(x, y, 0u, 0u);

            for (uint32_t i = 0; i < valuesPerPixel; i++)
            {
                // Derive the next texel coordinate from the previous value
                // and wrap it into the image bounds.
                const UVec2 pos =
                    UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
                const Vec4 floatValue =
                    context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());

                // Convert the normalized texel back to 0..255 components.
                value = UVec4((uint32_t)round(floatValue.x() * 255.0f), (uint32_t)round(floatValue.y() * 255.0f),
                              (uint32_t)round(floatValue.z() * 255.0f), (uint32_t)round(floatValue.w() * 255.0f));
            }

            context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
        }
}
7337
// All operations the randomized command-stream generator can emit. Grouped
// by the context in which they are valid (host, command buffer, render pass);
// see enum Stage below.
enum Op
{
    // Host-side memory mapping
    OP_MAP,
    OP_UNMAP,

    OP_MAP_FLUSH,
    OP_MAP_INVALIDATE,

    OP_MAP_READ,
    OP_MAP_WRITE,
    OP_MAP_MODIFY,

    // Buffer object lifetime
    OP_BUFFER_CREATE,
    OP_BUFFER_DESTROY,
    OP_BUFFER_BINDMEMORY,

    OP_QUEUE_WAIT_FOR_IDLE,
    OP_DEVICE_WAIT_FOR_IDLE,

    OP_COMMAND_BUFFER_BEGIN,
    OP_COMMAND_BUFFER_END,

    // Secondary, non render pass command buffers
    // Render pass secondary command buffers are not currently covered
    OP_SECONDARY_COMMAND_BUFFER_BEGIN,
    OP_SECONDARY_COMMAND_BUFFER_END,

    // Buffer transfer operations
    OP_BUFFER_FILL,
    OP_BUFFER_UPDATE,

    OP_BUFFER_COPY_TO_BUFFER,
    OP_BUFFER_COPY_FROM_BUFFER,

    OP_BUFFER_COPY_TO_IMAGE,
    OP_BUFFER_COPY_FROM_IMAGE,

    // Image object lifetime
    OP_IMAGE_CREATE,
    OP_IMAGE_DESTROY,
    OP_IMAGE_BINDMEMORY,

    OP_IMAGE_TRANSITION_LAYOUT,

    // Image transfer operations
    OP_IMAGE_COPY_TO_BUFFER,
    OP_IMAGE_COPY_FROM_BUFFER,

    OP_IMAGE_COPY_TO_IMAGE,
    OP_IMAGE_COPY_FROM_IMAGE,

    OP_IMAGE_BLIT_TO_IMAGE,
    OP_IMAGE_BLIT_FROM_IMAGE,

    OP_IMAGE_RESOLVE,

    // Pipeline barriers with global, buffer or image memory barriers
    OP_PIPELINE_BARRIER_GLOBAL,
    OP_PIPELINE_BARRIER_BUFFER,
    OP_PIPELINE_BARRIER_IMAGE,

    // Renderpass operations
    OP_RENDERPASS_BEGIN,
    OP_RENDERPASS_END,

    // Commands inside render pass
    OP_RENDER_VERTEX_BUFFER,
    OP_RENDER_INDEX_BUFFER,

    OP_RENDER_VERTEX_UNIFORM_BUFFER,
    OP_RENDER_FRAGMENT_UNIFORM_BUFFER,

    OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
    OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,

    OP_RENDER_VERTEX_STORAGE_BUFFER,
    OP_RENDER_FRAGMENT_STORAGE_BUFFER,

    OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
    OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,

    OP_RENDER_VERTEX_STORAGE_IMAGE,
    OP_RENDER_FRAGMENT_STORAGE_IMAGE,

    OP_RENDER_VERTEX_SAMPLED_IMAGE,
    OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
};
7422
// Execution context the command stream is currently in; constrains which Op
// values may be generated next.
enum Stage
{
    STAGE_HOST,
    STAGE_COMMAND_BUFFER,
    STAGE_SECONDARY_COMMAND_BUFFER,

    STAGE_RENDER_PASS
};
7431
getWriteAccessFlags(void)7432 vk::VkAccessFlags getWriteAccessFlags(void)
7433 {
7434 return vk::VK_ACCESS_SHADER_WRITE_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
7435 vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | vk::VK_ACCESS_TRANSFER_WRITE_BIT |
7436 vk::VK_ACCESS_HOST_WRITE_BIT | vk::VK_ACCESS_MEMORY_WRITE_BIT;
7437 }
7438
isWriteAccess(vk::VkAccessFlagBits access)7439 bool isWriteAccess(vk::VkAccessFlagBits access)
7440 {
7441 return (getWriteAccessFlags() & access) != 0;
7442 }
7443
// Model of the memory availability/visibility state used to validate
// pipeline barriers: per pipeline stage it tracks which writes are not yet
// available, which operations are not yet visible, and which other stages
// have not yet completed. perform() records an access, barrier() applies the
// effect of vkCmdPipelineBarrier, isValid() asks whether an access is safe.
class CacheState
{
public:
    CacheState(vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);

    // Is an access of 'access' at 'stage' currently hazard-free?
    bool isValid(vk::VkPipelineStageFlagBits stage, vk::VkAccessFlagBits access) const;

    // Record that an access of 'access' was performed at 'stage'.
    void perform(vk::VkPipelineStageFlagBits stage, vk::VkAccessFlagBits access);

    void submitCommandBuffer(void);
    void waitForIdle(void);

    // Compute a barrier that resolves every outstanding hazard.
    void getFullBarrier(vk::VkPipelineStageFlags &srcStages, vk::VkAccessFlags &srcAccesses,
                        vk::VkPipelineStageFlags &dstStages, vk::VkAccessFlags &dstAccesses) const;

    // Apply the effect of a pipeline barrier with the given masks.
    void barrier(vk::VkPipelineStageFlags srcStages, vk::VkAccessFlags srcAccesses, vk::VkPipelineStageFlags dstStages,
                 vk::VkAccessFlags dstAccesses);

    // Apply the effect of a barrier that also performs a layout transition.
    void imageLayoutBarrier(vk::VkPipelineStageFlags srcStages, vk::VkAccessFlags srcAccesses,
                            vk::VkPipelineStageFlags dstStages, vk::VkAccessFlags dstAccesses);

    // Debug-assert that the given masks are sufficient for a layout transition.
    void checkImageLayoutBarrier(vk::VkPipelineStageFlags srcStages, vk::VkAccessFlags srcAccesses,
                                 vk::VkPipelineStageFlags dstStages, vk::VkAccessFlags dstAccesses);

    // Everything is clean and there is no need for barriers
    bool isClean(void) const;

    vk::VkPipelineStageFlags getAllowedStages(void) const
    {
        return m_allowedStages;
    }
    vk::VkAccessFlags getAllowedAcceses(void) const
    {
        return m_allowedAccesses;
    }

private:
    // Limit which stages and accesses are used by the CacheState tracker
    const vk::VkPipelineStageFlags m_allowedStages;
    const vk::VkAccessFlags m_allowedAccesses;

    // [dstStage][srcStage][dstAccess] = srcAccesses
    // In stage dstStage write srcAccesses from srcStage are not yet available for dstAccess
    vk::VkAccessFlags m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST][ACCESS_LAST];
    // Latest pipeline transition is not available in stage
    bool m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
    // [dstStage] = dstAccesses
    // In stage dstStage ops with dstAccesses are not yet visible
    vk::VkAccessFlags m_invisibleOperations[PIPELINESTAGE_LAST];

    // [dstStage] = srcStage
    // Memory operation in srcStage have not completed before dstStage
    vk::VkPipelineStageFlags m_incompleteOperations[PIPELINESTAGE_LAST];
};
7498
// Initializes the tracker to a fully clean state: nothing pending, nothing
// invisible, no outstanding layout transitions. Only stages/accesses present
// in the allowed masks are ever iterated or stored.
CacheState::CacheState(vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
    : m_allowedStages(allowedStages)
    , m_allowedAccesses(allowedAccesses)
{
    for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
    {
        if ((dstStage_ & m_allowedStages) == 0)
            continue;

        const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

        // All operations are initially visible
        m_invisibleOperations[dstStage] = 0;

        // There are no incomplete read operations initially
        m_incompleteOperations[dstStage] = 0;

        // There are no incomplete layout transitions
        m_unavailableLayoutTransition[dstStage] = false;

        for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
        {
            if ((srcStage_ & m_allowedStages) == 0)
                continue;

            const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

            // There are no write operations that are not yet available
            // initially.
            for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
            {
                if ((dstAccess_ & m_allowedAccesses) == 0)
                    continue;

                const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

                m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
            }
        }
    }
}
7540
// Returns true when performing 'access' at 'stage' would not race with
// earlier operations: prior writes/transitions must be visible, and for a
// write, all other stages' operations must have completed.
bool CacheState::isValid(vk::VkPipelineStageFlagBits stage, vk::VkAccessFlagBits access) const
{
    DE_ASSERT((access & (~m_allowedAccesses)) == 0);
    DE_ASSERT((stage & (~m_allowedStages)) == 0);

    const PipelineStage dstStage = pipelineStageFlagToPipelineStage(stage);

    // Previous operations are not visible to access on stage
    if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
        return false;

    if (isWriteAccess(access))
    {
        // Memory operations from other stages have not completed before
        // dstStage
        if (m_incompleteOperations[dstStage] != 0)
            return false;
    }

    return true;
}
7562
// Records that 'access' was performed at 'stage'. The stage becomes
// incomplete with respect to every other stage; a write additionally makes
// all accesses invisible everywhere and marks itself unavailable from
// srcStage for every (dstStage, dstAccess) pair.
void CacheState::perform(vk::VkPipelineStageFlagBits stage, vk::VkAccessFlagBits access)
{
    DE_ASSERT((access & (~m_allowedAccesses)) == 0);
    DE_ASSERT((stage & (~m_allowedStages)) == 0);

    const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);

    for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
    {
        if ((dstStage_ & m_allowedStages) == 0)
            continue;

        const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

        // Mark stage as incomplete for all stages
        m_incompleteOperations[dstStage] |= stage;

        if (isWriteAccess(access))
        {
            // Mark all accesses from all stages invisible
            m_invisibleOperations[dstStage] |= m_allowedAccesses;

            // Mark write access from srcStage unavailable to all stages for all accesses
            for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
            {
                if ((dstAccess_ & m_allowedAccesses) == 0)
                    continue;

                const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

                m_unavailableWriteOperations[dstStage][srcStage][dstAccess] |= access;
            }
        }
    }
}
7598
submitCommandBuffer(void)7599 void CacheState::submitCommandBuffer(void)
7600 {
7601 // Flush all host writes and reads
7602 barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7603 m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT), m_allowedStages,
7604 m_allowedAccesses);
7605 }
7606
waitForIdle(void)7607 void CacheState::waitForIdle(void)
7608 {
7609 // Make all writes available
7610 barrier(m_allowedStages, m_allowedAccesses & getWriteAccessFlags(), m_allowedStages, 0);
7611
7612 // Make all writes visible on device side
7613 barrier(m_allowedStages, 0, m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT), m_allowedAccesses);
7614 }
7615
// Computes the union of source/destination masks for a single barrier that
// would resolve every outstanding hazard tracked by this state: incomplete
// operations, invisible operations, unavailable writes and pending layout
// transitions.
void CacheState::getFullBarrier(vk::VkPipelineStageFlags &srcStages, vk::VkAccessFlags &srcAccesses,
                                vk::VkPipelineStageFlags &dstStages, vk::VkAccessFlags &dstAccesses) const
{
    srcStages = 0;
    srcAccesses = 0;
    dstStages = 0;
    dstAccesses = 0;

    for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
    {
        if ((dstStage_ & m_allowedStages) == 0)
            continue;

        const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

        // Make sure all previous operation are complete in all stages
        if (m_incompleteOperations[dstStage])
        {
            dstStages |= dstStage_;
            srcStages |= m_incompleteOperations[dstStage];
        }

        // Make sure all read operations are visible in dstStage
        if (m_invisibleOperations[dstStage])
        {
            dstStages |= dstStage_;
            dstAccesses |= m_invisibleOperations[dstStage];
        }

        // Make sure all write operations from all stages are available
        for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
        {
            if ((srcStage_ & m_allowedStages) == 0)
                continue;

            const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

            for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
            {
                if ((dstAccess_ & m_allowedAccesses) == 0)
                    continue;

                const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

                if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess])
                {
                    dstStages |= dstStage_;
                    // NOTE(review): 'dstStage_' below looks like it could be
                    // intended to be 'srcStage_' (the write originated in
                    // srcStage) — confirm against the intended hazard model
                    // before changing; over-broad masks are still correct.
                    srcStages |= dstStage_;
                    srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage][dstAccess];
                }
            }

            if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
            {
                // Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
                // but has completed in srcStage.
                dstStages |= dstStage_;
                srcStages |= dstStage_;
            }
        }
    }

    DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
    DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
    DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
    DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
}
7683
// Debug-only sanity check (active when DE_DEBUG is defined) that the given
// barrier masks are sufficient for an image layout transition: every stage
// must have completed before (or be in) srcStages, and at least one write
// must already have been made available somewhere if srcAccesses flushes
// nothing. In release builds all parameters are unused.
void CacheState::checkImageLayoutBarrier(vk::VkPipelineStageFlags srcStages, vk::VkAccessFlags srcAccesses,
                                         vk::VkPipelineStageFlags dstStages, vk::VkAccessFlags dstAccesses)
{
    DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
    DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
    DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
    DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);

    DE_UNREF(srcStages);
    DE_UNREF(srcAccesses);

    DE_UNREF(dstStages);
    DE_UNREF(dstAccesses);

#if defined(DE_DEBUG)
    // Check that all stages have completed before srcStages or are in srcStages.
    {
        vk::VkPipelineStageFlags completedStages = srcStages;

        for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
        {
            if ((srcStage_ & srcStages) == 0)
                continue;

            const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

            completedStages |= (~m_incompleteOperations[srcStage]);
        }

        DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
    }

    // Check that any write is available at least in one stage. Since all stages are complete even single flush is enough.
    if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
    {
        bool anyWriteAvailable = false;

        for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
        {
            if ((dstStage_ & m_allowedStages) == 0)
                continue;

            const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

            for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
            {
                if ((srcStage_ & m_allowedStages) == 0)
                    continue;

                const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

                for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
                {
                    if ((dstAccess_ & m_allowedAccesses) == 0)
                        continue;

                    const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

                    // A slot whose unavailable-set is not "every write bit"
                    // means some write has been made available there.
                    if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] !=
                        (getWriteAccessFlags() & m_allowedAccesses))
                    {
                        anyWriteAvailable = true;
                        // Only exits the innermost loop; harmless since the
                        // flag is already set.
                        break;
                    }
                }
            }
        }

        DE_ASSERT(anyWriteAvailable);
    }
#endif
}
7756
// Applies the effect of a barrier that performs an image layout transition.
// After the transition: all stages must re-synchronize, only dstAccesses are
// visible, the transition itself is only available in dstStages, and all
// previously-unavailable writes become available (the transition acts as a
// full availability operation for the image).
void CacheState::imageLayoutBarrier(vk::VkPipelineStageFlags srcStages, vk::VkAccessFlags srcAccesses,
                                    vk::VkPipelineStageFlags dstStages, vk::VkAccessFlags dstAccesses)
{
    checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);

    for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
    {
        if ((dstStage_ & m_allowedStages) == 0)
            continue;

        const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

        // All stages are incomplete after the barrier except each dstStage in it self.
        m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);

        // All memory operations are invisible unless they are listed in dstAccess
        m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);

        // Layout transition is unavailable in stage unless it was listed in dstStages
        m_unavailableLayoutTransition[dstStage] = (dstStage_ & dstStages) == 0;

        for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
        {
            if ((srcStage_ & m_allowedStages) == 0)
                continue;

            const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

            // All write operations are available after layout transition
            for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
            {
                if ((dstAccess_ & m_allowedAccesses) == 0)
                    continue;

                const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

                m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
            }
        }
    }
}
7798
// Applies the effect of vkCmdPipelineBarrier(srcStages, srcAccesses ->
// dstStages, dstAccesses) on the tracked state. First propagates dependencies
// transitively (what srcStage already waited on, dstStage now also waits on),
// then applies the barrier itself: completion of srcStages before dstStages,
// availability of srcAccesses writes, and — only once every write is
// available in a stage — visibility of dstAccesses there.
void CacheState::barrier(vk::VkPipelineStageFlags srcStages, vk::VkAccessFlags srcAccesses,
                         vk::VkPipelineStageFlags dstStages, vk::VkAccessFlags dstAccesses)
{
    DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
    DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
    DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
    DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);

    // Transitivity
    {
        // Snapshot the pre-barrier state so the propagation below reads
        // consistent values while the members are being updated.
        vk::VkPipelineStageFlags oldIncompleteOperations[PIPELINESTAGE_LAST];
        vk::VkAccessFlags oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST][ACCESS_LAST];
        bool oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];

        deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
        deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
        deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));

        for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
        {
            if ((srcStage_ & srcStages) == 0)
                continue;

            const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

            for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
            {
                if ((dstStage_ & dstStages) == 0)
                    continue;

                const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

                // Stages that have completed before srcStage have also completed before dstStage
                m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];

                // Image layout transition in srcStage are now available in dstStage
                m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];

                for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
                {
                    if ((sharedStage_ & m_allowedStages) == 0)
                        continue;

                    const PipelineStage sharedStage =
                        pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);

                    // Writes that are available in srcStage are also available in dstStage
                    for (vk::VkAccessFlags sharedAccess_ = 1; sharedAccess_ <= m_allowedAccesses; sharedAccess_ <<= 1)
                    {
                        if ((sharedAccess_ & m_allowedAccesses) == 0)
                            continue;

                        const Access sharedAccess = accessFlagToAccess((vk::VkAccessFlagBits)sharedAccess_);

                        m_unavailableWriteOperations[dstStage][sharedStage][sharedAccess] &=
                            oldUnavailableWriteOperations[srcStage][sharedStage][sharedAccess];
                    }
                }
            }
        }
    }

    // Barrier
    for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
    {
        const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
        bool allWritesAvailable = true;

        if ((dstStage_ & dstStages) == 0)
            continue;

        // Operations in srcStages have completed before any stage in dstStages
        m_incompleteOperations[dstStage] &= ~srcStages;

        for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
        {
            if ((srcStage_ & m_allowedStages) == 0)
                continue;

            const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

            // Make srcAccesses from srcStage available in dstStage for dstAccess
            for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
            {
                if ((dstAccess_ & m_allowedAccesses) == 0)
                    continue;

                const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

                if (((srcStage_ & srcStages) != 0) && ((dstAccess_ & dstAccesses) != 0))
                    m_unavailableWriteOperations[dstStage][srcStage][dstAccess] &= ~srcAccesses;

                if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
                    allWritesAvailable = false;
            }
        }

        // If all writes are available in dstStage make dstAccesses also visible
        if (allWritesAvailable)
            m_invisibleOperations[dstStage] &= ~dstAccesses;
    }
}
7901
isClean(void) const7902 bool CacheState::isClean(void) const
7903 {
7904 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7905 {
7906 if ((dstStage_ & m_allowedStages) == 0)
7907 continue;
7908
7909 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7910
7911 // Some operations are not visible to some stages
7912 if (m_invisibleOperations[dstStage] != 0)
7913 return false;
7914
7915 // There are operation that have not completed yet
7916 if (m_incompleteOperations[dstStage] != 0)
7917 return false;
7918
7919 // Layout transition has not completed yet
7920 if (m_unavailableLayoutTransition[dstStage])
7921 return false;
7922
7923 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7924 {
7925 if ((srcStage_ & m_allowedStages) == 0)
7926 continue;
7927
7928 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7929
7930 for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7931 {
7932 if ((dstAccess_ & m_allowedAccesses) == 0)
7933 continue;
7934
7935 const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7936
7937 // Some write operations are not available yet
7938 if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
7939 return false;
7940 }
7941 }
7942 }
7943
7944 return true;
7945 }
7946
layoutSupportedByUsage(Usage usage,vk::VkImageLayout layout)7947 bool layoutSupportedByUsage(Usage usage, vk::VkImageLayout layout)
7948 {
7949 switch (layout)
7950 {
7951 case vk::VK_IMAGE_LAYOUT_GENERAL:
7952 return true;
7953
7954 case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7955 return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7956
7957 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7958 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7959
7960 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7961 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7962
7963 case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7964 // \todo [2016-03-09 mika] Should include input attachment
7965 return (usage & USAGE_SAMPLED_IMAGE) != 0;
7966
7967 case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7968 return (usage & USAGE_TRANSFER_SRC) != 0;
7969
7970 case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7971 return (usage & USAGE_TRANSFER_DST) != 0;
7972
7973 case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7974 return true;
7975
7976 default:
7977 DE_FATAL("Unknown layout");
7978 return false;
7979 }
7980 }
7981
getNumberOfSupportedLayouts(Usage usage)7982 size_t getNumberOfSupportedLayouts(Usage usage)
7983 {
7984 const vk::VkImageLayout layouts[] = {
7985 vk::VK_IMAGE_LAYOUT_GENERAL,
7986 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7987 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7988 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7989 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7990 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7991 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7992 };
7993 size_t supportedLayoutCount = 0;
7994
7995 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7996 {
7997 const vk::VkImageLayout layout = layouts[layoutNdx];
7998
7999 if (layoutSupportedByUsage(usage, layout))
8000 supportedLayoutCount++;
8001 }
8002
8003 return supportedLayoutCount;
8004 }
8005
getRandomNextLayout(de::Random & rng,Usage usage,vk::VkImageLayout previousLayout)8006 vk::VkImageLayout getRandomNextLayout(de::Random &rng, Usage usage, vk::VkImageLayout previousLayout)
8007 {
8008 const vk::VkImageLayout layouts[] = {
8009 vk::VK_IMAGE_LAYOUT_GENERAL,
8010 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
8011 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
8012 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
8013 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
8014 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
8015 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
8016 };
8017 const size_t supportedLayoutCount = getNumberOfSupportedLayouts(usage);
8018
8019 DE_ASSERT(supportedLayoutCount > 0);
8020
8021 size_t nextLayoutNdx =
8022 ((size_t)rng.getUint32()) %
8023 (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED ? supportedLayoutCount : supportedLayoutCount - 1);
8024
8025 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
8026 {
8027 const vk::VkImageLayout layout = layouts[layoutNdx];
8028
8029 if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
8030 {
8031 if (nextLayoutNdx == 0)
8032 return layout;
8033 else
8034 nextLayoutNdx--;
8035 }
8036 }
8037
8038 DE_FATAL("Unreachable");
8039 return vk::VK_IMAGE_LAYOUT_UNDEFINED;
8040 }
8041
// Model of the simulated test state: which Vulkan objects currently exist,
// the host mapping / flush status, the tracked cache state and which
// recording stage (host / command buffer / render pass) is active.
struct State
{
    // 'usage' selects which pipeline stages and access masks the cache
    // tracks; 'seed' initializes the random stream used by the test walk.
    State(Usage usage, uint32_t seed)
        : stage(STAGE_HOST)
        , cache(usageToStageFlags(usage), usageToAccessFlags(usage))
        , rng(seed)
        , mapped(false)
        , hostInvalidated(true)
        , hostFlushed(true)
        , memoryDefined(false)
        , hasBuffer(false)
        , hasBoundBufferMemory(false)
        , hasImage(false)
        , hasBoundImageMemory(false)
        , imageLayout(vk::VK_IMAGE_LAYOUT_UNDEFINED)
        , imageDefined(false)
        , queueIdle(true)
        , deviceIdle(true)
        , commandBufferIsEmpty(true)
        , primaryCommandBufferIsEmpty(true)
        , renderPassIsEmpty(true)
    {
    }

    Stage stage;      // Current recording stage (host, cmd buffer, render pass, ...)
    CacheState cache; // Tracks pending availability/visibility of operations
    de::Random rng;   // Deterministic stream driving operation selection

    bool mapped;          // Memory is currently host-mapped
    bool hostInvalidated; // Host caches have been invalidated since last GPU write
    bool hostFlushed;     // Host writes have been flushed for the GPU
    bool memoryDefined;   // Memory contents (as buffer data) are defined

    bool hasBuffer;            // A buffer object exists
    bool hasBoundBufferMemory; // ...and has memory bound to it

    bool hasImage;                 // An image object exists
    bool hasBoundImageMemory;      // ...and has memory bound to it
    vk::VkImageLayout imageLayout; // Current image layout (UNDEFINED if none)
    bool imageDefined;             // Image contents are defined

    bool queueIdle;  // No work pending on the queue
    bool deviceIdle; // No work pending on the device

    bool commandBufferIsEmpty; // Nothing recorded in the active command buffer

    // a copy of commandBufferIsEmpty value, when secondary command buffer is in use
    bool primaryCommandBufferIsEmpty;

    bool renderPassIsEmpty; // Nothing recorded inside the active render pass
};
8093
// Collects into 'ops' every operation that is legal to perform next, given
// the simulated 'state', the memory 'usage' flags and whether buffer / image
// objects are allowed at all. Each candidate op is gated on the cache model
// (state.cache) so the random walk only emits operations whose reads/writes
// are properly synchronized at that point.
void getAvailableOps(const State &state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op> &ops)
{
    if (state.stage == STAGE_HOST)
    {
        if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
        {
            // Host memory operations
            if (state.mapped)
            {
                ops.push_back(OP_UNMAP);

                // Avoid flush and finish if they are not needed
                if (!state.hostFlushed)
                    ops.push_back(OP_MAP_FLUSH);

                // Invalidate only when there is something to invalidate and the
                // host caches for the requested accesses are currently valid.
                if (!state.hostInvalidated && state.queueIdle &&
                    ((usage & USAGE_HOST_READ) == 0 ||
                     state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT)) &&
                    ((usage & USAGE_HOST_WRITE) == 0 ||
                     state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
                {
                    ops.push_back(OP_MAP_INVALIDATE);
                }

                // Read-modify-write: needs defined memory plus valid host read and write.
                if (usage & USAGE_HOST_READ && usage & USAGE_HOST_WRITE && state.memoryDefined &&
                    state.hostInvalidated && state.queueIdle &&
                    state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT) &&
                    state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
                {
                    ops.push_back(OP_MAP_MODIFY);
                }

                if (usage & USAGE_HOST_READ && state.memoryDefined && state.hostInvalidated && state.queueIdle &&
                    state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
                {
                    ops.push_back(OP_MAP_READ);
                }

                if (usage & USAGE_HOST_WRITE && state.hostInvalidated && state.queueIdle &&
                    state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
                {
                    ops.push_back(OP_MAP_WRITE);
                }
            }
            else
                ops.push_back(OP_MAP);
        }

        if (state.hasBoundBufferMemory && state.queueIdle)
        {
            // \note Destroy only buffers after they have been bound
            ops.push_back(OP_BUFFER_DESTROY);
        }
        else
        {
            if (state.hasBuffer)
            {
                if (!state.hasBoundBufferMemory)
                    ops.push_back(OP_BUFFER_BINDMEMORY);
            }
            else if (!state.hasImage && supportsBuffers) // Avoid creating buffer if there is already image
                ops.push_back(OP_BUFFER_CREATE);
        }

        if (state.hasBoundImageMemory && state.queueIdle)
        {
            // \note Destroy only image after they have been bound
            ops.push_back(OP_IMAGE_DESTROY);
        }
        else
        {
            if (state.hasImage)
            {
                if (!state.hasBoundImageMemory)
                    ops.push_back(OP_IMAGE_BINDMEMORY);
            }
            else if (!state.hasBuffer && supportsImages) // Avoid creating image if there is already buffer
                ops.push_back(OP_IMAGE_CREATE);
        }

        // Host writes must be flushed before GPU commands and there must be
        // buffer or image for GPU commands
        if (state.hostFlushed &&
            (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined ||
             supportsDeviceImageWrites(usage)) &&
            (state.hasBoundBufferMemory ||
             state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
            && (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) !=
                   0) // Don't start command buffer if there are no ways to use memory from gpu
        {
            ops.push_back(OP_COMMAND_BUFFER_BEGIN);
        }

        if (!state.deviceIdle)
            ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);

        if (!state.queueIdle)
            ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
    }
    else if (state.stage == STAGE_COMMAND_BUFFER)
    {
        // Recording a primary command buffer.
        // Barriers are only useful while the cache model has pending hazards.
        if (!state.cache.isClean())
        {
            ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);

            if (state.hasImage && (state.imageLayout != vk::VK_IMAGE_LAYOUT_UNDEFINED))
                ops.push_back(OP_PIPELINE_BARRIER_IMAGE);

            if (state.hasBuffer)
                ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
        }

        if (state.hasBoundBufferMemory)
        {
            if (usage & USAGE_TRANSFER_DST &&
                state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
            {
                ops.push_back(OP_BUFFER_FILL);
                ops.push_back(OP_BUFFER_UPDATE);
                ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
                ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
            }

            if (usage & USAGE_TRANSFER_SRC && state.memoryDefined &&
                state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
            {
                ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
                ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
            }
        }

        // Layout transitions are possible only if there is some other layout to go to.
        if (state.hasBoundImageMemory &&
            (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || getNumberOfSupportedLayouts(usage) > 1))
        {
            ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);

            {
                if (usage & USAGE_TRANSFER_DST &&
                    (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL ||
                     state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
                    state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
                {
                    ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
                    ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
                    ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
                }

                if (usage & USAGE_TRANSFER_SRC &&
                    (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL ||
                     state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) &&
                    state.imageDefined &&
                    state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
                {
                    ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
                    ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
                    ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
                }
            }
        }

        // A render pass may begin only if at least one render operation would
        // be valid inside it (defined data, compatible layout, valid caches).
        // \todo [2016-03-09 mika] Add other usages?
        if ((state.memoryDefined && state.hasBoundBufferMemory &&
             (((usage & USAGE_VERTEX_BUFFER) &&
               state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT)) ||
              ((usage & USAGE_INDEX_BUFFER) &&
               state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT)) ||
              ((usage & USAGE_UNIFORM_BUFFER) &&
               (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT) ||
                state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))) ||
              ((usage & USAGE_UNIFORM_TEXEL_BUFFER) &&
               (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT) ||
                state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))) ||
              ((usage & USAGE_STORAGE_BUFFER) &&
               (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT) ||
                state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))) ||
              ((usage & USAGE_STORAGE_TEXEL_BUFFER) &&
               state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))) ||
            (state.imageDefined && state.hasBoundImageMemory &&
             (((usage & USAGE_STORAGE_IMAGE) && state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL &&
               (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT) ||
                state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))) ||
              ((usage & USAGE_SAMPLED_IMAGE) &&
               (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL ||
                state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) &&
               (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT) ||
                state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
        {
            ops.push_back(OP_RENDERPASS_BEGIN);
        }

        ops.push_back(OP_SECONDARY_COMMAND_BUFFER_BEGIN);

        // \note This depends on previous operations and has to be always the
        // last command buffer operation check
        if (ops.empty() || !state.commandBufferIsEmpty)
            ops.push_back(OP_COMMAND_BUFFER_END);
    }
    else if (state.stage == STAGE_SECONDARY_COMMAND_BUFFER)
    {
        // Recording a secondary command buffer: same rules as the primary
        // command buffer, minus render pass / nested secondary buffers.
        if (!state.cache.isClean())
        {
            ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);

            if (state.hasImage && (state.imageLayout != vk::VK_IMAGE_LAYOUT_UNDEFINED))
                ops.push_back(OP_PIPELINE_BARRIER_IMAGE);

            if (state.hasBuffer)
                ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
        }

        if (state.hasBoundBufferMemory)
        {
            if (usage & USAGE_TRANSFER_DST &&
                state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
            {
                ops.push_back(OP_BUFFER_FILL);
                ops.push_back(OP_BUFFER_UPDATE);
                ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
                ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
            }

            if (usage & USAGE_TRANSFER_SRC && state.memoryDefined &&
                state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
            {
                ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
                ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
            }
        }

        if (state.hasBoundImageMemory &&
            (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || getNumberOfSupportedLayouts(usage) > 1))
        {
            ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);

            {
                if (usage & USAGE_TRANSFER_DST &&
                    (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL ||
                     state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) &&
                    state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
                {
                    ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
                    ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
                    ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
                }

                if (usage & USAGE_TRANSFER_SRC &&
                    (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL ||
                     state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) &&
                    state.imageDefined &&
                    state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
                {
                    ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
                    ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
                    ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
                }
            }
        }

        // \note This depends on previous operations and has to be always the
        // last command buffer operation check
        if (ops.empty() || !state.commandBufferIsEmpty)
            ops.push_back(OP_SECONDARY_COMMAND_BUFFER_END);
    }
    else if (state.stage == STAGE_RENDER_PASS)
    {
        // Inside a render pass: only draw-style reads whose caches are valid.
        if ((usage & USAGE_VERTEX_BUFFER) != 0 && state.memoryDefined && state.hasBoundBufferMemory &&
            state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
        {
            ops.push_back(OP_RENDER_VERTEX_BUFFER);
        }

        if ((usage & USAGE_INDEX_BUFFER) != 0 && state.memoryDefined && state.hasBoundBufferMemory &&
            state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
        {
            ops.push_back(OP_RENDER_INDEX_BUFFER);
        }

        if ((usage & USAGE_UNIFORM_BUFFER) != 0 && state.memoryDefined && state.hasBoundBufferMemory)
        {
            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
                ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);

            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
                ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
        }

        if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0 && state.memoryDefined && state.hasBoundBufferMemory)
        {
            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
                ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);

            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
                ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
        }

        if ((usage & USAGE_STORAGE_BUFFER) != 0 && state.memoryDefined && state.hasBoundBufferMemory)
        {
            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
                ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);

            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
                ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
        }

        if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0 && state.memoryDefined && state.hasBoundBufferMemory)
        {
            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
                ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);

            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
                ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
        }

        if ((usage & USAGE_STORAGE_IMAGE) != 0 && state.imageDefined && state.hasBoundImageMemory &&
            (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
        {
            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
                ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);

            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
                ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
        }

        if ((usage & USAGE_SAMPLED_IMAGE) != 0 && state.imageDefined && state.hasBoundImageMemory &&
            (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL ||
             state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
        {
            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
                ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);

            if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
                ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
        }

        if (!state.renderPassIsEmpty)
            ops.push_back(OP_RENDERPASS_END);
    }
    else
        DE_FATAL("Unknown stage");
}
8434
removeIllegalAccessFlags(vk::VkAccessFlags & accessflags,vk::VkPipelineStageFlags stageflags)8435 void removeIllegalAccessFlags(vk::VkAccessFlags &accessflags, vk::VkPipelineStageFlags stageflags)
8436 {
8437 if (!(stageflags & vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
8438 accessflags &= ~vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
8439
8440 if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8441 accessflags &= ~vk::VK_ACCESS_INDEX_READ_BIT;
8442
8443 if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8444 accessflags &= ~vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
8445
8446 if (!(stageflags &
8447 (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8448 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8449 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8450 accessflags &= ~vk::VK_ACCESS_UNIFORM_READ_BIT;
8451
8452 if (!(stageflags & vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT))
8453 accessflags &= ~vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
8454
8455 if (!(stageflags &
8456 (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8457 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8458 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8459 accessflags &= ~vk::VK_ACCESS_SHADER_READ_BIT;
8460
8461 if (!(stageflags &
8462 (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8463 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8464 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8465 accessflags &= ~vk::VK_ACCESS_SHADER_WRITE_BIT;
8466
8467 if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8468 accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
8469
8470 if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8471 accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
8472
8473 if (!(stageflags &
8474 (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8475 accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
8476
8477 if (!(stageflags &
8478 (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8479 accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
8480
8481 if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8482 accessflags &= ~vk::VK_ACCESS_TRANSFER_READ_BIT;
8483
8484 if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8485 accessflags &= ~vk::VK_ACCESS_TRANSFER_WRITE_BIT;
8486
8487 if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8488 accessflags &= ~vk::VK_ACCESS_HOST_READ_BIT;
8489
8490 if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8491 accessflags &= ~vk::VK_ACCESS_HOST_WRITE_BIT;
8492 }
8493
applyOp(State & state,const Memory & memory,Op op,Usage usage)8494 void applyOp(State &state, const Memory &memory, Op op, Usage usage)
8495 {
8496 switch (op)
8497 {
8498 case OP_MAP:
8499 DE_ASSERT(state.stage == STAGE_HOST);
8500 DE_ASSERT(!state.mapped);
8501 state.mapped = true;
8502 break;
8503
8504 case OP_UNMAP:
8505 DE_ASSERT(state.stage == STAGE_HOST);
8506 DE_ASSERT(state.mapped);
8507 state.mapped = false;
8508 break;
8509
8510 case OP_MAP_FLUSH:
8511 DE_ASSERT(state.stage == STAGE_HOST);
8512 DE_ASSERT(!state.hostFlushed);
8513 state.hostFlushed = true;
8514 break;
8515
8516 case OP_MAP_INVALIDATE:
8517 DE_ASSERT(state.stage == STAGE_HOST);
8518 DE_ASSERT(!state.hostInvalidated);
8519 state.hostInvalidated = true;
8520 break;
8521
8522 case OP_MAP_READ:
8523 DE_ASSERT(state.stage == STAGE_HOST);
8524 DE_ASSERT(state.hostInvalidated);
8525 state.rng.getUint32();
8526 break;
8527
8528 case OP_MAP_WRITE:
8529 DE_ASSERT(state.stage == STAGE_HOST);
8530 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8531 state.hostFlushed = false;
8532
8533 state.memoryDefined = true;
8534 state.imageDefined = false;
8535 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8536 state.rng.getUint32();
8537 break;
8538
8539 case OP_MAP_MODIFY:
8540 DE_ASSERT(state.stage == STAGE_HOST);
8541 DE_ASSERT(state.hostInvalidated);
8542
8543 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8544 state.hostFlushed = false;
8545
8546 state.rng.getUint32();
8547 break;
8548
8549 case OP_BUFFER_CREATE:
8550 DE_ASSERT(state.stage == STAGE_HOST);
8551 DE_ASSERT(!state.hasBuffer);
8552
8553 state.hasBuffer = true;
8554 break;
8555
8556 case OP_BUFFER_DESTROY:
8557 DE_ASSERT(state.stage == STAGE_HOST);
8558 DE_ASSERT(state.hasBuffer);
8559 DE_ASSERT(state.hasBoundBufferMemory);
8560
8561 state.hasBuffer = false;
8562 state.hasBoundBufferMemory = false;
8563 break;
8564
8565 case OP_BUFFER_BINDMEMORY:
8566 DE_ASSERT(state.stage == STAGE_HOST);
8567 DE_ASSERT(state.hasBuffer);
8568 DE_ASSERT(!state.hasBoundBufferMemory);
8569
8570 state.hasBoundBufferMemory = true;
8571 break;
8572
8573 case OP_IMAGE_CREATE:
8574 DE_ASSERT(state.stage == STAGE_HOST);
8575 DE_ASSERT(!state.hasImage);
8576 DE_ASSERT(!state.hasBuffer);
8577
8578 state.hasImage = true;
8579 break;
8580
8581 case OP_IMAGE_DESTROY:
8582 DE_ASSERT(state.stage == STAGE_HOST);
8583 DE_ASSERT(state.hasImage);
8584 DE_ASSERT(state.hasBoundImageMemory);
8585
8586 state.hasImage = false;
8587 state.hasBoundImageMemory = false;
8588 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8589 state.imageDefined = false;
8590 break;
8591
8592 case OP_IMAGE_BINDMEMORY:
8593 DE_ASSERT(state.stage == STAGE_HOST);
8594 DE_ASSERT(state.hasImage);
8595 DE_ASSERT(!state.hasBoundImageMemory);
8596
8597 state.hasBoundImageMemory = true;
8598 break;
8599
8600 case OP_IMAGE_TRANSITION_LAYOUT:
8601 {
8602 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8603 DE_ASSERT(state.hasImage);
8604 DE_ASSERT(state.hasBoundImageMemory);
8605
8606 // \todo [2016-03-09 mika] Support linear tiling and predefined data
8607 const vk::VkImageLayout srcLayout =
8608 state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8609 const vk::VkImageLayout dstLayout = getRandomNextLayout(state.rng, usage, srcLayout);
8610
8611 vk::VkPipelineStageFlags dirtySrcStages;
8612 vk::VkAccessFlags dirtySrcAccesses;
8613 vk::VkPipelineStageFlags dirtyDstStages;
8614 vk::VkAccessFlags dirtyDstAccesses;
8615
8616 vk::VkPipelineStageFlags srcStages;
8617 vk::VkAccessFlags srcAccesses;
8618 vk::VkPipelineStageFlags dstStages;
8619 vk::VkAccessFlags dstAccesses;
8620
8621 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8622
8623 // Try masking some random bits
8624 srcStages = dirtySrcStages;
8625 srcAccesses = dirtySrcAccesses;
8626
8627 dstStages = state.cache.getAllowedStages() & state.rng.getUint32();
8628 dstAccesses = state.cache.getAllowedAcceses() & state.rng.getUint32();
8629
8630 // If there are no bits in dst stage mask use all stages
8631 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
8632
8633 if (!srcStages)
8634 srcStages = dstStages;
8635
8636 removeIllegalAccessFlags(dstAccesses, dstStages);
8637 removeIllegalAccessFlags(srcAccesses, srcStages);
8638
8639 if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
8640 state.imageDefined = false;
8641
8642 state.commandBufferIsEmpty = false;
8643 state.imageLayout = dstLayout;
8644 state.memoryDefined = false;
8645 state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
8646 break;
8647 }
8648
8649 case OP_QUEUE_WAIT_FOR_IDLE:
8650 DE_ASSERT(state.stage == STAGE_HOST);
8651 DE_ASSERT(!state.queueIdle);
8652
8653 state.queueIdle = true;
8654
8655 state.cache.waitForIdle();
8656 break;
8657
8658 case OP_DEVICE_WAIT_FOR_IDLE:
8659 DE_ASSERT(state.stage == STAGE_HOST);
8660 DE_ASSERT(!state.deviceIdle);
8661
8662 state.queueIdle = true;
8663 state.deviceIdle = true;
8664
8665 state.cache.waitForIdle();
8666 break;
8667
8668 case OP_COMMAND_BUFFER_BEGIN:
8669 DE_ASSERT(state.stage == STAGE_HOST);
8670 state.stage = STAGE_COMMAND_BUFFER;
8671 state.commandBufferIsEmpty = true;
8672 // Makes host writes visible to command buffer
8673 state.cache.submitCommandBuffer();
8674 break;
8675
8676 case OP_COMMAND_BUFFER_END:
8677 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8678 state.stage = STAGE_HOST;
8679 state.queueIdle = false;
8680 state.deviceIdle = false;
8681 break;
8682
8683 case OP_SECONDARY_COMMAND_BUFFER_BEGIN:
8684 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8685 state.stage = STAGE_SECONDARY_COMMAND_BUFFER;
8686 state.primaryCommandBufferIsEmpty = state.commandBufferIsEmpty;
8687 state.commandBufferIsEmpty = true;
8688 break;
8689
8690 case OP_SECONDARY_COMMAND_BUFFER_END:
8691 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8692 state.stage = STAGE_COMMAND_BUFFER;
8693 state.commandBufferIsEmpty = state.primaryCommandBufferIsEmpty;
8694 break;
8695
8696 case OP_BUFFER_COPY_FROM_BUFFER:
8697 case OP_BUFFER_COPY_FROM_IMAGE:
8698 case OP_BUFFER_UPDATE:
8699 case OP_BUFFER_FILL:
8700 state.rng.getUint32();
8701 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8702
8703 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8704 state.hostInvalidated = false;
8705
8706 state.commandBufferIsEmpty = false;
8707 state.memoryDefined = true;
8708 state.imageDefined = false;
8709 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8710 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8711 break;
8712
8713 case OP_BUFFER_COPY_TO_BUFFER:
8714 case OP_BUFFER_COPY_TO_IMAGE:
8715 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8716
8717 state.commandBufferIsEmpty = false;
8718 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8719 break;
8720
8721 case OP_IMAGE_BLIT_FROM_IMAGE:
8722 state.rng.getBool();
8723 // Fall through
8724 case OP_IMAGE_COPY_FROM_BUFFER:
8725 case OP_IMAGE_COPY_FROM_IMAGE:
8726 state.rng.getUint32();
8727 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8728
8729 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8730 state.hostInvalidated = false;
8731
8732 state.commandBufferIsEmpty = false;
8733 state.memoryDefined = false;
8734 state.imageDefined = true;
8735 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8736 break;
8737
8738 case OP_IMAGE_BLIT_TO_IMAGE:
8739 state.rng.getBool();
8740 // Fall through
8741 case OP_IMAGE_COPY_TO_BUFFER:
8742 case OP_IMAGE_COPY_TO_IMAGE:
8743 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8744
8745 state.commandBufferIsEmpty = false;
8746 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8747 break;
8748
8749 case OP_PIPELINE_BARRIER_GLOBAL:
8750 case OP_PIPELINE_BARRIER_BUFFER:
8751 case OP_PIPELINE_BARRIER_IMAGE:
8752 {
8753 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8754
8755 vk::VkPipelineStageFlags dirtySrcStages;
8756 vk::VkAccessFlags dirtySrcAccesses;
8757 vk::VkPipelineStageFlags dirtyDstStages;
8758 vk::VkAccessFlags dirtyDstAccesses;
8759
8760 vk::VkPipelineStageFlags srcStages;
8761 vk::VkAccessFlags srcAccesses;
8762 vk::VkPipelineStageFlags dstStages;
8763 vk::VkAccessFlags dstAccesses;
8764
8765 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8766
8767 // Try masking some random bits
8768 srcStages = dirtySrcStages & state.rng.getUint32();
8769 srcAccesses = dirtySrcAccesses & state.rng.getUint32();
8770
8771 dstStages = dirtyDstStages & state.rng.getUint32();
8772 dstAccesses = dirtyDstAccesses & state.rng.getUint32();
8773
8774 // If there are no bits in stage mask use the original dirty stages
8775 srcStages = srcStages ? srcStages : dirtySrcStages;
8776 dstStages = dstStages ? dstStages : dirtyDstStages;
8777
8778 if (!srcStages)
8779 srcStages = dstStages;
8780
8781 removeIllegalAccessFlags(dstAccesses, dstStages);
8782 removeIllegalAccessFlags(srcAccesses, srcStages);
8783
8784 state.commandBufferIsEmpty = false;
8785 state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
8786 break;
8787 }
8788
8789 case OP_RENDERPASS_BEGIN:
8790 {
8791 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8792
8793 state.renderPassIsEmpty = true;
8794 state.stage = STAGE_RENDER_PASS;
8795 break;
8796 }
8797
8798 case OP_RENDERPASS_END:
8799 {
8800 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8801
8802 state.renderPassIsEmpty = true;
8803 state.stage = STAGE_COMMAND_BUFFER;
8804 break;
8805 }
8806
8807 case OP_RENDER_VERTEX_BUFFER:
8808 {
8809 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8810
8811 state.renderPassIsEmpty = false;
8812 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
8813 break;
8814 }
8815
8816 case OP_RENDER_INDEX_BUFFER:
8817 {
8818 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8819
8820 state.renderPassIsEmpty = false;
8821 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
8822 break;
8823 }
8824
8825 case OP_RENDER_VERTEX_UNIFORM_BUFFER:
8826 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
8827 {
8828 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8829
8830 state.renderPassIsEmpty = false;
8831 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8832 break;
8833 }
8834
8835 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
8836 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
8837 {
8838 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8839
8840 state.renderPassIsEmpty = false;
8841 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8842 break;
8843 }
8844
8845 case OP_RENDER_VERTEX_STORAGE_BUFFER:
8846 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
8847 {
8848 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8849
8850 state.renderPassIsEmpty = false;
8851 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8852 break;
8853 }
8854
8855 case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
8856 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
8857 {
8858 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8859
8860 state.renderPassIsEmpty = false;
8861 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8862 break;
8863 }
8864
8865 case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
8866 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
8867 {
8868 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8869
8870 state.renderPassIsEmpty = false;
8871 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8872 break;
8873 }
8874
8875 case OP_RENDER_VERTEX_STORAGE_IMAGE:
8876 case OP_RENDER_VERTEX_SAMPLED_IMAGE:
8877 {
8878 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8879
8880 state.renderPassIsEmpty = false;
8881 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8882 break;
8883 }
8884
8885 default:
8886 DE_FATAL("Unknown op");
8887 }
8888 }
8889
createHostCommand(Op op,de::Random & rng,Usage usage,vk::VkSharingMode sharing)8890 de::MovePtr<Command> createHostCommand(Op op, de::Random &rng, Usage usage, vk::VkSharingMode sharing)
8891 {
8892 switch (op)
8893 {
8894 case OP_MAP:
8895 return de::MovePtr<Command>(new Map());
8896 case OP_UNMAP:
8897 return de::MovePtr<Command>(new UnMap());
8898
8899 case OP_MAP_FLUSH:
8900 return de::MovePtr<Command>(new Flush());
8901 case OP_MAP_INVALIDATE:
8902 return de::MovePtr<Command>(new Invalidate());
8903
8904 case OP_MAP_READ:
8905 return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8906 case OP_MAP_WRITE:
8907 return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8908 case OP_MAP_MODIFY:
8909 return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8910
8911 case OP_BUFFER_CREATE:
8912 return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8913 case OP_BUFFER_DESTROY:
8914 return de::MovePtr<Command>(new DestroyBuffer());
8915 case OP_BUFFER_BINDMEMORY:
8916 return de::MovePtr<Command>(new BindBufferMemory());
8917
8918 case OP_IMAGE_CREATE:
8919 return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8920 case OP_IMAGE_DESTROY:
8921 return de::MovePtr<Command>(new DestroyImage());
8922 case OP_IMAGE_BINDMEMORY:
8923 return de::MovePtr<Command>(new BindImageMemory());
8924
8925 case OP_QUEUE_WAIT_FOR_IDLE:
8926 return de::MovePtr<Command>(new QueueWaitIdle());
8927 case OP_DEVICE_WAIT_FOR_IDLE:
8928 return de::MovePtr<Command>(new DeviceWaitIdle());
8929
8930 default:
8931 DE_FATAL("Unknown op");
8932 return de::MovePtr<Command>(DE_NULL);
8933 }
8934 }
8935
/*--------------------------------------------------------------------
 * Instantiate a command-buffer-level CmdCommand for 'op'.
 *
 * NOTE(review): the amount and order of randomness consumed from 'rng'
 * here must exactly mirror what applyOp() consumes for the same op —
 * callers assert the two generators stay in sync after each command.
 *------------------------------------------------------------------*/
de::MovePtr<CmdCommand> createCmdCommand(de::Random &rng, const State &state, Op op, Usage usage)
{
    switch (op)
    {
    // Transfer writes into the buffer; the uint32 seeds the data pattern.
    case OP_BUFFER_FILL:
        return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
    case OP_BUFFER_UPDATE:
        return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
    case OP_BUFFER_COPY_TO_BUFFER:
        return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
    case OP_BUFFER_COPY_FROM_BUFFER:
        return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));

    case OP_BUFFER_COPY_TO_IMAGE:
        return de::MovePtr<CmdCommand>(new BufferCopyToImage());
    case OP_BUFFER_COPY_FROM_IMAGE:
        return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));

    case OP_IMAGE_TRANSITION_LAYOUT:
    {
        DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
        DE_ASSERT(state.hasImage);
        DE_ASSERT(state.hasBoundImageMemory);

        // Mostly transition from the current layout; occasionally (10%) use
        // UNDEFINED as the source, which legally discards the image contents.
        const vk::VkImageLayout srcLayout = rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
        const vk::VkImageLayout dstLayout = getRandomNextLayout(rng, usage, srcLayout);

        vk::VkPipelineStageFlags dirtySrcStages;
        vk::VkAccessFlags dirtySrcAccesses;
        vk::VkPipelineStageFlags dirtyDstStages;
        vk::VkAccessFlags dirtyDstAccesses;

        vk::VkPipelineStageFlags srcStages;
        vk::VkAccessFlags srcAccesses;
        vk::VkPipelineStageFlags dstStages;
        vk::VkAccessFlags dstAccesses;

        state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);

        // Source scope covers everything currently dirty; only the
        // destination scope is randomly masked below.
        srcStages   = dirtySrcStages;
        srcAccesses = dirtySrcAccesses;

        dstStages   = state.cache.getAllowedStages() & rng.getUint32();
        dstAccesses = state.cache.getAllowedAcceses() & rng.getUint32();

        // If there are no bits in dst stage mask use all stages
        dstStages = dstStages ? dstStages : state.cache.getAllowedStages();

        // Stage masks must not be empty; fall back to the dst stages.
        if (!srcStages)
            srcStages = dstStages;

        // Drop access bits that are invalid for the chosen stage masks.
        removeIllegalAccessFlags(dstAccesses, dstStages);
        removeIllegalAccessFlags(srcAccesses, srcStages);

        return de::MovePtr<CmdCommand>(
            new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
    }

    // Image copies/blits need the current layout to record correct barriers.
    case OP_IMAGE_COPY_TO_BUFFER:
        return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
    case OP_IMAGE_COPY_FROM_BUFFER:
        return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
    case OP_IMAGE_COPY_TO_IMAGE:
        return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
    case OP_IMAGE_COPY_FROM_IMAGE:
        return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
    case OP_IMAGE_BLIT_TO_IMAGE:
    {
        // Randomly pick identity or 2x scaling blit.
        const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
        return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
    }

    case OP_IMAGE_BLIT_FROM_IMAGE:
    {
        const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
        return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
    }

    case OP_PIPELINE_BARRIER_GLOBAL:
    case OP_PIPELINE_BARRIER_BUFFER:
    case OP_PIPELINE_BARRIER_IMAGE:
    {
        vk::VkPipelineStageFlags dirtySrcStages;
        vk::VkAccessFlags dirtySrcAccesses;
        vk::VkPipelineStageFlags dirtyDstStages;
        vk::VkAccessFlags dirtyDstAccesses;

        vk::VkPipelineStageFlags srcStages;
        vk::VkAccessFlags srcAccesses;
        vk::VkPipelineStageFlags dstStages;
        vk::VkAccessFlags dstAccesses;

        state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);

        // Try masking some random bits
        srcStages   = dirtySrcStages & rng.getUint32();
        srcAccesses = dirtySrcAccesses & rng.getUint32();

        dstStages   = dirtyDstStages & rng.getUint32();
        dstAccesses = dirtyDstAccesses & rng.getUint32();

        // If there are no bits in stage mask use the original dirty stages
        srcStages = srcStages ? srcStages : dirtySrcStages;
        dstStages = dstStages ? dstStages : dirtyDstStages;

        if (!srcStages)
            srcStages = dstStages;

        removeIllegalAccessFlags(dstAccesses, dstStages);
        removeIllegalAccessFlags(srcAccesses, srcStages);

        // Map the op to the matching barrier granularity.
        PipelineBarrier::Type type;
        switch (op)
        {
        case OP_PIPELINE_BARRIER_IMAGE:
            type = PipelineBarrier::TYPE_IMAGE;
            break;
        case OP_PIPELINE_BARRIER_BUFFER:
            type = PipelineBarrier::TYPE_BUFFER;
            break;
        case OP_PIPELINE_BARRIER_GLOBAL:
            type = PipelineBarrier::TYPE_GLOBAL;
            break;
        default:
            type = PipelineBarrier::TYPE_LAST;
            DE_FATAL("Unknown op");
        }

        // Image barriers carry the current layout (no layout change here);
        // buffer/global barriers have no layout component.
        if (type == PipelineBarrier::TYPE_IMAGE)
            return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type,
                                                               tcu::just(state.imageLayout)));
        else
            return de::MovePtr<CmdCommand>(
                new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::Nothing));
    }

    default:
        DE_FATAL("Unknown op");
        return de::MovePtr<CmdCommand>(DE_NULL);
    }
}
9078
createRenderPassCommand(de::Random &,const State &,const TestConfig & testConfig,Op op)9079 de::MovePtr<RenderPassCommand> createRenderPassCommand(de::Random &, const State &, const TestConfig &testConfig, Op op)
9080 {
9081 switch (op)
9082 {
9083 case OP_RENDER_VERTEX_BUFFER:
9084 return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer(testConfig.vertexBufferStride));
9085 case OP_RENDER_INDEX_BUFFER:
9086 return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
9087
9088 case OP_RENDER_VERTEX_UNIFORM_BUFFER:
9089 return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
9090 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
9091 return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
9092
9093 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
9094 return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
9095 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
9096 return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
9097
9098 case OP_RENDER_VERTEX_STORAGE_BUFFER:
9099 return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
9100 case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
9101 return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
9102
9103 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
9104 return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
9105 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
9106 return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
9107
9108 case OP_RENDER_VERTEX_STORAGE_IMAGE:
9109 return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
9110 case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
9111 return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
9112
9113 case OP_RENDER_VERTEX_SAMPLED_IMAGE:
9114 return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
9115 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
9116 return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
9117
9118 default:
9119 DE_FATAL("Unknown op");
9120 return de::MovePtr<RenderPassCommand>(DE_NULL);
9121 }
9122 }
9123
createRenderPassCommands(const Memory & memory,de::Random & nextOpRng,State & state,const TestConfig & testConfig,size_t & opNdx,size_t opCount)9124 de::MovePtr<CmdCommand> createRenderPassCommands(const Memory &memory, de::Random &nextOpRng, State &state,
9125 const TestConfig &testConfig, size_t &opNdx, size_t opCount)
9126 {
9127 vector<RenderPassCommand *> commands;
9128
9129 try
9130 {
9131 for (; opNdx < opCount; opNdx++)
9132 {
9133 vector<Op> ops;
9134
9135 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9136
9137 DE_ASSERT(!ops.empty());
9138
9139 {
9140 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9141
9142 if (op == OP_RENDERPASS_END)
9143 {
9144 break;
9145 }
9146 else
9147 {
9148 de::Random rng(state.rng);
9149
9150 commands.push_back(createRenderPassCommand(rng, state, testConfig, op).release());
9151 applyOp(state, memory, op, testConfig.usage);
9152
9153 DE_ASSERT(state.rng == rng);
9154 }
9155 }
9156 }
9157
9158 applyOp(state, memory, OP_RENDERPASS_END, testConfig.usage);
9159 return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
9160 }
9161 catch (...)
9162 {
9163 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9164 delete commands[commandNdx];
9165
9166 throw;
9167 }
9168 }
9169
createSecondaryCmdCommands(const Memory & memory,de::Random & nextOpRng,State & state,Usage usage,size_t & opNdx,size_t opCount)9170 de::MovePtr<CmdCommand> createSecondaryCmdCommands(const Memory &memory, de::Random &nextOpRng, State &state,
9171 Usage usage, size_t &opNdx, size_t opCount)
9172 {
9173 vector<CmdCommand *> commands;
9174
9175 try
9176 {
9177 for (; opNdx < opCount; opNdx++)
9178 {
9179 vector<Op> ops;
9180
9181 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9182
9183 DE_ASSERT(!ops.empty());
9184
9185 {
9186 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9187
9188 if (op == OP_SECONDARY_COMMAND_BUFFER_END)
9189 {
9190 break;
9191 }
9192 else
9193 {
9194 de::Random rng(state.rng);
9195
9196 commands.push_back(createCmdCommand(rng, state, op, usage).release());
9197 applyOp(state, memory, op, usage);
9198
9199 DE_ASSERT(state.rng == rng);
9200 }
9201 }
9202 }
9203
9204 applyOp(state, memory, OP_SECONDARY_COMMAND_BUFFER_END, usage);
9205 return de::MovePtr<CmdCommand>(new ExecuteSecondaryCommandBuffer(commands));
9206 }
9207 catch (...)
9208 {
9209 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9210 delete commands[commandNdx];
9211
9212 throw;
9213 }
9214 }
9215
createCmdCommands(const Memory & memory,de::Random & nextOpRng,State & state,const TestConfig & testConfig,size_t & opNdx,size_t opCount)9216 de::MovePtr<Command> createCmdCommands(const Memory &memory, de::Random &nextOpRng, State &state,
9217 const TestConfig &testConfig, size_t &opNdx, size_t opCount)
9218 {
9219 vector<CmdCommand *> commands;
9220
9221 try
9222 {
9223 // Insert a mostly-full barrier to order this work wrt previous command buffer.
9224 commands.push_back(new PipelineBarrier(state.cache.getAllowedStages(), state.cache.getAllowedAcceses(),
9225 state.cache.getAllowedStages(), state.cache.getAllowedAcceses(),
9226 PipelineBarrier::TYPE_GLOBAL, tcu::Nothing));
9227
9228 for (; opNdx < opCount; opNdx++)
9229 {
9230 vector<Op> ops;
9231
9232 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9233
9234 DE_ASSERT(!ops.empty());
9235
9236 {
9237 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9238
9239 if (op == OP_COMMAND_BUFFER_END)
9240 {
9241 break;
9242 }
9243 else
9244 {
9245 // \note Command needs to known the state before the operation
9246 if (op == OP_RENDERPASS_BEGIN)
9247 {
9248 applyOp(state, memory, op, testConfig.usage);
9249 commands.push_back(
9250 createRenderPassCommands(memory, nextOpRng, state, testConfig, opNdx, opCount).release());
9251 }
9252 else if (op == OP_SECONDARY_COMMAND_BUFFER_BEGIN)
9253 {
9254 applyOp(state, memory, op, testConfig.usage);
9255 commands.push_back(
9256 createSecondaryCmdCommands(memory, nextOpRng, state, testConfig.usage, opNdx, opCount)
9257 .release());
9258 }
9259 else
9260 {
9261 de::Random rng(state.rng);
9262
9263 commands.push_back(createCmdCommand(rng, state, op, testConfig.usage).release());
9264 applyOp(state, memory, op, testConfig.usage);
9265
9266 DE_ASSERT(state.rng == rng);
9267 }
9268 }
9269 }
9270 }
9271
9272 applyOp(state, memory, OP_COMMAND_BUFFER_END, testConfig.usage);
9273 return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
9274 }
9275 catch (...)
9276 {
9277 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9278 delete commands[commandNdx];
9279
9280 throw;
9281 }
9282 }
9283
createCommands(vector<Command * > & commands,uint32_t seed,const Memory & memory,const TestConfig & testConfig,size_t opCount)9284 void createCommands(vector<Command *> &commands, uint32_t seed, const Memory &memory, const TestConfig &testConfig,
9285 size_t opCount)
9286 {
9287 State state(testConfig.usage, seed);
9288 // Used to select next operation only
9289 de::Random nextOpRng(seed ^ 12930809);
9290
9291 commands.reserve(opCount);
9292
9293 for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9294 {
9295 vector<Op> ops;
9296
9297 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9298
9299 DE_ASSERT(!ops.empty());
9300
9301 {
9302 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9303
9304 if (op == OP_COMMAND_BUFFER_BEGIN)
9305 {
9306 applyOp(state, memory, op, testConfig.usage);
9307 commands.push_back(createCmdCommands(memory, nextOpRng, state, testConfig, opNdx, opCount).release());
9308 }
9309 else
9310 {
9311 de::Random rng(state.rng);
9312
9313 commands.push_back(createHostCommand(op, rng, testConfig.usage, testConfig.sharing).release());
9314 applyOp(state, memory, op, testConfig.usage);
9315
9316 // Make sure that random generator is in sync
9317 DE_ASSERT(state.rng == rng);
9318 }
9319 }
9320 }
9321
9322 // Clean up resources
9323 if (state.hasBuffer && state.hasImage)
9324 {
9325 if (!state.queueIdle)
9326 commands.push_back(new QueueWaitIdle());
9327
9328 if (state.hasBuffer)
9329 commands.push_back(new DestroyBuffer());
9330
9331 if (state.hasImage)
9332 commands.push_back(new DestroyImage());
9333 }
9334 }
9335
// Multi-stage test instance: for every memory type on the device, runs
// several iterations of randomly generated command sequences, each driven
// through the stages create -> prepare -> execute -> verify.
class MemoryTestInstance : public TestInstance
{
public:
    // A stage returns true while more work remains; false ends the test.
    typedef bool (MemoryTestInstance::*StageFunc)(void);

    MemoryTestInstance(::vkt::Context &context, const TestConfig &config);
    ~MemoryTestInstance(void);

    tcu::TestStatus iterate(void);

private:
    const TestConfig m_config;
    const size_t m_iterationCount; // Random command sequences per memory type
    const size_t m_opCount;        // Operations per generated sequence
    const vk::VkPhysicalDeviceMemoryProperties m_memoryProperties;
    uint32_t m_memoryTypeNdx; // Memory type currently under test
    size_t m_iteration;       // Current iteration within the memory type
    StageFunc m_stage;        // Next stage to run from iterate(); DE_NULL when finished
    tcu::ResultCollector m_resultCollector;

    vector<Command *> m_commands; // Owned raw pointers; freed in resetResources()
    MovePtr<Memory> m_memory;
    MovePtr<Context> m_renderContext;
    MovePtr<PrepareContext> m_prepareContext;

    // Advance to the next iteration (or memory type when exhausted).
    bool nextIteration(void);
    // Advance to the next memory type; returns false when all are done.
    bool nextMemoryType(void);

    // Stage functions, run one per iterate() call.
    bool createCommandsAndAllocateMemory(void);
    bool prepare(void);
    bool execute(void);
    bool verify(void);
    void resetResources(void);
};
9370
resetResources(void)9371 void MemoryTestInstance::resetResources(void)
9372 {
9373 const vk::DeviceInterface &vkd = m_context.getDeviceInterface();
9374 const vk::VkDevice device = m_context.getDevice();
9375
9376 VK_CHECK(vkd.deviceWaitIdle(device));
9377
9378 for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9379 {
9380 delete m_commands[commandNdx];
9381 m_commands[commandNdx] = DE_NULL;
9382 }
9383
9384 m_commands.clear();
9385 m_prepareContext.clear();
9386 m_memory.clear();
9387 }
9388
nextIteration(void)9389 bool MemoryTestInstance::nextIteration(void)
9390 {
9391 m_iteration++;
9392
9393 if (m_iteration < m_iterationCount)
9394 {
9395 resetResources();
9396 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9397 return true;
9398 }
9399 else
9400 return nextMemoryType();
9401 }
9402
nextMemoryType(void)9403 bool MemoryTestInstance::nextMemoryType(void)
9404 {
9405 resetResources();
9406
9407 DE_ASSERT(m_commands.empty());
9408
9409 m_memoryTypeNdx++;
9410
9411 if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
9412 {
9413 m_iteration = 0;
9414 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9415
9416 return true;
9417 }
9418 else
9419 {
9420 m_stage = DE_NULL;
9421 return false;
9422 }
9423 }
9424
// Logs the test configuration and the device's memory heaps/types, then
// builds the rendering Context around the universal queue. Memory itself
// is allocated later, per memory type, in createCommandsAndAllocateMemory().
MemoryTestInstance::MemoryTestInstance(::vkt::Context &context, const TestConfig &config)
    : TestInstance(context)
    , m_config(config)
    , m_iterationCount(5)
    , m_opCount(50)
    , m_memoryProperties(
          vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
    , m_memoryTypeNdx(0)
    , m_iteration(0)
    , m_stage(&MemoryTestInstance::createCommandsAndAllocateMemory)
    , m_resultCollector(context.getTestContext().getLog())

    , m_memory(DE_NULL)
{
    TestLog &log = context.getTestContext().getLog();
    {
        // Summarize the configuration under test.
        const tcu::ScopedLogSection section(log, "TestCaseInfo", "Test Case Info");

        log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
        log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
        log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
    }

    {
        // Dump every heap and memory type so failures can be matched to
        // the memory properties of the device that produced them.
        const tcu::ScopedLogSection section(log, "MemoryProperties", "Memory Properties");

        for (uint32_t heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
        {
            const tcu::ScopedLogSection heapSection(log, "Heap" + de::toString(heapNdx),
                                                    "Heap " + de::toString(heapNdx));

            log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
            log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags
                << TestLog::EndMessage;
        }

        for (uint32_t memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
        {
            const tcu::ScopedLogSection memoryTypeSection(log, "MemoryType" + de::toString(memoryTypeNdx),
                                                          "Memory type " + de::toString(memoryTypeNdx));

            log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags
                << TestLog::EndMessage;
            log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex
                << TestLog::EndMessage;
        }
    }

    {
        // Build the rendering context on the universal queue; all command
        // submission in this test goes through this single queue.
        const vk::InstanceInterface &vki = context.getInstanceInterface();
        const vk::VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
        const vk::DeviceInterface &vkd = context.getDeviceInterface();
        const vk::VkDevice device = context.getDevice();
        const vk::VkQueue queue = context.getUniversalQueue();
        const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();
        vector<pair<uint32_t, vk::VkQueue>> queues;

        queues.push_back(std::make_pair(queueFamilyIndex, queue));

        m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex,
                                                       queues, context.getBinaryCollection()));
    }
}
9488
// Waits for the device to idle and frees any commands/memory still held.
MemoryTestInstance::~MemoryTestInstance(void)
{
    resetResources();
}
9493
// Stage 1: for the current memory type, determine the largest buffer and
// RGBA8 image that fit, allocate the Memory, and generate the random
// command sequence. Skips to the next memory type when the type cannot
// support any of the requested usages.
bool MemoryTestInstance::createCommandsAndAllocateMemory(void)
{
    const vk::VkDevice device = m_context.getDevice();
    TestLog &log = m_context.getTestContext().getLog();
    const vk::InstanceInterface &vki = m_context.getInstanceInterface();
    const vk::VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
    const vk::DeviceInterface &vkd = m_context.getDeviceInterface();
    const vk::VkPhysicalDeviceMemoryProperties memoryProperties =
        vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
    const tcu::ScopedLogSection section(
        log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
        "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
    const vector<uint32_t> &queues = m_renderContext->getQueueFamilies();

    DE_ASSERT(m_commands.empty());

    // Host access requires a host-visible memory type; skip the type otherwise.
    if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE) &&
        !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
    {
        log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;

        return nextMemoryType();
    }
    else
    {
        try
        {
            const vk::VkBufferUsageFlags bufferUsage = usageToBufferUsageFlags(m_config.usage);
            const vk::VkImageUsageFlags imageUsage = usageToImageUsageFlags(m_config.usage);
            // Probe the largest buffer creatable from this memory type
            // (rounded so it can be viewed as a WxHx4 image), zero if no
            // buffer usage was requested.
            const vk::VkDeviceSize maxBufferSize =
                bufferUsage != 0 ? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing,
                                                                            queues, m_config.size, m_memoryTypeNdx)) :
                                   0;
            // Likewise probe the largest RGBA8 image dimensions.
            const IVec2 maxImageSize = imageUsage != 0 ?
                                           findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues,
                                                                 m_config.size, m_memoryTypeNdx) :
                                           IVec2(0, 0);

            log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
            log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;

            // Skip tests if there are no supported operations
            if (maxBufferSize == 0 && maxImageSize[0] == 0 &&
                (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)) == 0)
            {
                log << TestLog::Message << "Skipping memory type. None of the usages are supported."
                    << TestLog::EndMessage;

                return nextMemoryType();
            }
            else
            {
                // Seed varies per (iteration, memory type) pair so every
                // pass generates a distinct command sequence.
                const uint32_t seed =
                    2830980989u ^
                    deUint32Hash((uint32_t)(m_iteration)*m_memoryProperties.memoryTypeCount + m_memoryTypeNdx);

                m_memory = MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx,
                                                      maxBufferSize, maxImageSize[0], maxImageSize[1]));

                log << TestLog::Message << "Create commands" << TestLog::EndMessage;
                createCommands(m_commands, seed, *m_memory, m_config, m_opCount);

                m_stage = &MemoryTestInstance::prepare;
                return true;
            }
        }
        catch (const tcu::TestError &e)
        {
            // Record the failure and keep going with the next memory type.
            m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
            return nextMemoryType();
        }
    }
}
9567
prepare(void)9568 bool MemoryTestInstance::prepare(void)
9569 {
9570 TestLog &log = m_context.getTestContext().getLog();
9571 const tcu::ScopedLogSection section(
9572 log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9573 "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration " + de::toString(m_iteration));
9574
9575 m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9576
9577 DE_ASSERT(!m_commands.empty());
9578
9579 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9580 {
9581 Command &command = *m_commands[cmdNdx];
9582
9583 try
9584 {
9585 command.prepare(*m_prepareContext);
9586 }
9587 catch (const tcu::TestError &e)
9588 {
9589 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() +
9590 " failed to prepare, got exception: " + string(e.getMessage()));
9591 return nextMemoryType();
9592 }
9593 }
9594
9595 m_stage = &MemoryTestInstance::execute;
9596 return true;
9597 }
9598
execute(void)9599 bool MemoryTestInstance::execute(void)
9600 {
9601 TestLog &log = m_context.getTestContext().getLog();
9602 const tcu::ScopedLogSection section(
9603 log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9604 "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9605 ExecuteContext executeContext(*m_renderContext);
9606 const vk::VkDevice device = m_context.getDevice();
9607 const vk::DeviceInterface &vkd = m_context.getDeviceInterface();
9608
9609 DE_ASSERT(!m_commands.empty());
9610
9611 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9612 {
9613 Command &command = *m_commands[cmdNdx];
9614
9615 try
9616 {
9617 command.execute(executeContext);
9618 }
9619 catch (const tcu::TestError &e)
9620 {
9621 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() +
9622 " failed to execute, got exception: " + string(e.getMessage()));
9623 return nextIteration();
9624 }
9625 }
9626
9627 VK_CHECK(vkd.deviceWaitIdle(device));
9628
9629 m_stage = &MemoryTestInstance::verify;
9630 return true;
9631 }
9632
verify(void)9633 bool MemoryTestInstance::verify(void)
9634 {
9635 DE_ASSERT(!m_commands.empty());
9636
9637 TestLog &log = m_context.getTestContext().getLog();
9638 const tcu::ScopedLogSection section(
9639 log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9640 "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9641 VerifyContext verifyContext(log, m_resultCollector, *m_renderContext, m_config.size);
9642
9643 log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9644
9645 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9646 {
9647 Command &command = *m_commands[cmdNdx];
9648
9649 try
9650 {
9651 command.verify(verifyContext, cmdNdx);
9652 }
9653 catch (const tcu::TestError &e)
9654 {
9655 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() +
9656 " failed to verify, got exception: " + string(e.getMessage()));
9657 return nextIteration();
9658 }
9659 }
9660
9661 return nextIteration();
9662 }
9663
iterate(void)9664 tcu::TestStatus MemoryTestInstance::iterate(void)
9665 {
9666 if ((this->*m_stage)())
9667 return tcu::TestStatus::incomplete();
9668 else
9669 return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
9670 }
9671
// Program provider: registers the GLSL shader sources required by the
// buffer/image usages enabled in the given TestConfig, plus the common
// full-screen-quad and solid-white shaders used by every configuration.
struct AddPrograms
{
    void init(vk::SourceCollections &sources, TestConfig config) const
    {
        // Vertex buffer rendering
        if (config.usage & USAGE_VERTEX_BUFFER)
        {
            // Positions arrive as normalized [0,1] coords; map them just inside
            // the [-1,1] clip range so every point lands on a distinct pixel.
            const char *const vertexShader = "#version 310 es\n"
                                             "layout(location = 0) in highp vec2 a_position;\n"
                                             "void main (void) {\n"
                                             "\tgl_PointSize = 1.0;\n"
                                             "\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
                                             "}\n";

            sources.glslSources.add("vertex-buffer.vert") << glu::VertexSource(vertexShader);
        }

        // Index buffer rendering
        if (config.usage & USAGE_INDEX_BUFFER)
        {
            // Each index is decoded as a coordinate in a 256x256 grid.
            const char *const vertexShader =
                "#version 310 es\n"
                "precision highp float;\n"
                "void main (void) {\n"
                "\tgl_PointSize = 1.0;\n"
                "\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
                "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
                "}\n";

            sources.glslSources.add("index-buffer.vert") << glu::VertexSource(vertexShader);
        }

        if (config.usage & USAGE_UNIFORM_BUFFER)
        {
            {
                // Vertex uniform buffer rendering: each vertex extracts a 16-bit
                // value from the uvec4 array and decodes it as an x/y position.
                std::ostringstream vertexShader;

                vertexShader << "#version 310 es\n"
                                "precision highp float;\n"
                                "layout(set=0, binding=0) uniform Block\n"
                                "{\n"
                                "\thighp uvec4 values["
                             << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(uint32_t) * 4))
                             << "];\n"
                                "} block;\n"
                                "void main (void) {\n"
                                "\tgl_PointSize = 1.0;\n"
                                "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
                                "\thighp uint val;\n"
                                "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
                                "\t\tval = vecVal.x;\n"
                                "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
                                "\t\tval = vecVal.y;\n"
                                "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
                                "\t\tval = vecVal.z;\n"
                                "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
                                "\t\tval = vecVal.w;\n"
                                "\tif ((gl_VertexIndex % 2) == 0)\n"
                                "\t\tval = val & 0xFFFFu;\n"
                                "\telse\n"
                                "\t\tval = val >> 16u;\n"
                                "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
                                "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
                                "}\n";

                sources.glslSources.add("uniform-buffer.vert") << glu::VertexSource(vertexShader.str());
            }

            {
                // Fragment uniform buffer rendering: each pixel chases a chain of
                // values through the buffer, pushC.valuesPerPixel hops long.
                const size_t arraySize    = MAX_UNIFORM_BUFFER_SIZE / (sizeof(uint32_t) * 4);
                const size_t arrayIntSize = arraySize * 4;
                std::ostringstream fragmentShader;

                fragmentShader << "#version 310 es\n"
                                  "precision highp float;\n"
                                  "precision highp int;\n"
                                  "layout(location = 0) out highp vec4 o_color;\n"
                                  "layout(set=0, binding=0) uniform Block\n"
                                  "{\n"
                                  "\thighp uvec4 values["
                               << arraySize
                               << "];\n"
                                  "} block;\n"
                                  "layout(push_constant) uniform PushC\n"
                                  "{\n"
                                  "\tuint callId;\n"
                                  "\tuint valuesPerPixel;\n"
                                  "\tuint bufferSize;\n"
                                  "} pushC;\n"
                                  "void main (void) {\n"
                                  "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
                                  "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * ("
                               << arrayIntSize
                               << "u / pushC.valuesPerPixel))\n"
                                  "\t\tdiscard;\n"
                                  "\thighp uint value = id;\n"
                                  "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
                                  "\t{\n"
                                  "\t\thighp uvec4 vecVal = block.values[value % pushC.bufferSize];\n"
                                  "\t\tif ((value % 4u) == 0u)\n"
                                  "\t\t\tvalue = vecVal.x;\n"
                                  "\t\telse if ((value % 4u) == 1u)\n"
                                  "\t\t\tvalue = vecVal.y;\n"
                                  "\t\telse if ((value % 4u) == 2u)\n"
                                  "\t\t\tvalue = vecVal.z;\n"
                                  "\t\telse if ((value % 4u) == 3u)\n"
                                  "\t\t\tvalue = vecVal.w;\n"
                                  "\t}\n"
                                  "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & "
                                  "0xFFu, (value >> 24u) & 0xFFu);\n"
                                  "\to_color = vec4(valueOut) / vec4(255.0);\n"
                                  "}\n";

                sources.glslSources.add("uniform-buffer.frag") << glu::FragmentSource(fragmentShader.str());
            }
        }

        if (config.usage & USAGE_STORAGE_BUFFER)
        {
            {
                // Vertex storage buffer rendering
                const char *const vertexShader = "#version 310 es\n"
                                                 "precision highp float;\n"
                                                 "readonly layout(set=0, binding=0) buffer Block\n"
                                                 "{\n"
                                                 "\thighp uvec4 values[];\n"
                                                 "} block;\n"
                                                 "void main (void) {\n"
                                                 "\tgl_PointSize = 1.0;\n"
                                                 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
                                                 "\thighp uint val;\n"
                                                 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
                                                 "\t\tval = vecVal.x;\n"
                                                 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
                                                 "\t\tval = vecVal.y;\n"
                                                 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
                                                 "\t\tval = vecVal.z;\n"
                                                 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
                                                 "\t\tval = vecVal.w;\n"
                                                 "\tif ((gl_VertexIndex % 2) == 0)\n"
                                                 "\t\tval = val & 0xFFFFu;\n"
                                                 "\telse\n"
                                                 "\t\tval = val >> 16u;\n"
                                                 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
                                                 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
                                                 "}\n";

                sources.glslSources.add("storage-buffer.vert") << glu::VertexSource(vertexShader);
            }

            {
                // Fragment storage buffer rendering; unlike the uniform variant the
                // array length is unsized, so the size arrives via push constants.
                std::ostringstream fragmentShader;

                fragmentShader << "#version 310 es\n"
                                  "precision highp float;\n"
                                  "precision highp int;\n"
                                  "layout(location = 0) out highp vec4 o_color;\n"
                                  "layout(set=0, binding=0) buffer Block\n"
                                  "{\n"
                                  "\thighp uvec4 values[];\n"
                                  "} block;\n"
                                  "layout(push_constant) uniform PushC\n"
                                  "{\n"
                                  "\tuint valuesPerPixel;\n"
                                  "\tuint bufferSize;\n"
                                  "} pushC;\n"
                                  "void main (void) {\n"
                                  "\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
                                  "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
                                  "\thighp uint value = id;\n"
                                  "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
                                  "\t{\n"
                                  "\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
                                  "\t\tif ((value % 4u) == 0u)\n"
                                  "\t\t\tvalue = vecVal.x;\n"
                                  "\t\telse if ((value % 4u) == 1u)\n"
                                  "\t\t\tvalue = vecVal.y;\n"
                                  "\t\telse if ((value % 4u) == 2u)\n"
                                  "\t\t\tvalue = vecVal.z;\n"
                                  "\t\telse if ((value % 4u) == 3u)\n"
                                  "\t\t\tvalue = vecVal.w;\n"
                                  "\t}\n"
                                  "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & "
                                  "0xFFu, (value >> 24u) & 0xFFu);\n"
                                  "\to_color = vec4(valueOut) / vec4(255.0);\n"
                                  "}\n";

                sources.glslSources.add("storage-buffer.frag") << glu::FragmentSource(fragmentShader.str());
            }
        }

        if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
        {
            {
                // Vertex uniform texel buffer rendering
                const char *const vertexShader = "#version 310 es\n"
                                                 "#extension GL_EXT_texture_buffer : require\n"
                                                 "precision highp float;\n"
                                                 "layout(set=0, binding=0) uniform highp utextureBuffer u_sampler;\n"
                                                 "void main (void) {\n"
                                                 "\tgl_PointSize = 1.0;\n"
                                                 "\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
                                                 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
                                                 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
                                                 "}\n";

                sources.glslSources.add("uniform-texel-buffer.vert") << glu::VertexSource(vertexShader);
            }

            {
                // Fragment uniform texel buffer rendering
                const char *const fragmentShader =
                    "#version 310 es\n"
                    "#extension GL_EXT_texture_buffer : require\n"
                    "#extension GL_EXT_samplerless_texture_functions : require\n"
                    "precision highp float;\n"
                    "precision highp int;\n"
                    "layout(set=0, binding=0) uniform highp utextureBuffer u_sampler;\n"
                    "layout(location = 0) out highp vec4 o_color;\n"
                    "layout(push_constant) uniform PushC\n"
                    "{\n"
                    "\tuint callId;\n"
                    "\tuint valuesPerPixel;\n"
                    "\tuint maxTexelCount;\n"
                    "} pushC;\n"
                    "void main (void) {\n"
                    "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
                    "\thighp uint value = id;\n"
                    "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / "
                    "pushC.valuesPerPixel))\n"
                    "\t\tdiscard;\n"
                    "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
                    "\t{\n"
                    "\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
                    "\t}\n"
                    "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> "
                    "24u) & 0xFFu);\n"
                    "\to_color = vec4(valueOut) / vec4(255.0);\n"
                    "}\n";

                sources.glslSources.add("uniform-texel-buffer.frag") << glu::FragmentSource(fragmentShader);
            }
        }

        if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
        {
            {
                // Vertex storage texel buffer rendering
                const char *const vertexShader =
                    "#version 450\n"
                    "#extension GL_EXT_texture_buffer : require\n"
                    "precision highp float;\n"
                    "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
                    "out gl_PerVertex {\n"
                    "\tvec4 gl_Position;\n"
                    "\tfloat gl_PointSize;\n"
                    "};\n"
                    "void main (void) {\n"
                    "\tgl_PointSize = 1.0;\n"
                    "\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
                    "\tif (gl_VertexIndex % 2 == 0)\n"
                    "\t\tval = val & 0xFFFFu;\n"
                    "\telse\n"
                    "\t\tval = val >> 16;\n"
                    "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
                    "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
                    "}\n";

                sources.glslSources.add("storage-texel-buffer.vert") << glu::VertexSource(vertexShader);
            }
            {
                // Fragment storage texel buffer rendering
                const char *const fragmentShader =
                    "#version 310 es\n"
                    "#extension GL_EXT_texture_buffer : require\n"
                    "precision highp float;\n"
                    "precision highp int;\n"
                    "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
                    "layout(location = 0) out highp vec4 o_color;\n"
                    "layout(push_constant) uniform PushC\n"
                    "{\n"
                    "\tuint callId;\n"
                    "\tuint valuesPerPixel;\n"
                    "\tuint maxTexelCount;\n"
                    "\tuint width;\n"
                    "} pushC;\n"
                    "void main (void) {\n"
                    "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
                    "\thighp uint value = id;\n"
                    "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / "
                    "pushC.valuesPerPixel))\n"
                    "\t\tdiscard;\n"
                    "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
                    "\t{\n"
                    "\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
                    "\t}\n"
                    "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> "
                    "24u) & 0xFFu);\n"
                    "\to_color = vec4(valueOut) / vec4(255.0);\n"
                    "}\n";

                sources.glslSources.add("storage-texel-buffer.frag") << glu::FragmentSource(fragmentShader);
            }
        }

        if (config.usage & USAGE_STORAGE_IMAGE)
        {
            {
                // Vertex storage image
                const char *const vertexShader =
                    "#version 450\n"
                    "precision highp float;\n"
                    "layout(set=0, binding=0, rgba8) uniform readonly image2D u_image;\n"
                    "out gl_PerVertex {\n"
                    "\tvec4 gl_Position;\n"
                    "\tfloat gl_PointSize;\n"
                    "};\n"
                    "void main (void) {\n"
                    "\tgl_PointSize = 1.0;\n"
                    "\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, "
                    "(gl_VertexIndex / 2) % imageSize(u_image).x));\n"
                    "\thighp vec2 pos;\n"
                    "\tif (gl_VertexIndex % 2 == 0)\n"
                    "\t\tpos = val.xy;\n"
                    "\telse\n"
                    "\t\tpos = val.zw;\n"
                    "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
                    "}\n";

                sources.glslSources.add("storage-image.vert") << glu::VertexSource(vertexShader);
            }
            {
                // Fragment storage image
                const char *const fragmentShader =
                    "#version 450\n"
                    "#extension GL_EXT_texture_buffer : require\n"
                    "precision highp float;\n"
                    "layout(set=0, binding=0, rgba8) uniform readonly image2D u_image;\n"
                    "layout(location = 0) out highp vec4 o_color;\n"
                    "void main (void) {\n"
                    "\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
                    "\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
                    "\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
                    "\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
                    "\t{\n"
                    "\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z * 256u + (value.x ^ value.z)) "
                    "% size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
                    "\t\tvalue = uvec4(uint(round(floatValue.x * 255.0)), uint(round(floatValue.y * 255.0)), "
                    "uint(round(floatValue.z * 255.0)), uint(round(floatValue.w * 255.0)));\n"
                    "\t}\n"
                    "\to_color = vec4(value) / vec4(255.0);\n"
                    "}\n";

                sources.glslSources.add("storage-image.frag") << glu::FragmentSource(fragmentShader);
            }
        }

        if (config.usage & USAGE_SAMPLED_IMAGE)
        {
            {
                // Vertex sampled image rendering (mirrors the storage-image vertex
                // shader but reads via texelFetch on a combined image sampler)
                const char *const vertexShader =
                    "#version 450\n"
                    "precision highp float;\n"
                    "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
                    "out gl_PerVertex {\n"
                    "\tvec4 gl_Position;\n"
                    "\tfloat gl_PointSize;\n"
                    "};\n"
                    "void main (void) {\n"
                    "\tgl_PointSize = 1.0;\n"
                    "\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, "
                    "0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
                    "\thighp vec2 pos;\n"
                    "\tif (gl_VertexIndex % 2 == 0)\n"
                    "\t\tpos = val.xy;\n"
                    "\telse\n"
                    "\t\tpos = val.zw;\n"
                    "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
                    "}\n";

                sources.glslSources.add("sampled-image.vert") << glu::VertexSource(vertexShader);
            }
            {
                // Fragment sampled image rendering
                const char *const fragmentShader =
                    "#version 450\n"
                    "#extension GL_EXT_texture_buffer : require\n"
                    "precision highp float;\n"
                    "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
                    "layout(location = 0) out highp vec4 o_color;\n"
                    "void main (void) {\n"
                    "\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
                    "\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
                    "\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
                    "\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
                    "\t{\n"
                    "\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z * 256u + (value.x ^ "
                    "value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
                    "\t\tvalue = uvec4(uint(round(floatValue.x * 255.0)), uint(round(floatValue.y * 255.0)), "
                    "uint(round(floatValue.z * 255.0)), uint(round(floatValue.w * 255.0)));\n"
                    "\t}\n"
                    "\to_color = vec4(value) / vec4(255.0);\n"
                    "}\n";

                sources.glslSources.add("sampled-image.frag") << glu::FragmentSource(fragmentShader);
            }
        }

        {
            // Full-screen quad vertex shader used by the fragment-side tests;
            // positions are derived from gl_VertexIndex, no attributes needed.
            const char *const vertexShader =
                "#version 450\n"
                "out gl_PerVertex {\n"
                "\tvec4 gl_Position;\n"
                "};\n"
                "precision highp float;\n"
                "void main (void) {\n"
                "\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
                "\t                   ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
                "}\n";

            sources.glslSources.add("render-quad.vert") << glu::VertexSource(vertexShader);
        }

        {
            // Solid-white fragment shader used with the point-rendering vertex shaders.
            const char *const fragmentShader = "#version 310 es\n"
                                               "layout(location = 0) out highp vec4 o_color;\n"
                                               "void main (void) {\n"
                                               "\to_color = vec4(1.0);\n"
                                               "}\n";

            sources.glslSources.add("render-white.frag") << glu::FragmentSource(fragmentShader);
        }
    }
};
10107
checkSupport(vkt::Context & context,TestConfig config)10108 void checkSupport(vkt::Context &context, TestConfig config)
10109 {
10110 #ifndef CTS_USES_VULKANSC
10111 if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
10112 ((config.vertexBufferStride % context.getPortabilitySubsetProperties().minVertexInputBindingStrideAlignment) !=
10113 0u))
10114 {
10115 TCU_THROW(NotSupportedError,
10116 "VK_KHR_portability_subset: stride is not multiply of minVertexInputBindingStrideAlignment");
10117 }
10118 #else
10119 DE_UNREF(context);
10120 DE_UNREF(config);
10121 #endif // CTS_USES_VULKANSC
10122 }
10123
10124 } // namespace
10125
// Builds the "pipeline_barrier" test group: one sub-group per (write usage,
// read usage) combination, each containing cases for several resource sizes,
// plus "all" / "all_device" groups that combine every usage flag.
tcu::TestCaseGroup *createPipelineBarrierTests(tcu::TestContext &testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "pipeline_barrier"));
    // Resource sizes exercised by every usage combination.
    const vk::VkDeviceSize sizes[] = {
        1024,         // 1K
        8 * 1024,     // 8K
        64 * 1024,    // 64K
        ONE_MEGABYTE, // 1M
    };
    const Usage usages[] = {
        USAGE_HOST_READ,      USAGE_HOST_WRITE,           USAGE_TRANSFER_SRC,   USAGE_TRANSFER_DST,
        USAGE_VERTEX_BUFFER,  USAGE_INDEX_BUFFER,         USAGE_UNIFORM_BUFFER, USAGE_UNIFORM_TEXEL_BUFFER,
        USAGE_STORAGE_BUFFER, USAGE_STORAGE_TEXEL_BUFFER, USAGE_STORAGE_IMAGE,  USAGE_SAMPLED_IMAGE};
    // Usages that consume data (paired with each producer below).
    const Usage readUsages[] = {USAGE_HOST_READ,      USAGE_TRANSFER_SRC,         USAGE_VERTEX_BUFFER,
                                USAGE_INDEX_BUFFER,   USAGE_UNIFORM_BUFFER,       USAGE_UNIFORM_TEXEL_BUFFER,
                                USAGE_STORAGE_BUFFER, USAGE_STORAGE_TEXEL_BUFFER, USAGE_STORAGE_IMAGE,
                                USAGE_SAMPLED_IMAGE};

    // Usages that produce data.
    const Usage writeUsages[] = {USAGE_HOST_WRITE, USAGE_TRANSFER_DST};

    const uint32_t vertexStrides[] = {
        DEFAULT_VERTEX_BUFFER_STRIDE,
        ALTERNATIVE_VERTEX_BUFFER_STRIDE,
    };

    for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
    {
        const Usage writeUsage = writeUsages[writeUsageNdx];

        for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
        {
            const Usage readUsage = readUsages[readUsageNdx];
            const Usage usage     = writeUsage | readUsage;
            const string usageGroupName(usageToName(usage));
            de::MovePtr<tcu::TestCaseGroup> usageGroup(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str()));

            for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
            {
                const vk::VkDeviceSize size = sizes[sizeNdx];
                TestConfig config           = {usage, DEFAULT_VERTEX_BUFFER_STRIDE, size, vk::VK_SHARING_MODE_EXCLUSIVE};
                const string testName(de::toString((uint64_t)(size)));

                if (readUsage == USAGE_VERTEX_BUFFER)
                {
                    // Vertex-buffer reads are additionally varied over the supported
                    // strides and need the portability-subset support check.
                    for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
                    {
                        const uint32_t stride      = vertexStrides[strideNdx];
                        const string finalTestName = testName + "_vertex_buffer_stride_" + de::toString(stride);

                        config.vertexBufferStride = stride;
                        usageGroup->addChild(new InstanceFactory1WithSupport<MemoryTestInstance, TestConfig,
                                                                             FunctionSupport1<TestConfig>, AddPrograms>(
                            testCtx, finalTestName, config,
                            typename FunctionSupport1<TestConfig>::Args(checkSupport, config)));
                    }
                }
                else
                {
                    usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(
                        testCtx, testName, AddPrograms(), config));
                }
            }

            // addChild() takes ownership; release() keeps MovePtr from double-freeing.
            group->addChild(usageGroup.get());
            usageGroup.release();
        }
    }

    {
        // Combine every usage flag for the stress groups below.
        Usage all = (Usage)0;

        for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
            all = all | usages[usageNdx];

        {
            // "all": every usage including host read/write.
            const string usageGroupName("all");
            de::MovePtr<tcu::TestCaseGroup> usageGroup(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str()));

            for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
            {
                const vk::VkDeviceSize size = sizes[sizeNdx];

                for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
                {
                    const uint32_t stride   = vertexStrides[strideNdx];
                    const string testName   = de::toString(size) + "_vertex_buffer_stride_" + de::toString(stride);
                    const TestConfig config = {all, stride, size, vk::VK_SHARING_MODE_EXCLUSIVE};

                    usageGroup->addChild(new InstanceFactory1WithSupport<MemoryTestInstance, vkt::memory::TestConfig,
                                                                         FunctionSupport1<TestConfig>, AddPrograms>(
                        testCtx, testName, config, typename FunctionSupport1<TestConfig>::Args(checkSupport, config)));
                }
            }

            group->addChild(usageGroup.get());
            usageGroup.release();
        }

        {
            // "all_device": like "all" but with the host access usages masked out,
            // so only device-side accesses are exercised.
            const string usageGroupName("all_device");
            de::MovePtr<tcu::TestCaseGroup> usageGroup(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str()));

            for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
            {
                const vk::VkDeviceSize size = sizes[sizeNdx];

                for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
                {
                    const uint32_t stride   = vertexStrides[strideNdx];
                    const string testName   = de::toString(size) + "_vertex_buffer_stride_" + de::toString(stride);
                    const TestConfig config = {(Usage)(all & (~(USAGE_HOST_READ | USAGE_HOST_WRITE))), stride, size,
                                               vk::VK_SHARING_MODE_EXCLUSIVE};

                    usageGroup->addChild(new InstanceFactory1WithSupport<MemoryTestInstance, TestConfig,
                                                                         FunctionSupport1<TestConfig>, AddPrograms>(
                        testCtx, testName, config, typename FunctionSupport1<TestConfig>::Args(checkSupport, config)));
                }
            }

            group->addChild(usageGroup.get());
            usageGroup.release();
        }
    }

    return group.release();
}
10252
10253 } // namespace memory
10254 } // namespace vkt
10255