1 /*
2 * Copyright 2022 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/gpu/graphite/dawn/DawnUtils.h"
9 #include "src/gpu/graphite/dawn/DawnGraphiteUtilsPriv.h"
10
11 #include "include/gpu/ShaderErrorHandler.h"
12 #include "include/gpu/graphite/Context.h"
13 #include "include/gpu/graphite/dawn/DawnBackendContext.h"
14 #include "src/gpu/graphite/ContextPriv.h"
15 #include "src/gpu/graphite/dawn/DawnQueueManager.h"
16 #include "src/gpu/graphite/dawn/DawnSharedContext.h"
17
18 namespace skgpu::graphite {
19
20 namespace ContextFactory {
MakeDawn(const DawnBackendContext & backendContext,const ContextOptions & options)21 std::unique_ptr<Context> MakeDawn(const DawnBackendContext& backendContext,
22 const ContextOptions& options) {
23 sk_sp<SharedContext> sharedContext = DawnSharedContext::Make(backendContext, options);
24 if (!sharedContext) {
25 return nullptr;
26 }
27
28 auto queueManager =
29 std::make_unique<DawnQueueManager>(backendContext.fQueue, sharedContext.get());
30 if (!queueManager) {
31 return nullptr;
32 }
33
34 auto context = ContextCtorAccessor::MakeContext(std::move(sharedContext),
35 std::move(queueManager),
36 options);
37 SkASSERT(context);
38 return context;
39 }
40 } // namespace ContextFactory
41
DawnFormatIsDepthOrStencil(wgpu::TextureFormat format)42 bool DawnFormatIsDepthOrStencil(wgpu::TextureFormat format) {
43 switch (format) {
44 case wgpu::TextureFormat::Stencil8: [[fallthrough]];
45 case wgpu::TextureFormat::Depth16Unorm:
46 case wgpu::TextureFormat::Depth32Float:
47 case wgpu::TextureFormat::Depth24PlusStencil8:
48 case wgpu::TextureFormat::Depth32FloatStencil8:
49 return true;
50 default:
51 return false;
52 }
53
54 SkUNREACHABLE;
55 }
56
DawnFormatIsDepth(wgpu::TextureFormat format)57 bool DawnFormatIsDepth(wgpu::TextureFormat format) {
58 switch (format) {
59 case wgpu::TextureFormat::Depth16Unorm: [[fallthrough]];
60 case wgpu::TextureFormat::Depth32Float:
61 case wgpu::TextureFormat::Depth24PlusStencil8:
62 case wgpu::TextureFormat::Depth32FloatStencil8:
63 return true;
64 default:
65 return false;
66 }
67
68 SkUNREACHABLE;
69 }
70
DawnFormatIsStencil(wgpu::TextureFormat format)71 bool DawnFormatIsStencil(wgpu::TextureFormat format) {
72 switch (format) {
73 case wgpu::TextureFormat::Stencil8: [[fallthrough]];
74 case wgpu::TextureFormat::Depth24PlusStencil8:
75 case wgpu::TextureFormat::Depth32FloatStencil8:
76 return true;
77 default:
78 return false;
79 }
80
81 SkUNREACHABLE;
82 }
83
DawnDepthStencilFlagsToFormat(SkEnumBitMask<DepthStencilFlags> mask)84 wgpu::TextureFormat DawnDepthStencilFlagsToFormat(SkEnumBitMask<DepthStencilFlags> mask) {
85 // TODO: Decide if we want to change this to always return a combined depth and stencil format
86 // to allow more sharing of depth stencil allocations.
87 if (mask == DepthStencilFlags::kDepth) {
88 // If needed for workarounds or performance, Depth32Float is also available but requires 2x
89 // the amount of memory.
90 return wgpu::TextureFormat::Depth16Unorm;
91 } else if (mask == DepthStencilFlags::kStencil) {
92 return wgpu::TextureFormat::Stencil8;
93 } else if (mask == DepthStencilFlags::kDepthStencil) {
94 return wgpu::TextureFormat::Depth24PlusStencil8;
95 }
96 SkASSERT(false);
97 return wgpu::TextureFormat::Undefined;
98 }
99
// Inspects the compilation results of `module` and returns true if it
// compiled without hard errors. On error, all compilation messages are
// formatted and forwarded to `errorHandler` along with `shaderText`.
// `sharedContext` is only used on the native (non-Emscripten) path, where
// its instance is needed to synchronously wait on the compilation-info
// future.
static bool check_shader_module([[maybe_unused]] const DawnSharedContext* sharedContext,
                                wgpu::ShaderModule* module,
                                const char* shaderText,
                                ShaderErrorHandler* errorHandler) {
    // Prior to emsdk 3.1.51 wgpu::ShaderModule::GetCompilationInfo is unimplemented.
#if defined(__EMSCRIPTEN__) && \
    ((__EMSCRIPTEN_major__ < 3 || \
     (__EMSCRIPTEN_major__ == 3 && __EMSCRIPTEN_minor__ < 1) || \
     (__EMSCRIPTEN_major__ == 3 && __EMSCRIPTEN_minor__ == 1 && __EMSCRIPTEN_tiny__ < 51)))
    // Cannot query compilation info on old emsdk; optimistically report success.
    return true;
#else
    // C-ABI callback target shared by both the deprecated and the
    // Future-based GetCompilationInfo entry points below. `userdata` is the
    // Handler instance itself.
    struct Handler {
        static void Fn(WGPUCompilationInfoRequestStatus status,
                       const WGPUCompilationInfo* info,
                       void* userdata) {
            Handler* self = reinterpret_cast<Handler*>(userdata);
            SkASSERT(status == WGPUCompilationInfoRequestStatus_Success);

            // Walk the message list and check for hard errors.
            self->fSuccess = true;
            for (size_t index = 0; index < info->messageCount; ++index) {
                const WGPUCompilationMessage& entry = info->messages[index];
                if (entry.type == WGPUCompilationMessageType_Error) {
                    self->fSuccess = false;
                    break;
                }
            }

            // If we found a hard error, report the compilation messages to the error handler.
            if (!self->fSuccess) {
                std::string errors;
                for (size_t index = 0; index < info->messageCount; ++index) {
                    const WGPUCompilationMessage& entry = info->messages[index];
                    // Newer Dawn exposes messages as non-NUL-terminated string
                    // views; older Dawn uses plain C strings.
#if defined(WGPU_BREAKING_CHANGE_STRING_VIEW_OUTPUT_STRUCTS)
                    std::string messageString(entry.message.data, entry.message.length);
#else  // defined(WGPU_BREAKING_CHANGE_STRING_VIEW_OUTPUT_STRUCTS)
                    std::string messageString(entry.message);
#endif  // defined(WGPU_BREAKING_CHANGE_STRING_VIEW_OUTPUT_STRUCTS)
                    errors += "line " + std::to_string(entry.lineNum) + ':' +
                              std::to_string(entry.linePos) + ' ' + messageString + '\n';
                }
                self->fErrorHandler->compileError(
                        self->fShaderText, errors.c_str(), /*shaderWasCached=*/false);
            }
        }

        const char* fShaderText;           // shader source, for error reporting only
        ShaderErrorHandler* fErrorHandler; // receives formatted errors on failure
        bool fSuccess = false;             // set by Fn once compilation info arrives
    };

    Handler handler;
    handler.fShaderText = shaderText;
    handler.fErrorHandler = errorHandler;
#if defined(__EMSCRIPTEN__)
    // Deprecated function.
    module->GetCompilationInfo(&Handler::Fn, &handler);
#else
    // New API.
    wgpu::FutureWaitInfo waitInfo{};
    waitInfo.future = module->GetCompilationInfo(
            wgpu::CallbackMode::WaitAnyOnly,
            [handlerPtr = &handler](wgpu::CompilationInfoRequestStatus status,
                                    const wgpu::CompilationInfo* info) {
                // Bridge the C++ lambda callback to the shared C-ABI handler.
                Handler::Fn(static_cast<WGPUCompilationInfoRequestStatus>(status),
                            reinterpret_cast<const WGPUCompilationInfo*>(info),
                            handlerPtr);
            });

    // Block until the compilation-info future resolves so this function can
    // return a definitive result.
    const auto& instance = static_cast<const DawnSharedContext*>(sharedContext)
                                   ->device()
                                   .GetAdapter()
                                   .GetInstance();
    [[maybe_unused]] auto status =
            instance.WaitAny(1, &waitInfo, /*timeoutNS=*/std::numeric_limits<uint64_t>::max());
    SkASSERT(status == wgpu::WaitStatus::Success);
#endif  // defined(__EMSCRIPTEN__)

    return handler.fSuccess;
#endif
}
181
DawnCompileWGSLShaderModule(const DawnSharedContext * sharedContext,const char * label,const std::string & wgsl,wgpu::ShaderModule * module,ShaderErrorHandler * errorHandler)182 bool DawnCompileWGSLShaderModule(const DawnSharedContext* sharedContext,
183 const char* label,
184 const std::string& wgsl,
185 wgpu::ShaderModule* module,
186 ShaderErrorHandler* errorHandler) {
187 #ifdef WGPU_BREAKING_CHANGE_DROP_DESCRIPTOR
188 wgpu::ShaderSourceWGSL wgslDesc;
189 #else
190 wgpu::ShaderModuleWGSLDescriptor wgslDesc;
191 #endif
192 wgslDesc.code = wgsl.c_str();
193
194 wgpu::ShaderModuleDescriptor desc;
195 desc.nextInChain = &wgslDesc;
196 if (sharedContext->caps()->setBackendLabels()) {
197 desc.label = label;
198 }
199
200 *module = sharedContext->device().CreateShaderModule(&desc);
201
202 return check_shader_module(sharedContext, module, wgsl.c_str(), errorHandler);
203 }
204
205 #if !defined(__EMSCRIPTEN__)
206 namespace ycbcrUtils {
207
DawnDescriptorIsValid(const wgpu::YCbCrVkDescriptor & desc)208 bool DawnDescriptorIsValid(const wgpu::YCbCrVkDescriptor& desc) {
209 static const wgpu::YCbCrVkDescriptor kDefaultYcbcrDescriptor = {};
210 return !DawnDescriptorsAreEquivalent(desc, kDefaultYcbcrDescriptor);
211 }
212
DawnDescriptorUsesExternalFormat(const wgpu::YCbCrVkDescriptor & desc)213 bool DawnDescriptorUsesExternalFormat(const wgpu::YCbCrVkDescriptor& desc) {
214 SkASSERT(desc.externalFormat != 0 || desc.vkFormat != 0);
215 return desc.externalFormat != 0;
216 }
217
218 } // namespace ycbcrUtils
219 #endif // !defined(__EMSCRIPTEN__)
220
221 } // namespace skgpu::graphite
222