/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/d3d/GrD3DResourceProvider.h"

#include "include/gpu/ganesh/GrContextOptions.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/d3d/GrD3DBuffer.h"
#include "src/gpu/ganesh/d3d/GrD3DCommandList.h"
#include "src/gpu/ganesh/d3d/GrD3DGpu.h"
#include "src/gpu/ganesh/d3d/GrD3DPipelineState.h"
#include "src/gpu/ganesh/d3d/GrD3DPipelineStateBuilder.h"
#include "src/gpu/ganesh/d3d/GrD3DRenderTarget.h"

GrD3DResourceProvider::GrD3DResourceProvider(GrD3DGpu* gpu)
        : fGpu(gpu)
        , fCpuDescriptorManager(gpu)
        , fDescriptorTableManager(gpu)
        , fPipelineStateCache(new PipelineStateCache(gpu))
        , fShaderResourceDescriptorTableCache(gpu)
        , fSamplerDescriptorTableCache(gpu) {
}

void GrD3DResourceProvider::destroyResources() {
    fSamplers.reset();

    fPipelineStateCache->release();
}

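// Hands out a direct command list, reusing a recycled one if available and creating a new one
// otherwise. Lists are returned to the pool via recycleDirectCommandList() below.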
std::unique_ptr<GrD3DDirectCommandList> GrD3DResourceProvider::findOrCreateDirectCommandList() {
    if (fAvailableDirectCommandLists.size()) {
        std::unique_ptr<GrD3DDirectCommandList> list =
                std::move(fAvailableDirectCommandLists.back());
        fAvailableDirectCommandLists.pop_back();
        return list;
    }
    return GrD3DDirectCommandList::Make(fGpu);
}

void GrD3DResourceProvider::recycleDirectCommandList(
        std::unique_ptr<GrD3DDirectCommandList> commandList) {
    commandList->reset();
    fAvailableDirectCommandLists.push_back(std::move(commandList));
}

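// Root signatures are shared between pipelines with the same resource layout, so search the
// cached signatures for a compatible one before creating a new signature.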
sk_sp<GrD3DRootSignature> GrD3DResourceProvider::findOrCreateRootSignature(int numTextureSamplers,
                                                                           int numUAVs) {
    for (int i = 0; i < fRootSignatures.size(); ++i) {
        if (fRootSignatures[i]->isCompatible(numTextureSamplers, numUAVs)) {
            return fRootSignatures[i];
        }
    }

    auto rootSig = GrD3DRootSignature::Make(fGpu, numTextureSamplers, numUAVs);
    if (!rootSig) {
        return nullptr;
    }
    fRootSignatures.push_back(rootSig);
    return rootSig;
}

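// Command signatures (used for indirect draws) are likewise cached and shared when compatible.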
sk_sp<GrD3DCommandSignature> GrD3DResourceProvider::findOrCreateCommandSignature(
        GrD3DCommandSignature::ForIndexed indexed, unsigned int slot) {
    for (int i = 0; i < fCommandSignatures.size(); ++i) {
        if (fCommandSignatures[i]->isCompatible(indexed, slot)) {
            return fCommandSignatures[i];
        }
    }

    auto commandSig = GrD3DCommandSignature::Make(fGpu, indexed, slot);
    if (!commandSig) {
        return nullptr;
    }
    fCommandSignatures.push_back(commandSig);
    return commandSig;
}

GrD3DDescriptorHeap::CPUHandle GrD3DResourceProvider::createRenderTargetView(
        ID3D12Resource* textureResource) {
    return fCpuDescriptorManager.createRenderTargetView(fGpu, textureResource);
}

void GrD3DResourceProvider::recycleRenderTargetView(
        const GrD3DDescriptorHeap::CPUHandle& rtvDescriptor) {
    fCpuDescriptorManager.recycleRenderTargetView(rtvDescriptor);
}

GrD3DDescriptorHeap::CPUHandle GrD3DResourceProvider::createDepthStencilView(
        ID3D12Resource* textureResource) {
    return fCpuDescriptorManager.createDepthStencilView(fGpu, textureResource);
}

void GrD3DResourceProvider::recycleDepthStencilView(
        const GrD3DDescriptorHeap::CPUHandle& dsvDescriptor) {
    fCpuDescriptorManager.recycleDepthStencilView(dsvDescriptor);
}

GrD3DDescriptorHeap::CPUHandle GrD3DResourceProvider::createConstantBufferView(
        ID3D12Resource* bufferResource, size_t offset, size_t size) {
    return fCpuDescriptorManager.createConstantBufferView(fGpu, bufferResource, offset, size);
}

GrD3DDescriptorHeap::CPUHandle GrD3DResourceProvider::createShaderResourceView(
        ID3D12Resource* resource, unsigned int highestMip, unsigned int mipLevels) {
    return fCpuDescriptorManager.createShaderResourceView(fGpu, resource, highestMip, mipLevels);
}

GrD3DDescriptorHeap::CPUHandle GrD3DResourceProvider::createUnorderedAccessView(
        ID3D12Resource* resource, unsigned int mipSlice) {
    return fCpuDescriptorManager.createUnorderedAccessView(fGpu, resource, mipSlice);
}

void GrD3DResourceProvider::recycleShaderView(const GrD3DDescriptorHeap::CPUHandle& view) {
    fCpuDescriptorManager.recycleShaderView(view);
}

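// Helpers to translate GrSamplerState settings into their D3D12 sampler equivalents.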
static D3D12_TEXTURE_ADDRESS_MODE wrap_mode_to_d3d_address_mode(GrSamplerState::WrapMode wrapMode) {
    switch (wrapMode) {
        case GrSamplerState::WrapMode::kClamp:
            return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
        case GrSamplerState::WrapMode::kRepeat:
            return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
        case GrSamplerState::WrapMode::kMirrorRepeat:
            return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
        case GrSamplerState::WrapMode::kClampToBorder:
            return D3D12_TEXTURE_ADDRESS_MODE_BORDER;
    }
    SK_ABORT("Unknown wrap mode.");
}

static D3D12_FILTER d3d_filter(GrSamplerState sampler) {
    if (sampler.isAniso()) {
        return D3D12_FILTER_ANISOTROPIC;
    }
    switch (sampler.mipmapMode()) {
        // When the mode is kNone we disable filtering using maxLOD.
        case GrSamplerState::MipmapMode::kNone:
        case GrSamplerState::MipmapMode::kNearest:
            switch (sampler.filter()) {
                case GrSamplerState::Filter::kNearest: return D3D12_FILTER_MIN_MAG_MIP_POINT;
                case GrSamplerState::Filter::kLinear: return D3D12_FILTER_MIN_MAG_LINEAR_MIP_POINT;
            }
            SkUNREACHABLE;
        case GrSamplerState::MipmapMode::kLinear:
            switch (sampler.filter()) {
                case GrSamplerState::Filter::kNearest: return D3D12_FILTER_MIN_MAG_POINT_MIP_LINEAR;
                case GrSamplerState::Filter::kLinear: return D3D12_FILTER_MIN_MAG_MIP_LINEAR;
            }
            SkUNREACHABLE;
    }
    SkUNREACHABLE;
}

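// Samplers are cached in a hash map keyed on the packed GrSamplerState, since the same few
// sampler configurations tend to be requested repeatedly.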
D3D12_CPU_DESCRIPTOR_HANDLE GrD3DResourceProvider::findOrCreateCompatibleSampler(
        const GrSamplerState& params) {
    // In D3D anisotropic filtering uses the same field (D3D12_SAMPLER_DESC::Filter) as min/mag/mip
    // settings and so is not orthogonal to them.
    uint32_t key = params.asKey(/*anisoIsOrthogonal=*/false);
    D3D12_CPU_DESCRIPTOR_HANDLE* samplerPtr = fSamplers.find(key);
    if (samplerPtr) {
        return *samplerPtr;
    }

    D3D12_FILTER filter = d3d_filter(params);
    // We disable MIP filtering using maxLOD. Otherwise, we want the max LOD to be unbounded.
    float maxLOD =
            params.mipmapped() == skgpu::Mipmapped::kYes ? std::numeric_limits<float>::max() : 0.f;
    D3D12_TEXTURE_ADDRESS_MODE addressModeU = wrap_mode_to_d3d_address_mode(params.wrapModeX());
    D3D12_TEXTURE_ADDRESS_MODE addressModeV = wrap_mode_to_d3d_address_mode(params.wrapModeY());
    unsigned int maxAnisotropy = params.maxAniso();
    D3D12_CPU_DESCRIPTOR_HANDLE sampler =
            fCpuDescriptorManager.createSampler(
                    fGpu, filter, maxLOD, maxAnisotropy, addressModeU, addressModeV).fHandle;
    fSamplers.set(key, sampler);
    return sampler;
}

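// Descriptor tables are cached per submit. Both caches are invalidated in prepForSubmit()
// because the underlying descriptor heap memory is recycled once the command buffer finishes.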
sk_sp<GrD3DDescriptorTable> GrD3DResourceProvider::findOrCreateShaderViewTable(
        const std::vector<D3D12_CPU_DESCRIPTOR_HANDLE>& shaderViews) {
    auto createFunc = [this](GrD3DGpu* gpu, unsigned int numDesc) {
        return this->fDescriptorTableManager.createShaderViewTable(gpu, numDesc);
    };
    return fShaderResourceDescriptorTableCache.findOrCreateDescTable(shaderViews, createFunc);
}

sk_sp<GrD3DDescriptorTable> GrD3DResourceProvider::findOrCreateSamplerTable(
        const std::vector<D3D12_CPU_DESCRIPTOR_HANDLE>& samplers) {
    auto createFunc = [this](GrD3DGpu* gpu, unsigned int numDesc) {
        return this->fDescriptorTableManager.createSamplerTable(gpu, numDesc);
    };
    return fSamplerDescriptorTableCache.findOrCreateDescTable(samplers, createFunc);
}

GrD3DPipelineState* GrD3DResourceProvider::findOrCreateCompatiblePipelineState(
        GrD3DRenderTarget* rt, const GrProgramInfo& info) {
    return fPipelineStateCache->refPipelineState(rt, info);
}

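// Lazily creates (and then reuses) the compute pipeline used to generate mipmaps by
// downsampling one mip level into the next.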
sk_sp<GrD3DPipeline> GrD3DResourceProvider::findOrCreateMipmapPipeline() {
    if (!fMipmapPipeline) {
        // Note: filtering for non-even widths and heights samples at the 0.25 and 0.75
        // locations and averages the result. As the initial samples are bilerped this is
        // approximately a triangle filter. We should look into doing a better kernel but
        // this should hold us for now.
        const char* shader =
            "SamplerState textureSampler : register(s0, space1);\n"
            "Texture2D<float4> inputTexture : register(t1, space1);\n"
            "RWTexture2D<float4> outUAV : register(u2, space1);\n"
            "\n"
            "cbuffer UniformBuffer : register(b0, space0) {\n"
            "    float2 inverseDims;\n"
            "    uint mipLevel;\n"
            "    uint sampleMode;\n"
            "}\n"
            "\n"
            "[numthreads(8, 8, 1)]\n"
            "void main(uint groupIndex : SV_GroupIndex, uint3 threadID : SV_DispatchThreadID) {\n"
            "    float2 uv = inverseDims * (threadID.xy + 0.5);\n"
            "    float4 mipVal;\n"
            "    switch (sampleMode) {\n"
            "        case 0: {\n"
            "            mipVal = inputTexture.SampleLevel(textureSampler, uv, mipLevel);\n"
            "            break;\n"
            "        }\n"
            "        case 1: {\n"
            "            float2 uvdiff = inverseDims * 0.25;\n"
            "            mipVal = inputTexture.SampleLevel(textureSampler, uv-uvdiff, mipLevel);\n"
            "            mipVal += inputTexture.SampleLevel(textureSampler, uv+uvdiff, mipLevel);\n"
            "            uvdiff.y = -uvdiff.y;\n"
            "            mipVal += inputTexture.SampleLevel(textureSampler, uv-uvdiff, mipLevel);\n"
            "            mipVal += inputTexture.SampleLevel(textureSampler, uv+uvdiff, mipLevel);\n"
            "            mipVal *= 0.25;\n"
            "            break;\n"
            "        }\n"
            "        case 2: {\n"
            "            float2 uvdiff = float2(inverseDims.x * 0.25, 0);\n"
            "            mipVal = inputTexture.SampleLevel(textureSampler, uv-uvdiff, mipLevel);\n"
            "            mipVal += inputTexture.SampleLevel(textureSampler, uv+uvdiff, mipLevel);\n"
            "            mipVal *= 0.5;\n"
            "            break;\n"
            "        }\n"
            "        case 3: {\n"
            "            float2 uvdiff = float2(0, inverseDims.y * 0.25);\n"
            "            mipVal = inputTexture.SampleLevel(textureSampler, uv-uvdiff, mipLevel);\n"
            "            mipVal += inputTexture.SampleLevel(textureSampler, uv+uvdiff, mipLevel);\n"
            "            mipVal *= 0.5;\n"
            "            break;\n"
            "        }\n"
            "    }\n"
            "\n"
            "    outUAV[threadID.xy] = mipVal;\n"
            "}\n";

        sk_sp<GrD3DRootSignature> rootSig = this->findOrCreateRootSignature(1, 1);

        fMipmapPipeline =
                GrD3DPipelineStateBuilder::MakeComputePipeline(fGpu, rootSig.get(), shader);
    }

    return fMipmapPipeline;
}

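// Copies uniform data into the uniform ring buffer and returns the GPU virtual address of the
// copy so it can be bound as a constant buffer.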
D3D12_GPU_VIRTUAL_ADDRESS GrD3DResourceProvider::uploadConstantData(void* data, size_t size) {
    // D3D12 requires constant buffer data to be aligned to 256 bytes.
    constexpr int kConstantAlignment = 256;

    // Upload the data to the next free slice of the uniform ring buffer.
    size_t paddedSize = SkAlignTo(size, kConstantAlignment);
    GrRingBuffer::Slice slice = fGpu->uniformsRingBuffer()->suballocate(paddedSize);
    char* destPtr = static_cast<char*>(slice.fBuffer->map()) + slice.fOffset;
    memcpy(destPtr, data, size);

    // Return the GPU virtual address of the uploaded data within the buffer.
    GrD3DBuffer* d3dBuffer = static_cast<GrD3DBuffer*>(slice.fBuffer);
    D3D12_GPU_VIRTUAL_ADDRESS gpuAddress = d3dBuffer->d3dResource()->GetGPUVirtualAddress();
    return gpuAddress + slice.fOffset;
}

void GrD3DResourceProvider::prepForSubmit() {
    fDescriptorTableManager.prepForSubmit(fGpu);
    // Any heap memory used for these tables will be returned when the command buffer finishes,
    // so we have to invalidate all cached entries.
    fShaderResourceDescriptorTableCache.release();
    fSamplerDescriptorTableCache.release();
}

////////////////////////////////////////////////////////////////////////////////////////////////

#ifdef GR_PIPELINE_STATE_CACHE_STATS
// Display pipeline state cache usage
static const bool c_DisplayD3DPipelineCache{false};
#endif

struct GrD3DResourceProvider::PipelineStateCache::Entry {
    Entry(GrD3DGpu* gpu, std::unique_ptr<GrD3DPipelineState> pipelineState)
            : fGpu(gpu), fPipelineState(std::move(pipelineState)) {}

    GrD3DGpu* fGpu;
    std::unique_ptr<GrD3DPipelineState> fPipelineState;
};

GrD3DResourceProvider::PipelineStateCache::PipelineStateCache(GrD3DGpu* gpu)
        : fMap(gpu->getContext()->priv().options().fRuntimeProgramCacheSize)
        , fGpu(gpu)
#ifdef GR_PIPELINE_STATE_CACHE_STATS
        , fTotalRequests(0)
        , fCacheMisses(0)
#endif
{
}

GrD3DResourceProvider::PipelineStateCache::~PipelineStateCache() {
    // dump stats
#ifdef GR_PIPELINE_STATE_CACHE_STATS
    if (c_DisplayD3DPipelineCache) {
        SkDebugf("--- Pipeline State Cache ---\n");
        SkDebugf("Total requests: %d\n", fTotalRequests);
        SkDebugf("Cache misses: %d\n", fCacheMisses);
        SkDebugf("Cache miss %%: %f\n",
                 (fTotalRequests > 0) ? 100.f * fCacheMisses / fTotalRequests : 0.f);
        SkDebugf("---------------------\n");
    }
#endif
}

void GrD3DResourceProvider::PipelineStateCache::release() {
    fMap.reset();
}

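// Looks up a pipeline state by its program descriptor, building and caching a new one on a miss.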
GrD3DPipelineState* GrD3DResourceProvider::PipelineStateCache::refPipelineState(
        GrD3DRenderTarget* renderTarget, const GrProgramInfo& programInfo) {
#ifdef GR_PIPELINE_STATE_CACHE_STATS
    ++fTotalRequests;
#endif

    const GrCaps* caps = fGpu->caps();

    GrProgramDesc desc = caps->makeDesc(renderTarget, programInfo);
    if (!desc.isValid()) {
        GrCapsDebugf(caps, "Failed to build D3D program descriptor!\n");
        return nullptr;
    }

    std::unique_ptr<Entry>* entry = fMap.find(desc);
    if (!entry) {
#ifdef GR_PIPELINE_STATE_CACHE_STATS
        ++fCacheMisses;
#endif
        std::unique_ptr<GrD3DPipelineState> pipelineState =
                GrD3DPipelineStateBuilder::MakePipelineState(fGpu, renderTarget, desc, programInfo);
        if (!pipelineState) {
            return nullptr;
        }
        entry = fMap.insert(desc, std::unique_ptr<Entry>(
                new Entry(fGpu, std::move(pipelineState))));
    }
    return ((*entry)->fPipelineState).get();
}

void GrD3DResourceProvider::PipelineStateCache::markPipelineStateUniformsDirty() {
    fMap.foreach([](const GrProgramDesc*, std::unique_ptr<Entry>* entry) {
        (*entry)->fPipelineState->markUniformsDirty();
    });
}

////////////////////////////////////////////////////////////////////////////////////////////////

void GrD3DResourceProvider::DescriptorTableCache::release() {
    fMap.reset();
}

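// Looks up a descriptor table matching the given CPU descriptors; on a miss, allocates a new
// table via createFunc and copies the descriptors into it.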
sk_sp<GrD3DDescriptorTable> GrD3DResourceProvider::DescriptorTableCache::findOrCreateDescTable(
        const std::vector<D3D12_CPU_DESCRIPTOR_HANDLE>& cpuDescriptors,
        std::function<sk_sp<GrD3DDescriptorTable>(GrD3DGpu*, unsigned int numDesc)> createFunc) {
    sk_sp<GrD3DDescriptorTable>* entry = fMap.find(cpuDescriptors);
    if (entry) {
        return *entry;
    }

    unsigned int numDescriptors = cpuDescriptors.size();
    SkASSERT(numDescriptors <= kRangeSizesCount);
    sk_sp<GrD3DDescriptorTable> descTable = createFunc(fGpu, numDescriptors);
    fGpu->device()->CopyDescriptors(1, descTable->baseCpuDescriptorPtr(), &numDescriptors,
                                    numDescriptors, cpuDescriptors.data(), fRangeSizes,
                                    descTable->type());
    entry = fMap.insert(cpuDescriptors, std::move(descTable));
    return *entry;
}