/*==============================================================================
Copyright(c) 2017 Intel Corporation

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/

#include "Internal/Common/GmmLibInc.h"

/////////////////////////////////////////////////////////////////////////////////////
/// Checks that clients set the Presentable flag at resource allocation ONLY when a
/// platform-supported render-target format from the ::GMM_RESOURCE_FORMAT enum is
/// selected.
///
/// @return     true if displayable, false otherwise.
/////////////////////////////////////////////////////////////////////////////////////
bool GmmLib::GmmResourceInfoCommon::IsPresentableformat()
{
    const GMM_PLATFORM_INFO *pPlatform;
    const GMM_FORMAT_ENTRY * FormatTable = NULL;

    GMM_DPF_ENTER;
    __GMM_ASSERTPTR(GetGmmLibContext(), false);

    pPlatform   = GMM_OVERRIDE_PLATFORM_INFO(&Surf, GetGmmLibContext());
    FormatTable = &(pPlatform->FormatTable[0]);

    if(Surf.Flags.Gpu.Presentable == false)
    {
        // When the Presentable flag is not set, there is no reason to check for a
        // valid platform-supported render-target format. Safe to return true.
        return true;
    }

    if((Surf.Format > GMM_FORMAT_INVALID) &&
       (Surf.Format < GMM_RESOURCE_FORMATS))
    {
        if((FormatTable[Surf.Format].RenderTarget) &&
           (FormatTable[Surf.Format].Supported))
        {
            return true;
        }
        else
        {
            GMM_ASSERTDPF(0, "Present flag can only be set w/ a format!");
            return false;
        }
    }

    return false;
}
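
/* Illustrative usage sketch (not part of the library): a hypothetical client that
   requested a presentable allocation can validate the format choice afterwards.
   pResInfo below is an assumed handle from the client's normal creation path.

       GMM_RESOURCE_INFO *pResInfo = ...; // created with Flags.Gpu.Presentable = 1

       if(!pResInfo->IsPresentableformat())
       {
           // The chosen GMM_RESOURCE_FORMAT is not a supported render target on
           // this platform, so the surface cannot be presented.
       }
*/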

/////////////////////////////////////////////////////////////////////////////////////
/// Returns the restrictions that a particular resource must follow on a particular
/// OS or hardware.
///
/// @param[out]     Restrictions: restrictions that this resource must adhere to
/////////////////////////////////////////////////////////////////////////////////////
void GmmLib::GmmResourceInfoCommon::GetRestrictions(__GMM_BUFFER_TYPE &Restrictions)
{
    GMM_DPF_ENTER;

    GMM_TEXTURE_CALC *pTextureCalc = NULL;
    pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(&Surf, GetGmmLibContext());
    if(pTextureCalc)
    {
        pTextureCalc->GetResRestrictions(&Surf, Restrictions);
    }

    GMM_DPF_EXIT;
}


//=============================================================================
//
// Function: GmmResGetRestrictions
//
// Desc: This routine returns resource restrictions
//
// Parameters:
//      pResourceInfo: ptr to GMM_RESOURCE_INFO
//      pRestrictions: ptr to restrictions
//
// Returns:
//      void
//
//-----------------------------------------------------------------------------
void GMM_STDCALL GmmResGetRestrictions(GMM_RESOURCE_INFO *pResourceInfo,
                                       __GMM_BUFFER_TYPE *pRestrictions)
{
    pResourceInfo->GetRestrictions(*pRestrictions);
}
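
/* Illustrative sketch (not part of the library): a hypothetical C-style caller that
   retrieves the restrictions and rounds a tentative pitch up to a legal value.
   DesiredPitch is a placeholder; the __GMM_BUFFER_TYPE fields used are real.

       __GMM_BUFFER_TYPE Restrictions = {0};
       GmmResGetRestrictions(pResourceInfo, &Restrictions);

       uint64_t Pitch = DesiredPitch;
       Pitch = (Pitch < Restrictions.MinPitch) ? Restrictions.MinPitch : Pitch;
       Pitch = GFX_ALIGN(Pitch, Restrictions.PitchAlignment);
*/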

/////////////////////////////////////////////////////////////////////////////////////
/// Returns the combined restrictions of two buffer types. Each buffer type carries
/// alignment and size restrictions; the result takes the stricter value of each
/// field (maximum of the minimums/alignments, minimum of the maximums).
///
/// @param[in,out]  pFirstBuffer: Contains surface alignment and size restrictions;
///                               updated in place with the combined restrictions
/// @param[in]      pSecondBuffer: Contains surface alignment and size restrictions
///
/// @return         Pointer to pFirstBuffer holding the combined restrictions
/////////////////////////////////////////////////////////////////////////////////////
__GMM_BUFFER_TYPE *GmmLib::GmmTextureCalc::GetBestRestrictions(__GMM_BUFFER_TYPE *      pFirstBuffer,
                                                               const __GMM_BUFFER_TYPE *pSecondBuffer)
{
    GMM_DPF_ENTER;

    if(IsRestrictionInvalid(pFirstBuffer)) //default
    {
        *pFirstBuffer = *pSecondBuffer;
        return pFirstBuffer;
    }

    pFirstBuffer->Alignment = GFX_MAX(pFirstBuffer->Alignment,
                                      pSecondBuffer->Alignment);

    pFirstBuffer->PitchAlignment = GFX_MAX(pFirstBuffer->PitchAlignment,
                                           pSecondBuffer->PitchAlignment);

    pFirstBuffer->RenderPitchAlignment = GFX_MAX(pFirstBuffer->RenderPitchAlignment,
                                                 pSecondBuffer->RenderPitchAlignment);

    pFirstBuffer->LockPitchAlignment = GFX_MAX(pFirstBuffer->LockPitchAlignment,
                                               pSecondBuffer->LockPitchAlignment);

    pFirstBuffer->MinPitch = GFX_MAX(pFirstBuffer->MinPitch,
                                     pSecondBuffer->MinPitch);

    pFirstBuffer->MinAllocationSize = GFX_MAX(pFirstBuffer->MinAllocationSize,
                                              pSecondBuffer->MinAllocationSize);

    pFirstBuffer->MinDepth = GFX_MAX(pFirstBuffer->MinDepth,
                                     pSecondBuffer->MinDepth);

    pFirstBuffer->MinHeight = GFX_MAX(pFirstBuffer->MinHeight,
                                      pSecondBuffer->MinHeight);

    pFirstBuffer->MinWidth = GFX_MAX(pFirstBuffer->MinWidth,
                                     pSecondBuffer->MinWidth);

    pFirstBuffer->MaxDepth = GFX_MIN(pFirstBuffer->MaxDepth,
                                     pSecondBuffer->MaxDepth);

    pFirstBuffer->MaxHeight = GFX_MIN(pFirstBuffer->MaxHeight,
                                      pSecondBuffer->MaxHeight);

    pFirstBuffer->MaxWidth = GFX_MIN(pFirstBuffer->MaxWidth,
                                     pSecondBuffer->MaxWidth);

    pFirstBuffer->NeedPow2LockAlignment = pFirstBuffer->NeedPow2LockAlignment |
                                          pSecondBuffer->NeedPow2LockAlignment;

    GMM_DPF_EXIT;
    return pFirstBuffer;
}
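
/* Worked example (comment only, not library code): merging a buffer type with
   Alignment = 4KB, PitchAlignment = 64, MaxWidth = 16K against one with
   Alignment = 64KB, PitchAlignment = 32, MaxWidth = 8K yields the stricter set:
   Alignment = 64KB (max), PitchAlignment = 64 (max), MaxWidth = 8K (min).
   Repeated calls therefore accumulate the worst-case requirements of every usage
   flag set on the resource.
*/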

/////////////////////////////////////////////////////////////////////////////////////
/// Returns restrictions for 1D, 2D, 3D textures depending on how the surface
/// may possibly be used.
///
/// @param[in]      pTexInfo: ptr to ::GMM_TEXTURE_INFO describing the surface
/// @param[out]     pBuff: Restrictions filled in this struct
/////////////////////////////////////////////////////////////////////////////////////
void GmmLib::GmmTextureCalc::GetGenericRestrictions(GMM_TEXTURE_INFO *pTexInfo, __GMM_BUFFER_TYPE *pBuff)
{
    GMM_DPF_ENTER;
    const GMM_PLATFORM_INFO *pPlatformResource = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo, pGmmLibContext);

    if(pTexInfo->Flags.Gpu.NoRestriction)
    {
        // Impose zero restrictions. Ignore any other GPU usage flags
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->NoRestriction);
        return;
    }

    if(pTexInfo->Flags.Gpu.Texture)
    {
        if(pTexInfo->Type == RESOURCE_BUFFER)
        {
            *pBuff = pPlatformResource->BufferType;
        }
        else if(pTexInfo->Type == RESOURCE_CUBE)
        {
            *pBuff = pPlatformResource->CubeSurface;
        }
        else if(pTexInfo->Type == RESOURCE_3D)
        {
            *pBuff = pPlatformResource->Texture3DSurface;
        }
        else
        {
            *pBuff = pPlatformResource->Texture2DSurface;
            if(pTexInfo->Flags.Info.Linear)
            {
                *pBuff = pPlatformResource->Texture2DLinearSurface;
            }
            if(GmmIsReconstructableSurface(pTexInfo->Format))
            {
                pBuff->MaxHeight = pPlatformResource->ReconMaxHeight;
                pBuff->MaxWidth  = pPlatformResource->ReconMaxWidth;
            }
        }
    }
    if(pTexInfo->Flags.Gpu.RenderTarget ||
       pTexInfo->Flags.Gpu.CCS ||
       pTexInfo->Flags.Gpu.MCS)
    {
        // Gen7 onwards, bound by SURFACE_STATE constraints.
        if(pTexInfo->Type == RESOURCE_BUFFER)
        {
            *pBuff = pPlatformResource->BufferType;
        }
        else if(pTexInfo->Type == RESOURCE_CUBE)
        {
            *pBuff = pPlatformResource->CubeSurface;
        }
        else if(pTexInfo->Type == RESOURCE_3D)
        {
            *pBuff = pPlatformResource->Texture3DSurface;
        }
        else
        {
            *pBuff = pPlatformResource->Texture2DSurface;
            if(pTexInfo->Flags.Info.Linear)
            {
                *pBuff = pPlatformResource->Texture2DLinearSurface;
            }
            if(GmmIsReconstructableSurface(pTexInfo->Format))
            {
                pBuff->MaxHeight = pPlatformResource->ReconMaxHeight;
                pBuff->MaxWidth  = pPlatformResource->ReconMaxWidth;
            }
        }
    }
    if(pTexInfo->Flags.Gpu.Depth)
    {
        // Z
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Depth);
    }
    if(pTexInfo->Flags.Gpu.Vertex)
    {
        // VertexData
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Vertex);
    }
    if(pTexInfo->Flags.Gpu.Index)
    {
        // Index buffer
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Index);
    }
    if(pTexInfo->Flags.Gpu.FlipChain)
    {
        // Async Flip
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->ASyncFlipSurface);
    }
    if(pTexInfo->Flags.Gpu.MotionComp)
    {
        // Media buffer
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->MotionComp);
    }
    if(pTexInfo->Flags.Gpu.State ||
       pTexInfo->Flags.Gpu.InstructionFlat ||
       pTexInfo->Flags.Gpu.ScratchFlat)
    {
        // Indirect state
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Vertex);
    }
    if(pTexInfo->Flags.Gpu.Query ||
       pTexInfo->Flags.Gpu.HistoryBuffer)
    {
        // Query
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->NoRestriction);
    }
    if(pTexInfo->Flags.Gpu.Constant)
    {
        // Constant buffer
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Constant);
    }
    if(pTexInfo->Flags.Gpu.Stream)
    {
        // Stream
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Stream);
    }
    if(pTexInfo->Flags.Gpu.InterlacedScan)
    {
        // Interlaced scan
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->InterlacedScan);
    }
    if(pTexInfo->Flags.Gpu.TextApi)
    {
        // TextApi
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->TextApi);
    }
    if(pTexInfo->Flags.Gpu.SeparateStencil)
    {
        // Separate stencil
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Stencil);
    }
    if(pTexInfo->Flags.Gpu.HiZ)
    {
        // HiZ
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->HiZ);
    }
    if(pTexInfo->Flags.Gpu.Video)
    {
        // Video
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Video);
        if(GmmIsReconstructableSurface(pTexInfo->Format))
        {
            pBuff->MaxHeight = pPlatformResource->ReconMaxHeight;
            pBuff->MaxWidth  = pPlatformResource->ReconMaxWidth;
        }
    }
    if(pTexInfo->Flags.Gpu.StateDx9ConstantBuffer)
    {
        // DX9 constant buffer
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->StateDx9ConstantBuffer);
    }
    if(pTexInfo->Flags.Gpu.Overlay)
    {
        // Overlay buffers use the Async Flip values
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->Overlay);

        if((pTexInfo->Format == GMM_FORMAT_YUY2) && (pTexInfo->BaseWidth == 640))
        {
            // Override the pitch alignment
            pBuff->PitchAlignment = 64;
        }
    }
    if(pTexInfo->Flags.Info.XAdapter)
    {
        // Add cross-adapter resource restrictions for hybrid graphics.
        pBuff = GetBestRestrictions(pBuff, &pPlatformResource->XAdapter);
        if(pTexInfo->Type == RESOURCE_BUFFER)
        {
            pBuff->MaxWidth  = pPlatformResource->SurfaceMaxSize;
            pBuff->MaxPitch  = pPlatformResource->BufferType.MaxPitch;
            pBuff->MaxHeight = 1;
        }
    }

    // Non-aligned ExistingSysMem special cases.
    if((pTexInfo->Flags.Info.ExistingSysMem &&
        (!pTexInfo->ExistingSysMem.IsGmmAllocated) &&
        (!pTexInfo->ExistingSysMem.IsPageAligned)))
    {

        if(pTexInfo->Flags.Info.Linear ||
           pTexInfo->Flags.Info.SVM)
        {
            if(pTexInfo->Type == RESOURCE_BUFFER)
            {
                // Use a combination of BufferType and NoRestriction to support
                // large buffers with minimal pitch alignment.
                *pBuff                      = pPlatformResource->BufferType;
                pBuff->PitchAlignment       = pPlatformResource->NoRestriction.PitchAlignment;
                pBuff->LockPitchAlignment   = pPlatformResource->NoRestriction.LockPitchAlignment;
                pBuff->RenderPitchAlignment = pPlatformResource->NoRestriction.LockPitchAlignment;
                pBuff->MinPitch             = pPlatformResource->NoRestriction.MinPitch;
            }

            //[TODO] Handle other types when needed!
        }
        /*
        else if(Surf.Flags.Gpu.Texture)
        {
            // Override as and when required
        }
        else if(Surf.Flags.Gpu.RenderTarget)
        {
            // Override as and when required
        }*/
    }

    GMM_DPF_EXIT;
}

/////////////////////////////////////////////////////////////////////////////////////
/// Internal function that resets the restrictions and puts them in an invalid state,
/// so that the first GetBestRestrictions() merge simply adopts the incoming values.
///
/// @param[out]     pRestriction: restrictions to reset to the invalid state
///
/////////////////////////////////////////////////////////////////////////////////////
void GmmLib::GmmTextureCalc::ResetRestrictions(__GMM_BUFFER_TYPE *pRestriction)
{
    pRestriction->MinDepth = 0xffffffff;
}
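
/* Editorial note (not library code): the only field touched here is MinDepth, which
   appears to serve as the "invalid" sentinel that IsRestrictionInvalid() keys off.
   GetResRestrictions() calls ResetRestrictions() first, so the initial merge against
   a platform table simply copies that table, e.g.:

       ResetRestrictions(&Restrictions);                       // mark invalid
       GetBestRestrictions(&Restrictions, &pPlatform->Depth);  // copies Depth
       GetBestRestrictions(&Restrictions, &pPlatform->Vertex); // now merges
*/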


/////////////////////////////////////////////////////////////////////////////////////
/// Internal function that returns the best restrictions depending on how the surface
/// may possibly be used.
///
/// @param[in]      pTexInfo: ptr to ::GMM_TEXTURE_INFO
/// @param[out]     pRestrictions: ptr to surface alignment and size restrictions
///
/////////////////////////////////////////////////////////////////////////////////////
void GmmLib::GmmTextureCalc::GetTexRestrictions(GMM_TEXTURE_INFO * pTexInfo,
                                                __GMM_BUFFER_TYPE *pRestrictions)
{
    GMM_DPF_ENTER;

    GetResRestrictions(pTexInfo, *pRestrictions);

    GMM_DPF_EXIT;
}

/////////////////////////////////////////////////////////////////////////////////////
/// Returns the restrictions that a particular resource must follow on a particular
/// OS or hardware.
///
/// @param[in]      pTexinfo: ptr to ::GMM_TEXTURE_INFO
/// @param[out]     Restrictions: restrictions that this resource must adhere to
/////////////////////////////////////////////////////////////////////////////////////
void GmmLib::GmmTextureCalc::GetResRestrictions(GMM_TEXTURE_INFO * pTexinfo,
                                                __GMM_BUFFER_TYPE &Restrictions)
{
    GMM_DPF_ENTER;
    const GMM_PLATFORM_INFO *pPlatform = NULL;
    GMM_RESOURCE_FLAG        ZeroGpuFlags;

    __GMM_ASSERTPTR(pGmmLibContext, VOIDRETURN);

    pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexinfo, pGmmLibContext);

    // Check that at least one usage flag is set for allocations other than
    // Primary/Shadow/Staging.
    memset(&ZeroGpuFlags.Gpu, 0, sizeof(ZeroGpuFlags.Gpu));
    if((pTexinfo->Type <= RESOURCE_KMD_CHECK_START ||
        pTexinfo->Type >= RESOURCE_KMD_CHECK_END) &&
       !memcmp(&pTexinfo->Flags.Gpu, &ZeroGpuFlags.Gpu, sizeof(ZeroGpuFlags.Gpu)))
    {
        GMM_ASSERTDPF(0, "No GPU Usage specified!");
        return;
    }

    ResetRestrictions(&Restrictions); //Set to Default

    // Get worst case restrictions that match GPU flags set in resource
    switch(pTexinfo->Type)
    {
        case RESOURCE_1D:
        case RESOURCE_2D:
        case RESOURCE_3D:
        case RESOURCE_CUBE:
        case RESOURCE_BUFFER:
        case RESOURCE_SCRATCH:
        case RESOURCE_GDI:
            GetGenericRestrictions(pTexinfo, &Restrictions);
            break;

        case RESOURCE_HW_CONTEXT:
        case RESOURCE_TAG_PAGE:
            if(pTexinfo->Flags.Info.TiledW ||
               pTexinfo->Flags.Info.TiledX ||
               GMM_IS_4KB_TILE(pTexinfo->Flags))
            {
                GMM_ASSERTDPF(0, "Tiled Pref specified for RESOURCE_LINEAR!");
                return;
            }
            GetLinearRestrictions(pTexinfo, &Restrictions);
            break;

        case RESOURCE_PRIMARY:
        case RESOURCE_SHADOW:
        case RESOURCE_STAGING:
            GetPrimaryRestrictions(pTexinfo, &Restrictions);
            break;

        case RESOURCE_NNDI:
            Restrictions = pPlatform->Nndi;
            break;

        case RESOURCE_HARDWARE_MBM:
        case RESOURCE_IFFS_MAPTOGTT:
            //Hardware MBM resource request can come for overlay allocation or normal
            //displayable allocation. So get the restrictions accordingly
            if(pTexinfo->Flags.Gpu.Overlay)
            {
                Restrictions = pPlatform->Overlay;
            }
            else
            {
                Restrictions = pPlatform->HardwareMBM;
            }
            break;

        case RESOURCE_CURSOR:
        case RESOURCE_PWR_CONTEXT:
        case RESOURCE_KMD_BUFFER:
        case RESOURCE_NULL_CONTEXT_INDIRECT_STATE:
        case RESOURCE_PERF_DATA_QUEUE:
        case RESOURCE_GLOBAL_BUFFER:
        case RESOURCE_FBC:
        case RESOURCE_GFX_CLIENT_BUFFER:
            Restrictions = pPlatform->Cursor;
            break;

        case RESOURCE_OVERLAY_DMA:
            Restrictions = pPlatform->NoRestriction;
            break;

        case RESOURCE_GTT_TRANSFER_REGION:
            GetGenericRestrictions(pTexinfo, &Restrictions);
            break;

        case RESOURCE_OVERLAY_INTERMEDIATE_SURFACE:
            Restrictions = pPlatform->Overlay;
            break;

        default:
            GetGenericRestrictions(pTexinfo, &Restrictions);
            GMM_ASSERTDPF(0, "Unknown Resource type");
    }
    // Apply any specific WA

    if(((pTexinfo->Flags.Wa.ILKNeedAvcMprRowStore32KAlign)) ||
       ((pTexinfo->Flags.Wa.ILKNeedAvcDmvBuffer32KAlign)))
    {
        Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(32));
    }

    if(pGmmLibContext->GetWaTable().WaAlignContextImage && (pTexinfo->Type == RESOURCE_HW_CONTEXT))
    {
        Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(64));
    }

    if(pTexinfo->Flags.Gpu.S3d &&
       pTexinfo->Flags.Info.Linear &&
       !pGmmLibContext->GetSkuTable().FtrDisplayEngineS3d)
    {
        Restrictions.Alignment      = PAGE_SIZE;
        Restrictions.PitchAlignment = PAGE_SIZE;
    }

    if(pTexinfo->Flags.Gpu.TiledResource)
    {
        // Need at least 64KB alignment to track tile mappings (h/w or s/w tracking).
        Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(64));

        // Buffer tiled resources are trivially divided into 64KB tiles => Pitch must divide into 64KB tiles
        if(pTexinfo->Type == RESOURCE_BUFFER)
        {
            Restrictions.PitchAlignment = GFX_ALIGN(Restrictions.PitchAlignment, GMM_KBYTE(64));
        }
    }

    // SKL TileY Display needs 1MB alignment.
    if(((pTexinfo->Type == RESOURCE_PRIMARY) ||
        pTexinfo->Flags.Gpu.FlipChain) &&
       (GMM_IS_4KB_TILE(pTexinfo->Flags) ||
        pTexinfo->Flags.Info.TiledYf))
    {
        Restrictions.Alignment = GMM_MBYTE(1);
    }

    if(pTexinfo->Flags.Info.RenderCompressed ||
       pTexinfo->Flags.Info.MediaCompressed ||
       (pGmmLibContext->GetSkuTable().FtrXe2Compression && !pTexinfo->Flags.Info.NotCompressed))
    {
        if(pGmmLibContext->GetSkuTable().FtrFlatPhysCCS)
        {
            Restrictions.Alignment = pGmmLibContext->GetSkuTable().FtrXe2Compression ?
                                     GFX_ALIGN(Restrictions.Alignment, GMM_BYTES(256)) :
                                     GFX_ALIGN(Restrictions.Alignment, GMM_BYTES(128));
        }
        else // only for platforms having an aux-table
        {
            Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment,
                                               (WA16K(pGmmLibContext) ? GMM_KBYTE(16) :
                                                WA64K(pGmmLibContext) ? GMM_KBYTE(64) :
                                                                        GMM_MBYTE(1)));
        }
    }

    GMM_DPF_EXIT;
}
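
/* Illustrative flow sketch (not part of the library): a caller typically obtains the
   restrictions before computing a layout. DesiredPitch is a placeholder; the macros
   and __GMM_BUFFER_TYPE fields used are real.

       __GMM_BUFFER_TYPE Restrictions = {0};
       pTextureCalc->GetResRestrictions(pTexInfo, Restrictions);

       GMM_GFX_SIZE_T Pitch = GFX_MAX(DesiredPitch, Restrictions.MinPitch);
       Pitch                = GFX_ALIGN(Pitch, Restrictions.PitchAlignment);
       __GMM_ASSERT(Pitch <= Restrictions.MaxPitch);
*/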

/////////////////////////////////////////////////////////////////////////////////////
/// Calculates surface size based on Non Aligned ExistingSysMem restrictions.
///
/// @return     ::GMM_STATUS
/////////////////////////////////////////////////////////////////////////////////////
GMM_STATUS GmmLib::GmmResourceInfoCommon::ApplyExistingSysMemRestrictions()
{
    const GMM_PLATFORM_INFO *pPlatform;

    // Handle Minimal Restriction ExistingSysMem Requirements...
    GMM_GFX_SIZE_T    AdditionalPaddingBytes = 0;
    GMM_GFX_SIZE_T    AdditionalPaddingRows  = 0;
    GMM_GFX_SIZE_T    BaseAlignment          = 1; // 1 = Byte Alignment
    GMM_GFX_SIZE_T    EndAlignment           = 1; // 1 = Byte Alignment
    GMM_GFX_SIZE_T    SizePadding            = 1; // 1 = Byte Padding
    uint32_t          CompressHeight, CompressWidth, CompressDepth;
    GMM_GFX_SIZE_T    Width, Height;
    GMM_TEXTURE_INFO *pTexInfo = &Surf;
    GMM_TEXTURE_CALC *pTextureCalc;

    GMM_DPF_ENTER;

    pPlatform    = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo, GetGmmLibContext());
    pTextureCalc = GMM_OVERRIDE_TEXTURE_CALC(pTexInfo, GetGmmLibContext());

    Height = pTexInfo->BaseHeight;
    Width  = pTexInfo->BaseWidth;

#define UPDATE_BASE_ALIGNMENT(a)                                                                                                              \
    {                                                                                                                                         \
        __GMM_ASSERT((GFX_MAX(BaseAlignment, a) % GFX_MIN(BaseAlignment, a)) == 0); /* Revisit if ever have to support complex alignments. */ \
        BaseAlignment = GFX_MAX(BaseAlignment, a);                                                                                            \
    }

#define UPDATE_PADDING(p)                       \
    {                                           \
        SizePadding = GFX_MAX(SizePadding, p);  \
    }

#define UPDATE_ADDITIONAL_ROWS(r)                                   \
    {                                                               \
        AdditionalPaddingRows = GFX_MAX(AdditionalPaddingRows, r);  \
    }

#define UPDATE_ADDITIONAL_BYTES(b)                                    \
    {                                                                 \
        AdditionalPaddingBytes = GFX_MAX(AdditionalPaddingBytes, b);  \
    }

#define UPDATE_END_ALIGNMENT(a)                                                                                                             \
    {                                                                                                                                       \
        __GMM_ASSERT((GFX_MAX(EndAlignment, a) % GFX_MIN(EndAlignment, a)) == 0); /* Revisit if ever have to support complex alignments. */ \
        EndAlignment = GFX_MAX(EndAlignment, a);                                                                                            \
    }

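    /* Editorial note (not library code): each UPDATE_* macro only ever tightens its
       running value, so the order of the usage-flag blocks below does not matter.
       For example, UPDATE_BASE_ALIGNMENT(4) followed by UPDATE_BASE_ALIGNMENT(16)
       leaves BaseAlignment == 16, while mixing non-divisible values such as 4 and 6
       would trip the "complex alignments" assert above. */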

    if(!pTexInfo->Pitch)
    {
        __GMM_ASSERT(pTexInfo->Type == RESOURCE_1D); // Clients can leave pitch zero for 1D, and we'll fill-in...
        pTexInfo->Pitch = Width * (pTexInfo->BitsPerPixel >> 3);
    }

    __GMM_ASSERT( // Currently limiting our support...
    pTexInfo->Flags.Gpu.NoRestriction ||
    pTexInfo->Flags.Gpu.Index ||
    pTexInfo->Flags.Gpu.RenderTarget ||
    pTexInfo->Flags.Gpu.Texture ||
    pTexInfo->Flags.Gpu.Vertex);

    __GMM_ASSERT( // Trivial, Linear Surface...
    ((pTexInfo->Type == RESOURCE_BUFFER) || (pTexInfo->Type == RESOURCE_1D) || (pTexInfo->Type == RESOURCE_2D)) &&
    (pTexInfo->MaxLod == 0) &&
    !GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]) &&
    !GmmIsPlanar(pTexInfo->Format) &&
    ((pTexInfo->ArraySize <= 1) || (pTexInfo->Type == RESOURCE_BUFFER)));

    __GMM_ASSERT( // Valid Surface...
    (Width > 0) &&
    !((pTexInfo->Type == RESOURCE_BUFFER) && GmmIsYUVPacked(pTexInfo->Format)));

    // Convert to compression blocks, if applicable...
    if(GmmIsCompressed(GetGmmLibContext(), pTexInfo->Format))
    {
        pTextureCalc->GetCompressionBlockDimensions(pTexInfo->Format, &CompressWidth, &CompressHeight, &CompressDepth);

        Width  = GFX_CEIL_DIV(Width, CompressWidth);
        Height = GFX_CEIL_DIV(Height, CompressHeight);
    }

    __GMM_ASSERT( // Valid Surface Follow-Up...
    (pTexInfo->Pitch >= (Width * (pTexInfo->BitsPerPixel >> 3))));

    if(!pTexInfo->Flags.Gpu.NoRestriction && !pTexInfo->Flags.Info.SVM && !pTexInfo->Flags.Info.Linear)
    {
        if(pTexInfo->Flags.Gpu.Index) /////////////////////////////////////////////////////////
        {
            __GMM_ASSERT(!(
            pTexInfo->Flags.Gpu.RenderTarget ||
            pTexInfo->Flags.Gpu.Texture ||
            pTexInfo->Flags.Gpu.Vertex)); // Can explore if needed what combos make sense--and how requirements should combine.

            // 3DSTATE_INDEX_BUFFER...
            UPDATE_BASE_ALIGNMENT(4); // 32-bit worst-case, since GMM doesn't receive element-size from clients.
            if(GetGmmLibContext()->GetWaTable().WaAlignIndexBuffer)
            {
                UPDATE_END_ALIGNMENT(64);
            }
            else
            {
                UPDATE_END_ALIGNMENT(1);
            }
        }

        if(pTexInfo->Flags.Gpu.Vertex) ////////////////////////////////////////////////////////
        {
            __GMM_ASSERT(!(
            pTexInfo->Flags.Gpu.Index ||
            pTexInfo->Flags.Gpu.RenderTarget ||
            pTexInfo->Flags.Gpu.Texture)); // Can explore if needed what combos make sense--and how requirements should combine.

            // VERTEX_BUFFER_STATE...
            UPDATE_BASE_ALIGNMENT(1); // VB's have member alignment requirements--but it's up to UMD to enforce.
            UPDATE_PADDING(1);
        }

        if(pTexInfo->Flags.Gpu.RenderTarget) //////////////////////////////////////////////////
        {
            uint32_t ElementSize;

            // SURFACE_STATE...
            ElementSize = (pTexInfo->BitsPerPixel >> 3) * (GmmIsYUVPacked(pTexInfo->Format) ? 2 : 1);
            __GMM_ASSERT((pTexInfo->Pitch % ElementSize) == 0);
            UPDATE_BASE_ALIGNMENT(ElementSize);
            UPDATE_PADDING(pTexInfo->Pitch * 2); // "Surface Padding Requirements --> Render Target and Media Surfaces"
        }

        if(pTexInfo->Flags.Gpu.Texture) // (i.e. Sampler Surfaces) ///////////////////////////
        {
            UPDATE_BASE_ALIGNMENT(1); // Sampler supports byte alignment (with performance hit if misaligned).

            if(GetGmmLibContext()->GetWaTable().WaNoMinimizedTrivialSurfacePadding)
            {
                if(pTexInfo->Type == RESOURCE_BUFFER)
                {
                    if(GetGmmLibContext()->GetWaTable().WaNoBufferSamplerPadding)
                    {
                        // Client agreeing to take responsibility for flushing L3 after sampling/etc.
                    }
                    else
                    {
                        // GMM currently receives GENERIC_8BIT for
                        // RESOURCE_BUFFER creations, so we have to assume the
                        // worst-case sample size of 128-bit (unless we alter
                        // our interface meaning):
                        uint32_t ElementSize = 16;

                        // "Surface Padding Requirements --> Sampling Engine Surfaces"
                        UPDATE_PADDING(ElementSize * ((GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) == IGFX_GEN8_CORE) ? 512 : 256));
                        UPDATE_ADDITIONAL_BYTES(16);
                    }
                }
                else // RESOURCE_1D/2D...
                {
                    /* Sampler needs Alignment Unit padding--
                       but sampler arch confirms that's overly conservative
                       padding--and for trivial (linear, single-subresource)
                       2D's, even-row (quad-row on BDW.A0) plus additional
                       64B padding is sufficient. (E.g. pitch overfetch will
                       be caught by subsequent rows or the additional 64B.) */

                    __GMM_ASSERT((GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) <= IGFX_GEN8_CORE));

                    if(GmmIsCompressed(GetGmmLibContext(), pTexInfo->Format))
                    {
                        // "For compressed textures...padding at the bottom of the surface is to an even compressed row."
                        UPDATE_PADDING(pTexInfo->Pitch * 2); // (Sampler arch confirmed that even-row is sufficient on BDW despite BDW's 4x4 sampling, since this req is from L2 instead of L1.)
                    }
                    else
                    {
                        UPDATE_PADDING(pTexInfo->Pitch * ((GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) == IGFX_GEN8_CORE) ? 4 : 2)); // Sampler Fetch Rows: BDW ? 4 : 2
                    }

                    // "For packed YUV, 96 bpt, 48 bpt, and 24 bpt surface formats, additional padding is required."
                    if(GmmIsYUVPacked(pTexInfo->Format) || (pTexInfo->BitsPerPixel == 96) || (pTexInfo->BitsPerPixel == 48) || (pTexInfo->BitsPerPixel == 24))
                    {
                        UPDATE_ADDITIONAL_BYTES(16);
                        UPDATE_ADDITIONAL_ROWS(1);
                    }

                    /* "For linear surfaces, additional padding of 64
                       bytes is required at the bottom of the surface."
                       (Sampler arch confirmed the 64 bytes can overlap with
                       the other "additional 16 bytes" mentions in that section.) */
                    UPDATE_ADDITIONAL_BYTES(64);
                }
            }
            else
            {
                /* For SURFTYPE_BUFFER, SURFTYPE_1D, and
                   SURFTYPE_2D non-array, non-MSAA, non-mip-mapped surfaces in
                   linear memory, the only padding requirement is to the next
                   aligned 64-byte boundary beyond the end of the surface. */
                UPDATE_END_ALIGNMENT(64);
            }
        }
    }
    else // Gpu.NoRestriction...
    {
        // Clients specify NoRestriction at their own risk--e.g. it can be
        // appropriate when using IA-Coherent L3 combined with L3 being in
        // unified/"Rest" mode (where there won't be write-->read-only
        // collisions on unintentionally shared cachelines).
    }

    { //Finally calculate surf size
        GMM_GFX_SIZE_T OriginalEnd, RequiredSize;

        ExistingSysMem.pVirtAddress =
        (ExistingSysMem.pExistingSysMem & (PAGE_SIZE - 1)) ?
        ((uint64_t)GFX_ALIGN(ExistingSysMem.pExistingSysMem,
                             BaseAlignment)) :
        ExistingSysMem.pExistingSysMem;

        ExistingSysMem.pGfxAlignedVirtAddress =
        (uint64_t)GFX_ALIGN(
        (uint64_t)ExistingSysMem.pVirtAddress, PAGE_SIZE);

        __GMM_ASSERT((ExistingSysMem.pVirtAddress % BaseAlignment) == 0);

        RequiredSize = pTexInfo->Pitch * Height;

        RequiredSize =
        GFX_ALIGN(RequiredSize, SizePadding) +
        (AdditionalPaddingRows * pTexInfo->Pitch) +
        AdditionalPaddingBytes;

        OriginalEnd = ExistingSysMem.pVirtAddress + RequiredSize;
        RequiredSize += GFX_ALIGN(OriginalEnd, EndAlignment) - OriginalEnd;

        //Ensure sufficient ExistingSysMem available.
        if(ExistingSysMem.Size < RequiredSize)
        {
            return GMM_ERROR;
        }

        Surf.Size = RequiredSize;
    }

    GMM_DPF_EXIT;

    return GMM_SUCCESS;
}
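
/* Worked example (comment only, not library code): suppose a hypothetical 2D render
   target with Pitch = 4096 and Height = 100 lands in the render-target branch above,
   so SizePadding = 2 * Pitch and no additional rows/bytes are requested. Then

       RequiredSize = GFX_ALIGN(4096 * 100, 2 * 4096) + 0 + 0 = 409600 bytes,

   and the function returns GMM_ERROR if ExistingSysMem.Size is smaller than that;
   otherwise Surf.Size is set to RequiredSize.
*/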