/*==============================================================================
Copyright(c) 2017 Intel Corporation

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files(the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and / or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
============================================================================*/

// #pragma once <-- Don't use with this file! (Multiple inclusions, different uses.)

// Format Group Selection...
#if(defined(GMM_FORMAT_INCLUDE_ASTC_FORMATS_ONLY))
    #define INCLUDE_ASTC_FORMATS
#elif(defined(GMM_FORMAT_INCLUDE_SURFACESTATE_FORMATS_ONLY))
    #define INCLUDE_SURFACESTATE_FORMATS
#else
    #define INCLUDE_ASTC_FORMATS
    #define INCLUDE_MISC_FORMATS
    #define INCLUDE_SURFACESTATE_FORMATS
#endif

// Table Macros (for Readability)
#define A           1
#define ALWAYS      1
#define ASTC_3D     SKU(FtrAstc3D)
#define ASTC_HDR_2D SKU(FtrAstcHdr2D)
#define ASTC_LDR_2D SKU(FtrAstcLdr2D)
#define GEN         GMM_FORMAT_GEN
#define NA          GMM_SURFACESTATE_FORMAT_INVALID
#define R           1
#define SKU         GMM_FORMAT_SKU
#define VLV2        GFX_IS_PRODUCT(Data.Platform, IGFX_VALLEYVIEW)
#define WA          GMM_FORMAT_WA
#define x           0

#if(!defined(__GMM_KMD__))
#define NC GMM_COMPR_FORMAT_INVALID(pGmmLibContext)
#else
#define NC GMM_COMPR_FORMAT_INVALID
#endif

#define MC(n) ((n) | (0x1 << 5)) // GMM_FLATCCS_MIN_MC_FORMAT - 1

#define FC(ver, bpc, fmtstr, bpcstr, typestr)                                              \
    (ver == 1 || (SKU(FtrE2ECompression) &&                                                \
                  !(SKU(FtrFlatPhysCCS) ||                                                 \
                    SKU(FtrUnified3DMediaCompressionFormats) ||                            \
                    SKU(FtrXe2Compression)))) ?                                            \
        ((bpc == 16) ? GMM_E2ECOMP_FORMAT_RGBAFLOAT16 :                                    \
         (bpc == 32) ? GMM_E2ECOMP_FORMAT_R32G32B32A32_FLOAT :                             \
         (bpc == 8)  ? GMM_E2ECOMP_FORMAT_ARGB8b :                                         \
         (bpc == x)  ? GMM_E2ECOMP_FORMAT_##fmtstr :                                       \
         NC) :                                                                             \
    (ver == 2 || (SKU(FtrFlatPhysCCS) &&                                                   \
                  !(SKU(FtrUnified3DMediaCompressionFormats) ||                            \
                    SKU(FtrXe2Compression)))) ?                                            \
        (GMM_FLATCCS_FORMAT_##fmtstr##bpcstr##typestr) :                                   \
    (ver == 3 || (SKU(FtrUnified3DMediaCompressionFormats) &&                              \
                  !SKU(FtrXe2Compression))) ?                                              \
        (GMM_UNIFIED_COMP_FORMAT_##fmtstr##bpcstr##typestr) :                              \
    (ver == 4 || SKU(FtrXe2Compression)) ?                                                 \
        (GMM_XE2_UNIFIED_COMP_FORMAT_##fmtstr##bpcstr##typestr) :                          \
    NC
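
// Usage note (illustrative, not from the table itself): a consumer that only needs one
// format group can select it before including this file--e.g.:
//
//   #define GMM_FORMAT_INCLUDE_SURFACESTATE_FORMATS_ONLY
//   #include "GmmFormatTable.h"
//
// The FC() helper above picks the compression-format token for whichever compression
// scheme the platform reports. Tracing the macro for, e.g., FC(4, 8, RGBA, 8, U):
// legacy E2E-compression SKUs get GMM_E2ECOMP_FORMAT_ARGB8b, FtrFlatPhysCCS SKUs get
// GMM_FLATCCS_FORMAT_RGBA8U, FtrUnified3DMediaCompressionFormats SKUs get
// GMM_UNIFIED_COMP_FORMAT_RGBA8U, and everything else (ver == 4) falls through to
// GMM_XE2_UNIFIED_COMP_FORMAT_RGBA8U.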

/****************************************************************************\
  GMM FORMAT TABLE
  (See bottom of file for more info.)

  Supported (ALWAYS / *) ------------------------------------------------------------------------o
  SURFACE_STATE.CompressionFormat (or NC) ----------------------------o                           |
  RCS SURFACE_STATE.Format (or NA) ---------------------------o       |                           |
  ASTC Format (A / x) -----------------------------------o    |       |                           |
  Render Target Eligibility (R / x / *) --------------o  |    |       |                           |
  Element Depth (Pixels) --------------------------o  |  |    |       |                           |
  Element Height (Pixels) ----------------------o  |  |  |    |       |                           |
  Element Width (Pixels) --------------------o  |  |  |  |    |       |                           |
  Bits-per-Element -----------------------o  |  |  |  |  |    |       |                           |
                                          |  |  |  |  |  |    |       |                           |
   Name                                  bpe  w  h  d  R  A  RCS.SS  CompressFormat              Available
----------------------------------------------------------------------------------------------------------*/
#ifdef INCLUDE_SURFACESTATE_FORMATS
GMM_FORMAT( A1B5G5R5_UNORM             ,  16,  1,  1, 1, R, x, 0x124, FC(4, x, RGB5A1, , )     , GEN(8) || VLV2 )
GMM_FORMAT( A4B4G4R4_UNORM             ,  16,  1,  1, 1, R, x, 0x125, FC(4, x, RGB5A1, , )     , GEN(8) )
GMM_FORMAT( A4P4_UNORM_PALETTE0        ,   8,  1,  1, 1, R, x, 0x148, NC                       , ALWAYS )
GMM_FORMAT( A4P4_UNORM_PALETTE1        ,   8,  1,  1, 1, R, x, 0x14F, NC                       , ALWAYS )
GMM_FORMAT( A8_UNORM                   ,   8,  1,  1, 1, R, x, 0x144, FC(4, 8, R, 8, U)        , GEN(7) )
GMM_FORMAT( A8P8_UNORM_PALETTE0        ,  16,  1,  1, 1, R, x, 0x10F, NC                       , ALWAYS )
GMM_FORMAT( A8P8_UNORM_PALETTE1        ,  16,  1,  1, 1, R, x, 0x110, NC                       , ALWAYS )
GMM_FORMAT( A8X8_UNORM_G8R8_SNORM      ,  32,  1,  1, 1, R, x, 0x0E7, NC                       , ALWAYS )
GMM_FORMAT( A16_FLOAT                  ,  16,  1,  1, 1, R, x, 0x117, NC                       , GEN(7) )
GMM_FORMAT( A16_UNORM                  ,  16,  1,  1, 1, R, x, 0x113, NC                       , GEN(7) )
GMM_FORMAT( A24X8_UNORM                ,  32,  1,  1, 1, R, x, 0x0E2, NC                       , GEN(7) )
GMM_FORMAT( A32_FLOAT                  ,  32,  1,  1, 1, R, x, 0x0E5, NC                       , GEN(7) )
GMM_FORMAT( A32_UNORM                  ,  32,  1,  1, 1, R, x, 0x0DE, NC                       , GEN(7) )
GMM_FORMAT( A32X32_FLOAT               ,  64,  1,  1, 1, R, x, 0x090, NC                       , ALWAYS )
GMM_FORMAT( B4G4R4A4_UNORM             ,  16,  1,  1, 1, R, x, 0x104, FC(4, x, RGBA4, , )      , ALWAYS )
GMM_FORMAT( B4G4R4A4_UNORM_SRGB        ,  16,  1,  1, 1, R, x, 0x105, FC(4, x, RGBA4, , )      , ALWAYS )
GMM_FORMAT( B5G5R5A1_UNORM             ,  16,  1,  1, 1, R, x, 0x102, FC(4, x, RGB5A1, , )     , ALWAYS )
GMM_FORMAT( B5G5R5A1_UNORM_SRGB        ,  16,  1,  1, 1, R, x, 0x103, FC(4, x, RGB5A1, , )     , ALWAYS )
GMM_FORMAT( B5G5R5X1_UNORM             ,  16,  1,  1, 1, R, x, 0x11A, FC(4, x, RGB5A1, , )     , ALWAYS )
GMM_FORMAT( B5G5R5X1_UNORM_SRGB        ,  16,  1,  1, 1, R, x, 0x11B, FC(4, x, RGB5A1, , )     , ALWAYS )
GMM_FORMAT( B5G6R5_UNORM               ,  16,  1,  1, 1, R, x, 0x100, FC(4, x, B5G6R5, , )     , ALWAYS )
GMM_FORMAT( B5G6R5_UNORM_SRGB          ,  16,  1,  1, 1, R, x, 0x101, FC(4, x, B5G6R5, , )     , ALWAYS )
GMM_FORMAT( B8G8R8A8_UNORM             ,  32,  1,  1, 1, R, x, 0x0C0, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( B8G8R8A8_UNORM_SRGB        ,  32,  1,  1, 1, R, x, 0x0C1, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( B8G8R8X8_UNORM             ,  32,  1,  1, 1, R, x, 0x0E9, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( B8G8R8X8_UNORM_SRGB        ,  32,  1,  1, 1, R, x, 0x0EA, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( B8X8_UNORM_G8R8_SNORM      ,  32,  1,  1, 1, R, x, 0x0E8, NC                       , ALWAYS )
GMM_FORMAT( B10G10R10A2_SINT           ,  32,  1,  1, 1, R, x, 0x1BB, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( B10G10R10A2_SNORM          ,  32,  1,  1, 1, R, x, 0x1B7, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( B10G10R10A2_SSCALED        ,  32,  1,  1, 1, R, x, 0x1B9, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( B10G10R10A2_UINT           ,  32,  1,  1, 1, R, x, 0x1BA, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( B10G10R10A2_UNORM          ,  32,  1,  1, 1, R, x, 0x0D1, FC(4, x, RGB10A2, , )    , ALWAYS )
GMM_FORMAT( B10G10R10A2_UNORM_SRGB     ,  32,  1,  1, 1, R, x, 0x0D2, FC(4, x, RGB10A2, , )    , ALWAYS )
GMM_FORMAT( B10G10R10A2_USCALED        ,  32,  1,  1, 1, R, x, 0x1B8, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( B10G10R10X2_UNORM          ,  32,  1,  1, 1, R, x, 0x0EE, FC(4, x, RGB10A2, , )    , ALWAYS )
GMM_FORMAT( BC1_UNORM                  ,  64,  4,  4, 1, x, x, 0x186, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC1_UNORM_SRGB             ,  64,  4,  4, 1, x, x, 0x18B, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC2_UNORM                  , 128,  4,  4, 1, x, x, 0x187, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC2_UNORM_SRGB             , 128,  4,  4, 1, x, x, 0x18C, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC3_UNORM                  , 128,  4,  4, 1, x, x, 0x188, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC3_UNORM_SRGB             , 128,  4,  4, 1, x, x, 0x18D, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC4_SNORM                  ,  64,  4,  4, 1, x, x, 0x199, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC4_UNORM                  ,  64,  4,  4, 1, x, x, 0x189, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC5_SNORM                  , 128,  4,  4, 1, x, x, 0x19A, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC5_UNORM                  , 128,  4,  4, 1, x, x, 0x18A, FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( BC6H_SF16                  , 128,  4,  4, 1, x, x, 0x1A1, FC(4, x, ML8, , )        , GEN(7) )
GMM_FORMAT( BC6H_UF16                  , 128,  4,  4, 1, x, x, 0x1A4, FC(4, x, ML8, , )        , GEN(7) )
GMM_FORMAT( BC7_UNORM                  , 128,  4,  4, 1, x, x, 0x1A2, FC(4, x, ML8, , )        , GEN(7) )
GMM_FORMAT( BC7_UNORM_SRGB             , 128,  4,  4, 1, x, x, 0x1A3, FC(4, x, ML8, , )        , GEN(7) )
GMM_FORMAT( DXT1_RGB                   ,  64,  4,  4, 1, x, x, 0x191, NC                       , ALWAYS ) // verify for ML8
GMM_FORMAT( DXT1_RGB_SRGB              ,  64,  4,  4, 1, x, x, 0x180, NC                       , ALWAYS ) // verify for ML8
GMM_FORMAT( EAC_R11                    ,  64,  4,  4, 1, x, x, 0x1AB, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( EAC_RG11                   , 128,  4,  4, 1, x, x, 0x1AC, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( EAC_SIGNED_R11             ,  64,  4,  4, 1, x, x, 0x1AD, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( EAC_SIGNED_RG11            , 128,  4,  4, 1, x, x, 0x1AE, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( ETC1_RGB8                  ,  64,  4,  4, 1, x, x, 0x1A9, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_EAC_RGBA8             , 128,  4,  4, 1, x, x, 0x1C2, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_EAC_SRGB8_A8          , 128,  4,  4, 1, x, x, 0x1C3, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_RGB8                  ,  64,  4,  4, 1, x, x, 0x1AA, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_RGB8_PTA              ,  64,  4,  4, 1, x, x, 0x1C0, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_SRGB8                 ,  64,  4,  4, 1, x, x, 0x1AF, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( ETC2_SRGB8_PTA             ,  64,  4,  4, 1, x, x, 0x1C1, FC(4, x, ML8, , )        , GEN(8) || VLV2 )
GMM_FORMAT( FXT1                       , 128,  8,  4, 1, x, x, 0x192, NC                       , ALWAYS )
GMM_FORMAT( I8_SINT                    ,   8,  1,  1, 1, R, x, 0x155, NC                       , GEN(9) )
GMM_FORMAT( I8_UINT                    ,   8,  1,  1, 1, R, x, 0x154, NC                       , GEN(9) )
GMM_FORMAT( I8_UNORM                   ,   8,  1,  1, 1, R, x, 0x145, NC                       , ALWAYS )
GMM_FORMAT( I16_FLOAT                  ,  16,  1,  1, 1, R, x, 0x115, NC                       , ALWAYS )
GMM_FORMAT( I16_UNORM                  ,  16,  1,  1, 1, R, x, 0x111, NC                       , ALWAYS )
GMM_FORMAT( I24X8_UNORM                ,  32,  1,  1, 1, R, x, 0x0E0, NC                       , ALWAYS )
GMM_FORMAT( I32_FLOAT                  ,  32,  1,  1, 1, R, x, 0x0E3, NC                       , ALWAYS )
GMM_FORMAT( I32X32_FLOAT               ,  64,  1,  1, 1, R, x, 0x092, NC                       , ALWAYS )
GMM_FORMAT( L8_SINT                    ,   8,  1,  1, 1, R, x, 0x153, NC                       , GEN(9) )
GMM_FORMAT( L8_UINT                    ,   8,  1,  1, 1, R, x, 0x152, NC                       , GEN(9) )
GMM_FORMAT( L8_UNORM                   ,   8,  1,  1, 1, R, x, 0x146, NC                       , ALWAYS )
GMM_FORMAT( L8_UNORM_SRGB              ,   8,  1,  1, 1, R, x, 0x14C, NC                       , ALWAYS )
GMM_FORMAT( L8A8_SINT                  ,  16,  1,  1, 1, R, x, 0x127, NC                       , GEN(9) )
GMM_FORMAT( L8A8_UINT                  ,  16,  1,  1, 1, R, x, 0x126, NC                       , GEN(9) )
GMM_FORMAT( L8A8_UNORM                 ,  16,  1,  1, 1, R, x, 0x114, NC                       , ALWAYS )
GMM_FORMAT( L8A8_UNORM_SRGB            ,  16,  1,  1, 1, R, x, 0x118, NC                       , ALWAYS )
GMM_FORMAT( L16_FLOAT                  ,  16,  1,  1, 1, R, x, 0x116, NC                       , ALWAYS )
GMM_FORMAT( L16_UNORM                  ,  16,  1,  1, 1, R, x, 0x112, NC                       , ALWAYS )
GMM_FORMAT( L16A16_FLOAT               ,  32,  1,  1, 1, R, x, 0x0F0, NC                       , ALWAYS )
GMM_FORMAT( L16A16_UNORM               ,  32,  1,  1, 1, R, x, 0x0DF, NC                       , ALWAYS )
GMM_FORMAT( L24X8_UNORM                ,  32,  1,  1, 1, R, x, 0x0E1, NC                       , ALWAYS )
GMM_FORMAT( L32_FLOAT                  ,  32,  1,  1, 1, R, x, 0x0E4, NC                       , ALWAYS )
GMM_FORMAT( L32_UNORM                  ,  32,  1,  1, 1, R, x, 0x0DD, NC                       , ALWAYS )
GMM_FORMAT( L32A32_FLOAT               ,  64,  1,  1, 1, R, x, 0x08A, NC                       , ALWAYS )
GMM_FORMAT( L32X32_FLOAT               ,  64,  1,  1, 1, R, x, 0x091, NC                       , ALWAYS )
GMM_FORMAT( MONO8                      ,   1,  1,  1, 1, R, x, 0x18E, NC                       , x ) // No current GMM support by this name.
GMM_FORMAT( P2_UNORM_PALETTE0          ,   2,  1,  1, 1, R, x, 0x184, NC                       , x ) // No current GMM support by this name.
GMM_FORMAT( P2_UNORM_PALETTE1          ,   2,  1,  1, 1, R, x, 0x185, NC                       , x ) // "
GMM_FORMAT( P4A4_UNORM_PALETTE0        ,   8,  1,  1, 1, R, x, 0x147, NC                       , ALWAYS )
GMM_FORMAT( P4A4_UNORM_PALETTE1        ,   8,  1,  1, 1, R, x, 0x14E, NC                       , ALWAYS )
GMM_FORMAT( P8_UNORM_PALETTE0          ,   8,  1,  1, 1, R, x, 0x14B, NC                       , ALWAYS )
GMM_FORMAT( P8_UNORM_PALETTE1          ,   8,  1,  1, 1, R, x, 0x14D, NC                       , ALWAYS )
GMM_FORMAT( P8A8_UNORM_PALETTE0        ,  16,  1,  1, 1, R, x, 0x122, NC                       , ALWAYS )
GMM_FORMAT( P8A8_UNORM_PALETTE1        ,  16,  1,  1, 1, R, x, 0x123, NC                       , ALWAYS )
GMM_FORMAT( PACKED_422_16              ,  64,  2,  1, 1, R, x, 0x1A7, NC                       , GEN(12) )
GMM_FORMAT( PLANAR_420_8               ,   8,  1,  1, 1, R, x, 0x1A5, NC                       , x ) // No current GMM support by this name.
GMM_FORMAT( PLANAR_420_16              ,  16,  1,  1, 1, R, x, 0x1A6, NC                       , x ) // "
GMM_FORMAT( PLANAR_422_8               ,   8,  1,  1, 1, R, x, 0x00F, NC                       , x ) // <-- TODO(Minor): Remove this HW-internal format.
GMM_FORMAT( R1_UNORM                   ,   1,  1,  1, 1, R, x, 0x181, NC                       , x ) // "
GMM_FORMAT( R8_SINT                    ,   8,  1,  1, 1, R, x, 0x142, FC(4, 8, R, 8, S1)       , ALWAYS )
GMM_FORMAT( R8_SNORM                   ,   8,  1,  1, 1, R, x, 0x141, FC(4, 8, R, 8, S)        , ALWAYS )
GMM_FORMAT( R8_SSCALED                 ,   8,  1,  1, 1, R, x, 0x149, FC(4, 8, R, 8, S)        , ALWAYS )
GMM_FORMAT( R8_UINT                    ,   8,  1,  1, 1, R, x, 0x143, FC(4, 8, R, 8, U1)       , ALWAYS )
GMM_FORMAT( R8_UNORM                   ,   8,  1,  1, 1, R, x, 0x140, FC(4, 8, R, 8, U)        , ALWAYS )
GMM_FORMAT( R8_USCALED                 ,   8,  1,  1, 1, R, x, 0x14A, FC(4, 8, R, 8, U)        , ALWAYS )
GMM_FORMAT( R8G8_SINT                  ,  16,  1,  1, 1, R, x, 0x108, FC(4, 8, RG, 8, S)       , ALWAYS )
GMM_FORMAT( R8G8_SNORM                 ,  16,  1,  1, 1, R, x, 0x107, FC(4, 8, RG, 8, S)       , ALWAYS )
GMM_FORMAT( R8G8_SSCALED               ,  16,  1,  1, 1, R, x, 0x11C, FC(4, 8, RG, 8, S)       , ALWAYS )
GMM_FORMAT( R8G8_UINT                  ,  16,  1,  1, 1, R, x, 0x109, FC(4, 8, RG, 8, U)       , ALWAYS )
GMM_FORMAT( R8G8_UNORM                 ,  16,  1,  1, 1, R, x, 0x106, FC(4, 8, RG, 8, U)       , ALWAYS )
GMM_FORMAT( R8G8_USCALED               ,  16,  1,  1, 1, R, x, 0x11D, FC(4, 8, RG, 8, U)       , ALWAYS )
GMM_FORMAT( R8G8B8_SINT                ,  24,  1,  1, 1, R, x, 0x1C9, NC                       , GEN(8) )
GMM_FORMAT( R8G8B8_SNORM               ,  24,  1,  1, 1, R, x, 0x194, NC                       , ALWAYS )
GMM_FORMAT( R8G8B8_SSCALED             ,  24,  1,  1, 1, R, x, 0x195, NC                       , ALWAYS )
GMM_FORMAT( R8G8B8_UINT                ,  24,  1,  1, 1, R, x, 0x1C8, NC                       , GEN(8) || VLV2 )
GMM_FORMAT( R8G8B8_UNORM               ,  24,  1,  1, 1, R, x, 0x193, NC                       , ALWAYS )
GMM_FORMAT( R8G8B8_UNORM_SRGB          ,  24,  1,  1, 1, R, x, 0x1A8, NC                       , GEN(7_5) )
GMM_FORMAT( R8G8B8_USCALED             ,  24,  1,  1, 1, R, x, 0x196, NC                       , ALWAYS )
GMM_FORMAT( R8G8B8A8_SINT              ,  32,  1,  1, 1, R, x, 0x0CA, FC(4, 8, RGBA, 8, S)     , ALWAYS )
GMM_FORMAT( R8G8B8A8_SNORM             ,  32,  1,  1, 1, R, x, 0x0C9, FC(4, 8, RGBA, 8, S)     , ALWAYS )
GMM_FORMAT( R8G8B8A8_SSCALED           ,  32,  1,  1, 1, R, x, 0x0F4, FC(4, 8, RGBA, 8, S)     , ALWAYS )
GMM_FORMAT( R8G8B8A8_UINT              ,  32,  1,  1, 1, R, x, 0x0CB, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( R8G8B8A8_UNORM             ,  32,  1,  1, 1, R, x, 0x0C7, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( R8G8B8A8_UNORM_SRGB        ,  32,  1,  1, 1, R, x, 0x0C8, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( R8G8B8A8_USCALED           ,  32,  1,  1, 1, R, x, 0x0F5, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( R8G8B8X8_UNORM             ,  32,  1,  1, 1, R, x, 0x0EB, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( R8G8B8X8_UNORM_SRGB        ,  32,  1,  1, 1, R, x, 0x0EC, FC(4, 8, RGBA, 8, U)     , ALWAYS )
GMM_FORMAT( R9G9B9E5_SHAREDEXP         ,  32,  1,  1, 1, R, x, 0x0ED, NC                       , ALWAYS )
GMM_FORMAT( R10G10B10_FLOAT_A2_UNORM   ,  32,  1,  1, 1, R, x, 0x0D5, FC(4, x, RGB10A2, , )    , GEN(12) )
GMM_FORMAT( R10G10B10_SNORM_A2_UNORM   ,  32,  1,  1, 1, R, x, 0x0C5, FC(4, x, RGB10A2, , )    , ALWAYS )
GMM_FORMAT( R10G10B10A2_SINT           ,  32,  1,  1, 1, R, x, 0x1B6, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( R10G10B10A2_SNORM          ,  32,  1,  1, 1, R, x, 0x1B3, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( R10G10B10A2_SSCALED        ,  32,  1,  1, 1, R, x, 0x1B5, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( R10G10B10A2_UINT           ,  32,  1,  1, 1, R, x, 0x0C4, FC(4, x, RGB10A2, , )    , ALWAYS )
GMM_FORMAT( R10G10B10A2_UNORM          ,  32,  1,  1, 1, R, x, 0x0C2, FC(4, x, RGB10A2, , )    , ALWAYS )
GMM_FORMAT( R10G10B10A2_UNORM_SRGB     ,  32,  1,  1, 1, R, x, 0x0C3, FC(4, x, RGB10A2, , )    , ALWAYS )
GMM_FORMAT( R10G10B10A2_USCALED        ,  32,  1,  1, 1, R, x, 0x1B4, FC(4, x, RGB10A2, , )    , GEN(8) )
GMM_FORMAT( R10G10B10X2_USCALED        ,  32,  1,  1, 1, R, x, 0x0F3, FC(4, x, RGB10A2, , )    , ALWAYS )
GMM_FORMAT( R11G11B10_FLOAT            ,  32,  1,  1, 1, R, x, 0x0D3, FC(4, x, RG11B10, , )    , ALWAYS )
GMM_FORMAT( R16_FLOAT                  ,  16,  1,  1, 1, R, x, 0x10E, FC(4, 16, R, 16, F1)     , ALWAYS )
GMM_FORMAT( R16_SINT                   ,  16,  1,  1, 1, R, x, 0x10C, FC(4, 16, R, 16, S1)     , ALWAYS )
GMM_FORMAT( R16_SNORM                  ,  16,  1,  1, 1, R, x, 0x10B, FC(4, 16, R, 16, S)      , ALWAYS )
GMM_FORMAT( R16_SSCALED                ,  16,  1,  1, 1, R, x, 0x11E, FC(4, 16, R, 16, S)      , ALWAYS )
GMM_FORMAT( R16_UINT                   ,  16,  1,  1, 1, R, x, 0x10D, FC(4, 16, R, 16, U1)     , ALWAYS )
GMM_FORMAT( R16_UNORM                  ,  16,  1,  1, 1, R, x, 0x10A, FC(4, 16, R, 16, U)      , ALWAYS )
GMM_FORMAT( R16_USCALED                ,  16,  1,  1, 1, R, x, 0x11F, FC(4, 16, R, 16, U)      , ALWAYS )
GMM_FORMAT( R16G16_FLOAT               ,  32,  1,  1, 1, R, x, 0x0D0, FC(4, 16, RG, 16, F)     , ALWAYS )
GMM_FORMAT( R16G16_SINT                ,  32,  1,  1, 1, R, x, 0x0CE, FC(4, 16, RG, 16, S)     , ALWAYS )
GMM_FORMAT( R16G16_SNORM               ,  32,  1,  1, 1, R, x, 0x0CD, FC(4, 16, RG, 16, S)     , ALWAYS )
GMM_FORMAT( R16G16_SSCALED             ,  32,  1,  1, 1, R, x, 0x0F6, FC(4, 16, RG, 16, S)     , ALWAYS )
GMM_FORMAT( R16G16_UINT                ,  32,  1,  1, 1, R, x, 0x0CF, FC(4, 16, RG, 16, U)     , ALWAYS )
GMM_FORMAT( R16G16_UNORM               ,  32,  1,  1, 1, R, x, 0x0CC, FC(4, 16, RG, 16, U)     , ALWAYS )
GMM_FORMAT( R16G16_USCALED             ,  32,  1,  1, 1, R, x, 0x0F7, FC(4, 16, RG, 16, U)     , ALWAYS )
GMM_FORMAT( R16G16B16_FLOAT            ,  48,  1,  1, 1, R, x, 0x19B, NC                       , ALWAYS )
GMM_FORMAT( R16G16B16_SINT             ,  48,  1,  1, 1, R, x, 0x1B1, NC                       , GEN(8) )
GMM_FORMAT( R16G16B16_SNORM            ,  48,  1,  1, 1, R, x, 0x19D, NC                       , ALWAYS )
GMM_FORMAT( R16G16B16_SSCALED          ,  48,  1,  1, 1, R, x, 0x19E, NC                       , ALWAYS )
GMM_FORMAT( R16G16B16_UINT             ,  48,  1,  1, 1, R, x, 0x1B0, NC                       , GEN(8) || VLV2 )
GMM_FORMAT( R16G16B16_UNORM            ,  48,  1,  1, 1, R, x, 0x19C, NC                       , ALWAYS )
GMM_FORMAT( R16G16B16_USCALED          ,  48,  1,  1, 1, R, x, 0x19F, NC                       , ALWAYS )
GMM_FORMAT( R16G16B16A16_FLOAT         ,  64,  1,  1, 1, R, x, 0x084, FC(4, 16, RGBA, 16, F)   , ALWAYS )
GMM_FORMAT( R16G16B16A16_SINT          ,  64,  1,  1, 1, R, x, 0x082, FC(4, 16, RGBA, 16, S)   , ALWAYS )
GMM_FORMAT( R16G16B16A16_SNORM         ,  64,  1,  1, 1, R, x, 0x081, FC(4, 16, RGBA, 16, S)   , ALWAYS )
GMM_FORMAT( R16G16B16A16_SSCALED       ,  64,  1,  1, 1, R, x, 0x093, FC(4, 16, RGBA, 16, S)   , ALWAYS )
GMM_FORMAT( R16G16B16A16_UINT          ,  64,  1,  1, 1, R, x, 0x083, FC(4, 16, RGBA, 16, U)   , ALWAYS )
GMM_FORMAT( R16G16B16A16_UNORM         ,  64,  1,  1, 1, R, x, 0x080, FC(4, 16, RGBA, 16, U)   , ALWAYS )
GMM_FORMAT( R16G16B16A16_USCALED       ,  64,  1,  1, 1, R, x, 0x094, FC(4, 16, RGBA, 16, U)   , ALWAYS )
GMM_FORMAT( R16G16B16X16_FLOAT         ,  64,  1,  1, 1, R, x, 0x08F, FC(4, 16, RGBA, 16, F)   , ALWAYS )
GMM_FORMAT( R16G16B16X16_UNORM         ,  64,  1,  1, 1, R, x, 0x08E, FC(4, 16, RGBA, 16, U)   , ALWAYS )
GMM_FORMAT( R24_UNORM_X8_TYPELESS      ,  32,  1,  1, 1, R, x, 0x0D9, FC(4, 32, R, 32, U1)     , ALWAYS )
GMM_FORMAT( R32_FLOAT                  ,  32,  1,  1, 1, R, x, 0x0D8, FC(4, 32, R, 32, F1)     , ALWAYS )
GMM_FORMAT( R32_FLOAT_X8X24_TYPELESS   ,  64,  1,  1, 1, R, x, 0x088, FC(4, 32, R, 32, F)      , ALWAYS )
GMM_FORMAT( R32_SFIXED                 ,  32,  1,  1, 1, R, x, 0x1B2, FC(4, 32, R, 32, S)      , GEN(8) )
GMM_FORMAT( R32_SINT                   ,  32,  1,  1, 1, R, x, 0x0D6, FC(4, 32, R, 32, S1)     , ALWAYS )
GMM_FORMAT( R32_SNORM                  ,  32,  1,  1, 1, R, x, 0x0F2, FC(4, 32, R, 32, S)      , ALWAYS )
GMM_FORMAT( R32_SSCALED                ,  32,  1,  1, 1, R, x, 0x0F8, FC(4, 32, R, 32, S)      , ALWAYS )
GMM_FORMAT( R32_UINT                   ,  32,  1,  1, 1, R, x, 0x0D7, FC(4, 32, R, 32, U1)     , ALWAYS )
GMM_FORMAT( R32_UNORM                  ,  32,  1,  1, 1, R, x, 0x0F1, FC(4, 32, R, 32, U)      , ALWAYS )
GMM_FORMAT( R32_USCALED                ,  32,  1,  1, 1, R, x, 0x0F9, FC(4, 32, R, 32, U)      , ALWAYS )
GMM_FORMAT( R32G32_FLOAT               ,  64,  1,  1, 1, R, x, 0x085, FC(4, 32, RG, 32, F)     , ALWAYS )
GMM_FORMAT( R32G32_SFIXED              ,  64,  1,  1, 1, R, x, 0x0A0, FC(4, 32, RG, 32, S)     , ALWAYS )
GMM_FORMAT( R32G32_SINT                ,  64,  1,  1, 1, R, x, 0x086, FC(4, 32, RG, 32, S)     , ALWAYS )
GMM_FORMAT( R32G32_SNORM               ,  64,  1,  1, 1, R, x, 0x08C, FC(4, 32, RG, 32, S)     , ALWAYS )
GMM_FORMAT( R32G32_SSCALED             ,  64,  1,  1, 1, R, x, 0x095, FC(4, 32, RG, 32, S)     , ALWAYS )
GMM_FORMAT( R32G32_UINT                ,  64,  1,  1, 1, R, x, 0x087, FC(4, 32, RG, 32, U)     , ALWAYS )
GMM_FORMAT( R32G32_UNORM               ,  64,  1,  1, 1, R, x, 0x08B, FC(4, 32, RG, 32, U)     , ALWAYS )
GMM_FORMAT( R32G32_USCALED             ,  64,  1,  1, 1, R, x, 0x096, FC(4, 32, RG, 32, U)     , ALWAYS )
GMM_FORMAT( R32G32B32_FLOAT            ,  96,  1,  1, 1, R, x, 0x040, NC                       , ALWAYS )
GMM_FORMAT( R32G32B32_SFIXED           ,  96,  1,  1, 1, R, x, 0x050, NC                       , ALWAYS )
GMM_FORMAT( R32G32B32_SINT             ,  96,  1,  1, 1, R, x, 0x041, NC                       , ALWAYS )
GMM_FORMAT( R32G32B32_SNORM            ,  96,  1,  1, 1, R, x, 0x044, NC                       , ALWAYS )
GMM_FORMAT( R32G32B32_SSCALED          ,  96,  1,  1, 1, R, x, 0x045, NC                       , ALWAYS )
GMM_FORMAT( R32G32B32_UINT             ,  96,  1,  1, 1, R, x, 0x042, NC                       , ALWAYS )
GMM_FORMAT( R32G32B32_UNORM            ,  96,  1,  1, 1, R, x, 0x043, NC                       , ALWAYS )
GMM_FORMAT( R32G32B32_USCALED          ,  96,  1,  1, 1, R, x, 0x046, NC                       , ALWAYS )
GMM_FORMAT( R32G32B32A32_FLOAT         , 128,  1,  1, 1, R, x, 0x000, FC(4, 32, RGBA, 32, F)   , ALWAYS )
GMM_FORMAT( R32G32B32A32_SFIXED        , 128,  1,  1, 1, R, x, 0x020, FC(4, 32, RGBA, 32, S)   , ALWAYS )
GMM_FORMAT( R32G32B32A32_SINT          , 128,  1,  1, 1, R, x, 0x001, FC(4, 32, RGBA, 32, S)   , ALWAYS )
GMM_FORMAT( R32G32B32A32_SNORM         , 128,  1,  1, 1, R, x, 0x004, FC(4, 32, RGBA, 32, S)   , ALWAYS )
GMM_FORMAT( R32G32B32A32_SSCALED       , 128,  1,  1, 1, R, x, 0x007, FC(4, 32, RGBA, 32, S)   , ALWAYS )
GMM_FORMAT( R32G32B32A32_UINT          , 128,  1,  1, 1, R, x, 0x002, FC(4, 32, RGBA, 32, U)   , ALWAYS )
GMM_FORMAT( R32G32B32A32_UNORM         , 128,  1,  1, 1, R, x, 0x003, FC(4, 32, RGBA, 32, U)   , ALWAYS )
GMM_FORMAT( R32G32B32A32_USCALED       , 128,  1,  1, 1, R, x, 0x008, FC(4, 32, RGBA, 32, U)   , ALWAYS )
GMM_FORMAT( R32G32B32X32_FLOAT         , 128,  1,  1, 1, R, x, 0x006, FC(4, 32, RGBA, 32, F)   , ALWAYS )
GMM_FORMAT( R5G5_SNORM_B6_UNORM        ,  16,  1,  1, 1, R, x, 0x119, NC                       , ALWAYS )
GMM_FORMAT( R64_FLOAT                  ,  64,  1,  1, 1, R, x, 0x08D, NC                       , ALWAYS )
GMM_FORMAT( R64_PASSTHRU               ,  64,  1,  1, 1, R, x, 0x0A1, NC                       , ALWAYS )
GMM_FORMAT( R64G64_FLOAT               , 128,  1,  1, 1, R, x, 0x005, NC                       , ALWAYS )
GMM_FORMAT( R64G64_PASSTHRU            , 128,  1,  1, 1, R, x, 0x021, NC                       , ALWAYS )
GMM_FORMAT( R64G64B64_FLOAT            , 192,  1,  1, 1, R, x, 0x198, NC                       , ALWAYS )
GMM_FORMAT( R64G64B64_PASSTHRU         , 192,  1,  1, 1, R, x, 0x1BD, NC                       , GEN(8) )
GMM_FORMAT( R64G64B64A64_FLOAT         , 256,  1,  1, 1, R, x, 0x197, NC                       , ALWAYS )
GMM_FORMAT( R64G64B64A64_PASSTHRU      , 256,  1,  1, 1, R, x, 0x1BC, NC                       , GEN(8) )
GMM_FORMAT( RAW                        ,   8,  1,  1, 1, R, x, 0x1FF, NC                       , GEN(7) ) // "8bpp" for current GMM implementation.
GMM_FORMAT( X24_TYPELESS_G8_UINT       ,  32,  1,  1, 1, R, x, 0x0DA, FC(4, 32, R, 32, U1)     , ALWAYS )
GMM_FORMAT( X32_TYPELESS_G8X24_UINT    ,  64,  1,  1, 1, R, x, 0x089, FC(4, 32, RG, 32, U)     , ALWAYS )
GMM_FORMAT( X8B8_UNORM_G8R8_SNORM      ,  32,  1,  1, 1, R, x, 0x0E6, NC                       , ALWAYS )
GMM_FORMAT( Y8_UNORM                   ,   8,  1,  1, 1, R, x, 0x150, FC(4, x, NV12, , _L)     , ALWAYS )
GMM_FORMAT( YCRCB_NORMAL               ,  16,  1,  1, 1, R, x, 0x182, FC(4, x, YUY2, , )       , ALWAYS )
GMM_FORMAT( YCRCB_SWAPUV               ,  16,  1,  1, 1, R, x, 0x18F, FC(4, x, YCRCB_SWAPUV, , ) , ALWAYS )
GMM_FORMAT( YCRCB_SWAPUVY              ,  16,  1,  1, 1, R, x, 0x183, FC(4, x, YCRCB_SWAPUVY, , ), ALWAYS )
GMM_FORMAT( YCRCB_SWAPY                ,  16,  1,  1, 1, R, x, 0x190, FC(4, x, YCRCB_SWAPY, , ), ALWAYS )
#endif // INCLUDE_SURFACESTATE_FORMATS
#ifdef INCLUDE_ASTC_FORMATS
GMM_FORMAT( ASTC_FULL_2D_4x4_FLT16     , 128,  4,  4, 1, x, A, 0x140, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_5x4_FLT16     , 128,  5,  4, 1, x, A, 0x148, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_5x5_FLT16     , 128,  5,  5, 1, x, A, 0x149, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_6x5_FLT16     , 128,  6,  5, 1, x, A, 0x151, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_6x6_FLT16     , 128,  6,  6, 1, x, A, 0x152, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_8x5_FLT16     , 128,  8,  5, 1, x, A, 0x161, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_8x6_FLT16     , 128,  8,  6, 1, x, A, 0x162, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_8x8_FLT16     , 128,  8,  8, 1, x, A, 0x164, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_10x5_FLT16    , 128, 10,  5, 1, x, A, 0x171, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_10x6_FLT16    , 128, 10,  6, 1, x, A, 0x172, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_10x8_FLT16    , 128, 10,  8, 1, x, A, 0x174, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_10x10_FLT16   , 128, 10, 10, 1, x, A, 0x176, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_12x10_FLT16   , 128, 12, 10, 1, x, A, 0x17e, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_2D_12x12_FLT16   , 128, 12, 12, 1, x, A, 0x17f, NC                       , ASTC_HDR_2D )
GMM_FORMAT( ASTC_FULL_3D_3x3x3_FLT16   , 128,  3,  3, 3, x, A, 0x1c0, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_4x3x3_FLT16   , 128,  4,  3, 3, x, A, 0x1d0, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_4x4x3_FLT16   , 128,  4,  4, 3, x, A, 0x1d4, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_4x4x4_FLT16   , 128,  4,  4, 4, x, A, 0x1d5, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_5x4x4_FLT16   , 128,  5,  4, 4, x, A, 0x1e5, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_5x5x4_FLT16   , 128,  5,  5, 4, x, A, 0x1e9, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_5x5x5_FLT16   , 128,  5,  5, 5, x, A, 0x1ea, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_6x5x5_FLT16   , 128,  6,  5, 5, x, A, 0x1fa, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_6x6x5_FLT16   , 128,  6,  6, 5, x, A, 0x1fe, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_FULL_3D_6x6x6_FLT16   , 128,  6,  6, 6, x, A, 0x1ff, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_2D_4x4_FLT16      , 128,  4,  4, 1, x, A, 0x040, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_4x4_U8sRGB     , 128,  4,  4, 1, x, A, 0x000, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_5x4_FLT16      , 128,  5,  4, 1, x, A, 0x048, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_5x4_U8sRGB     , 128,  5,  4, 1, x, A, 0x008, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_5x5_FLT16      , 128,  5,  5, 1, x, A, 0x049, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_5x5_U8sRGB     , 128,  5,  5, 1, x, A, 0x009, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_6x5_FLT16      , 128,  6,  5, 1, x, A, 0x051, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_6x5_U8sRGB     , 128,  6,  5, 1, x, A, 0x011, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_6x6_FLT16      , 128,  6,  6, 1, x, A, 0x052, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_6x6_U8sRGB     , 128,  6,  6, 1, x, A, 0x012, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x5_FLT16      , 128,  8,  5, 1, x, A, 0x061, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x5_U8sRGB     , 128,  8,  5, 1, x, A, 0x021, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x6_FLT16      , 128,  8,  6, 1, x, A, 0x062, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x6_U8sRGB     , 128,  8,  6, 1, x, A, 0x022, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x8_FLT16      , 128,  8,  8, 1, x, A, 0x064, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_8x8_U8sRGB     , 128,  8,  8, 1, x, A, 0x024, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x5_FLT16     , 128, 10,  5, 1, x, A, 0x071, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x5_U8sRGB    , 128, 10,  5, 1, x, A, 0x031, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x6_FLT16     , 128, 10,  6, 1, x, A, 0x072, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x6_U8sRGB    , 128, 10,  6, 1, x, A, 0x032, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x8_FLT16     , 128, 10,  8, 1, x, A, 0x074, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x8_U8sRGB    , 128, 10,  8, 1, x, A, 0x034, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x10_FLT16    , 128, 10, 10, 1, x, A, 0x076, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_10x10_U8sRGB   , 128, 10, 10, 1, x, A, 0x036, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_12x10_FLT16    , 128, 12, 10, 1, x, A, 0x07e, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_12x10_U8sRGB   , 128, 12, 10, 1, x, A, 0x03e, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_12x12_FLT16    , 128, 12, 12, 1, x, A, 0x07f, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_2D_12x12_U8sRGB   , 128, 12, 12, 1, x, A, 0x03f, NC                       , ASTC_LDR_2D )
GMM_FORMAT( ASTC_LDR_3D_3x3x3_U8sRGB   , 128,  3,  3, 3, x, A, 0x080, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_3x3x3_FLT16    , 128,  3,  3, 3, x, A, 0x0c0, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x3x3_U8sRGB   , 128,  4,  3, 3, x, A, 0x090, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x3x3_FLT16    , 128,  4,  3, 3, x, A, 0x0d0, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x4x3_U8sRGB   , 128,  4,  4, 3, x, A, 0x094, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x4x3_FLT16    , 128,  4,  4, 3, x, A, 0x0d4, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x4x4_U8sRGB   , 128,  4,  4, 4, x, A, 0x095, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_4x4x4_FLT16    , 128,  4,  4, 4, x, A, 0x0d5, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x4x4_U8sRGB   , 128,  5,  4, 4, x, A, 0x0a5, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x4x4_FLT16    , 128,  5,  4, 4, x, A, 0x0e5, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x5x4_U8sRGB   , 128,  5,  5, 4, x, A, 0x0a9, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x5x4_FLT16    , 128,  5,  5, 4, x, A, 0x0e9, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x5x5_U8sRGB   , 128,  5,  5, 5, x, A, 0x0aa, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_5x5x5_FLT16    , 128,  5,  5, 5, x, A, 0x0ea, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x5x5_U8sRGB   , 128,  6,  5, 5, x, A, 0x0ba, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x5x5_FLT16    , 128,  6,  5, 5, x, A, 0x0fa, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x6x5_U8sRGB   , 128,  6,  6, 5, x, A, 0x0be, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x6x5_FLT16    , 128,  6,  6, 5, x, A, 0x0fe, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x6x6_U8sRGB   , 128,  6,  6, 6, x, A, 0x0bf, NC                       , ASTC_3D )
GMM_FORMAT( ASTC_LDR_3D_6x6x6_FLT16    , 128,  6,  6, 6, x, A, 0x0ff, NC                       , ASTC_3D )
#endif // INCLUDE_ASTC_FORMATS
#ifdef INCLUDE_MISC_FORMATS
GMM_FORMAT( AUYV                       ,  32,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( AYUV                       ,  32,  1,  1, 1, R, x, NA   , FC(4, x, AYUV, , )       , ALWAYS )
GMM_FORMAT( BAYER_BGGR8                ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // (0, 0) = B
GMM_FORMAT( BAYER_BGGR16               ,  16,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // (0, 0) = B
GMM_FORMAT( BAYER_GBRG8                ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // (0, 0) = G, (1, 0) = B
GMM_FORMAT( BAYER_GBRG16               ,  16,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // (0, 0) = G, (1, 0) = B
GMM_FORMAT( BAYER_GRBG8                ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // (0, 0) = G, (1, 0) = R
GMM_FORMAT( BAYER_GRBG16               ,  16,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // (0, 0) = G, (1, 0) = R
GMM_FORMAT( BAYER_RGGB8                ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // (0, 0) = R
GMM_FORMAT( BAYER_RGGB16               ,  16,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // (0, 0) = R
GMM_FORMAT( BC1                        ,  64,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS ) // Legacy GMM name for related HW format.
GMM_FORMAT( BC2                        , 128,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS ) // "
GMM_FORMAT( BC3                        , 128,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS ) // "
GMM_FORMAT( BC4                        ,  64,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS ) // "
GMM_FORMAT( BC5                        , 128,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS ) // "
GMM_FORMAT( BC6                        , 128,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS ) // "
GMM_FORMAT( BC6H                       , 128,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS ) // "
GMM_FORMAT( BC7                        , 128,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , GEN(7) ) // "
GMM_FORMAT( BGRP                       ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // FOURCC:BGRP
GMM_FORMAT( D16_UNORM                  ,  16,  1,  1, 1, x, x, NA   , FC(4, 16, R, 16, U)      , ALWAYS ) // Depth uses color format L1e.En
GMM_FORMAT( D24_UNORM_X8_UINT          ,  32,  1,  1, 1, x, x, NA   , FC(4, 32, D, 32, U)      , ALWAYS )
GMM_FORMAT( D32_FLOAT                  ,  32,  1,  1, 1, x, x, NA   , FC(4, 32, R, 32, F1)     , ALWAYS )
GMM_FORMAT( DXT1                       ,  64,  4,  4, 1, x, x, NA   , NC                       , ALWAYS ) // Legacy GMM name for related HW format.
GMM_FORMAT( DXT2_5                     , 128,  4,  4, 1, x, x, NA   , NC                       , ALWAYS ) // "
GMM_FORMAT( ETC1                       ,  64,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , GEN(8) || VLV2 ) // "
GMM_FORMAT( ETC2                       ,  64,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , GEN(8) || VLV2 ) // "
GMM_FORMAT( ETC2_EAC                   , 128,  4,  4, 1, x, x, NA   , FC(4, x, ML8, , )        , GEN(8) || VLV2 ) // "
GMM_FORMAT( GENERIC_8BIT               ,   8,  1,  1, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( GENERIC_16BIT              ,  16,  1,  1, 1, x, x, NA   , FC(4, x, ML8, , )        , ALWAYS )
GMM_FORMAT( GENERIC_24BIT              ,  24,  1,  1, 1, x, x, NA   , NC                       , ALWAYS ) // verify ML8 for > 16 bit
GMM_FORMAT( GENERIC_32BIT              ,  32,  1,  1, 1, x, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( GENERIC_48BIT              ,  48,  1,  1, 1, x, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( GENERIC_64BIT              ,  64,  1,  1, 1, x, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( GENERIC_96BIT              ,  96,  1,  1, 1, x, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( GENERIC_128BIT             , 128,  1,  1, 1, x, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( GENERIC_192BIT             , 192,  1,  1, 1, x, x, NA   , NC                       , GEN(8) )
GMM_FORMAT( GENERIC_256BIT             , 256,  1,  1, 1, x, x, NA   , NC                       , GEN(8) )
GMM_FORMAT( I420                       ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS ) // Same as IYUV.
GMM_FORMAT( IYUV                       ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( IMC1                       ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , ALWAYS )
GMM_FORMAT( IMC2                       ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , ALWAYS )
GMM_FORMAT( IMC3                       ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , ALWAYS )
GMM_FORMAT( IMC4                       ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , ALWAYS )
GMM_FORMAT( L4A4                       ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , ALWAYS ) // No HW support.
GMM_FORMAT( MFX_JPEG_YUV411            ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV411R           ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV420            ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , GEN(7) ) // Same as IMC3.
GMM_FORMAT( MFX_JPEG_YUV422H           ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV422V           ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , GEN(7) )
GMM_FORMAT( MFX_JPEG_YUV444            ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , GEN(7) )
GMM_FORMAT( NV11                       ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( NV12                       ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , ALWAYS )
GMM_FORMAT( NV21                       ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , ALWAYS )
GMM_FORMAT( P8                         ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( P010                       ,  16,  1,  1, 1, R, x, NA   , FC(4, x, P010, , _L)     , ALWAYS )
GMM_FORMAT( P012                       ,  16,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( P016                       ,  16,  1,  1, 1, R, x, NA   , FC(4, x, P016, , _L)     , ALWAYS )
GMM_FORMAT( P208                       ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( R10G10B10_XR_BIAS_A2_UNORM ,  32,  1,  1, 1, x, x, NA   , FC(4, x, RGB10A2, , )    , ALWAYS ) // DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM
GMM_FORMAT( R24G8_TYPELESS             ,  32,  1,  1, 1, x, x, NA   , FC(4, 32, R, 32, U)      , ALWAYS ) // DXGI_FORMAT_R24G8_TYPELESS (To differentiate from GENERIC_32BIT.)
GMM_FORMAT( R32G8X24_TYPELESS          ,  64,  1,  1, 1, x, x, NA   , FC(4, 32, R, 32, U)      , ALWAYS ) // DXGI_FORMAT_R32G8X24_TYPELESS (To differentiate from GENERIC_64BIT.)
GMM_FORMAT( RENDER_8BIT                ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( RGBP                       ,   8,  1,  1, 1, R, x, NA   , FC(4, x, NV12, , _L)     , ALWAYS ) // FOURCC:RGBP
GMM_FORMAT( Y1_UNORM                   ,   1,  1,  1, 1, x, x, NA   , NC                       , GEN(8) )
GMM_FORMAT( Y8_UNORM_VA                ,   8,  1,  1, 1, x, x, NA   , FC(4, x, NV12, , _L)     , GEN(8) )
GMM_FORMAT( Y16_SNORM                  ,  16,  1,  1, 1, x, x, NA   , FC(4, x, P010, , _L)     , GEN(8) )
GMM_FORMAT( Y16_UNORM                  ,  16,  1,  1, 1, x, x, NA   , FC(4, x, P010, , _L)     , GEN(8) )
#if (IGFX_GEN >= IGFX_GEN10)
GMM_FORMAT( Y32_UNORM                  ,  32,  1,  1, 1, x, x, NA   , NC                       , GEN(10) ) // Y32 was removed from Gen9 but is still referenced; only available Gen10+.
#endif
GMM_FORMAT( Y210                       ,  64,  2,  1, 1, R, x, NA   , FC(4, x, Y210, , )       , GEN(11) ) // Packed 422 10/12/16 bit.
GMM_FORMAT( Y212                       ,  64,  2,  1, 1, R, x, NA   , FC(4, x, Y216, , )       , GEN(11) )
GMM_FORMAT( Y410                       ,  32,  1,  1, 1, R, x, NA   , FC(4, x, Y410, , )       , GEN(11) )
GMM_FORMAT( Y412                       ,  64,  1,  1, 1, R, x, NA   , FC(4, x, Y416, , )       , GEN(11) )
GMM_FORMAT( Y216                       ,  64,  2,  1, 1, R, x, NA   , FC(4, x, Y216, , )       , ALWAYS )
GMM_FORMAT( Y416                       ,  64,  1,  1, 1, R, x, NA   , FC(4, x, Y416, , )       , ALWAYS ) // Packed 444 10/12/16 bit.
GMM_FORMAT( YV12                       ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
GMM_FORMAT( YVU9                       ,   8,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
// The packed 4:2:2 YUV formats (UYVY, VYUY, YUY2, YVYU) are implemented as compressed block
// formats by suffixing _2x1 (i.e. 32bpe 2x1-pixel blocks instead of 16bpp 1x1 blocks).
// All OS components (UMDs/KMD) can switch to the *_2x1 style independently of the legacy
// implementation. Refer to GmmCommonExt.h for the legacy implementation of UYVY, VYUY,
// YUY2, and YVYU.
// TODO: Unify these when all OS components have switched to the compressed block format.
GMM_FORMAT( UYVY_2x1                   ,  32,  2,  1, 1, R, x, NA   , FC(4, x, SWAPY, , )      , ALWAYS )
GMM_FORMAT( VYUY_2x1                   ,  32,  2,  1, 1, R, x, NA   , FC(4, x, SWAPUVY, , )    , ALWAYS )
GMM_FORMAT( YUY2_2x1                   ,  32,  2,  1, 1, R, x, NA   , FC(4, x, YUY2, , )       , ALWAYS )
GMM_FORMAT( YVYU_2x1                   ,  32,  2,  1, 1, R, x, NA   , FC(4, x, SWAPUV, , )     , ALWAYS )
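// Sizing note (derived from the *_2x1 entries above): a 2x1 block carries two pixels in
// one 32-bit element, so for a surface W pixels wide,
//   RowBytes = (W / 2) * (32 / 8) = W * 2
// ...identical to the bytes produced by the legacy 16bpp 1x1 representation
// (W * 16 / 8 = W * 2); only the element/block bookkeeping differs.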
GMM_FORMAT( MEDIA_Y1_UNORM             ,   1,  1,  1, 1, x, x, NA   , NC                       , GEN(8) )
GMM_FORMAT( MEDIA_Y8_UNORM             ,   8,  1,  1, 1, x, x, NA   , FC(4, x, NV12, , _L)     , GEN(8) )
GMM_FORMAT( MEDIA_Y16_SNORM            ,  16,  1,  1, 1, x, x, NA   , FC(4, x, P010, , _L)     , GEN(8) )
GMM_FORMAT( MEDIA_Y16_UNORM            ,  16,  1,  1, 1, x, x, NA   , FC(4, x, P010, , _L)     , GEN(8) )
GMM_FORMAT( MEDIA_Y32_UNORM            ,   1,  1,  1, 1, x, x, NA   , NC                       , GEN(8) ) // Y32 is the BDW name for SKL's Y1; 1bpp with 32b granularity.
GMM_FORMAT( B16G16R16A16_UNORM         ,  64,  1,  1, 1, R, x, NA   , FC(4, 16, RGBA, 16, U)   , ALWAYS ) // Swapped ARGB16 for media-SFC output.
GMM_FORMAT( P216                       ,  16,  1,  1, 1, R, x, NA   , NC                       , ALWAYS )
#if _WIN32
GMM_FORMAT( WGBOX_YUV444               ,  32,  1,  1, 1, x, x, NA   , NC                       , GEN(9) ) // For testing purposes only.
GMM_FORMAT( WGBOX_PLANAR_YUV444        ,  32,  1,  1, 1, x, x, NA   , NC                       , GEN(9) ) // For testing purposes only.
#endif
#endif // INCLUDE_MISC_FORMATS

/*****************************************************************************\
  Usage:
    This file is #include'd into various areas of source code to produce
    different things--various enums, struct/array-initializing code, etc.

  Format Names:
    The GMM_RESOURCE_FORMAT and GMM_SURFACESTATE_FORMAT enums are generated
    from this table.

  Supported Conditionals (*) and Meaning:
    GEN(X)........"Gen X or later", where X is text for IGFX_GEN[X]_CORE.
    SKU(FtrXxx)..."SKU FtrXxx is set".
    WA(WaXxx)....."WA WaXxx is set". (Usually used with !/NOT prefix in table.)

  Conditionals:
    Inclusions making use of columns that support conditionals (e.g. "RT",
    "Available") must wrap the inclusion with macro definitions servicing the
    supported conditionals in the local source--e.g....

      #define GMM_FORMAT_GEN(X) (GFX_GET_CURRENT_RENDERCORE(pHwDevExt->platform) >= IGFX_GEN##X##_CORE)
      #define GMM_FORMAT_SKU(FtrXxx) (GFX_IS_SKU(pHwDevExt, FtrXxx))
      #define GMM_FORMAT_WA(WaXxx) (GFX_IS_WA(pHwDevExt, WaXxx))
      #define GMM_FORMAT(Name, bpe, Width, Height, Depth, IsRT, IsASTC, RcsSurfaceFormat, AuxL1eFormat, Availability) ...
      #include "GmmFormatTable.h"

\*****************************************************************************/

#undef A
#undef ALWAYS
#undef ASTC_3D
#undef ASTC_HDR_2D
#undef ASTC_LDR_2D
#undef FC
#undef GEN
#undef INCLUDE_ASTC_FORMATS
#undef INCLUDE_MISC_FORMATS
#undef INCLUDE_SURFACESTATE_FORMATS
#undef MC
#undef NA
#undef NC
#undef R
#undef SKU
#undef VLV2
#undef WA
#undef x

// So include-side code doesn't have to do this...
#undef GMM_FORMAT
#undef GMM_FORMAT_INCLUDE_ASTC_FORMATS_ONLY
#undef GMM_FORMAT_INCLUDE_SURFACESTATE_FORMATS_ONLY
#undef GMM_FORMAT_GEN
#undef GMM_FORMAT_SKU
#undef GMM_FORMAT_WA
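
/*
  Illustrative consumer sketch (hypothetical MY_FORMAT names; see the Usage
  notes above--the real GMM_RESOURCE_FORMAT/GMM_SURFACESTATE_FORMAT enums are
  generated with this same pattern): producing an enum from the table. The
  conditional columns are discarded unused by the consuming macro here, so the
  GMM_FORMAT_GEN/SKU/WA hooks aren't needed for this particular use.

    #define GMM_FORMAT(Name, bpe, Width, Height, Depth, IsRT, IsASTC, \
                       RcsSurfaceFormat, AuxL1eFormat, Availability)  \
        MY_FORMAT_##Name,

    typedef enum MY_FORMAT_ENUM
    {
        MY_FORMAT_INVALID = 0,
        #include "GmmFormatTable.h" // GMM_FORMAT is #undef'd by the include itself.
        MY_FORMAT_COUNT
    } MY_FORMAT;
*/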