/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef INTEL_DEVICE_INFO_H
#define INTEL_DEVICE_INFO_H

#include <stdbool.h>
#include <stdint.h>

#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/shader_enums.h"
#include "intel_kmd.h"

#include "intel/dev/intel_wa.h"

#include "intel/dev/intel_device_info_gen.h"

#ifdef __cplusplus
extern "C" {
#endif

#define intel_platform_in_range(platform, platform_range) \
   (((platform) >= INTEL_PLATFORM_ ## platform_range ## _START) && \
    ((platform) <= INTEL_PLATFORM_ ## platform_range ## _END))

#define intel_device_info_is_atsm(devinfo) \
   intel_platform_in_range((devinfo)->platform, ATSM)

#define intel_device_info_is_dg2(devinfo) \
   (intel_platform_in_range((devinfo)->platform, DG2) || \
    intel_platform_in_range((devinfo)->platform, ATSM))

#define intel_device_info_is_mtl(devinfo) \
   intel_platform_in_range((devinfo)->platform, MTL)

#define intel_device_info_is_adln(devinfo) \
   ((devinfo)->is_adl_n)

#define intel_device_info_is_arl(devinfo) \
   intel_platform_in_range((devinfo)->platform, ARL)

#define intel_device_info_is_mtl_or_arl(devinfo) \
   (intel_device_info_is_mtl(devinfo) || intel_device_info_is_arl(devinfo))
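
/* Illustrative usage (a sketch, not part of this header): these predicates
 * take a struct intel_device_info pointer and are meant to gate
 * platform-specific paths, e.g.
 *
 *    if (intel_device_info_is_dg2(devinfo))
 *       ... DG2/ATSM-specific code ...
 */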

#define PAT_ENTRY(index_, mmap_) \
{ \
   .index = index_, \
   .mmap = INTEL_DEVICE_INFO_MMAP_MODE_##mmap_ \
}
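
/* A hypothetical example of how PAT_ENTRY can be used to build a PAT table
 * with designated initializers (the field names below are illustrative only):
 *
 *    .pat = {
 *       .cached_coherent = PAT_ENTRY(3, WB),
 *       .scanout         = PAT_ENTRY(1, WC),
 *    },
 */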

#ifdef GFX_VER

#define intel_device_info_is_9lp(devinfo) \
   (GFX_VER == 9 && ((devinfo)->platform == INTEL_PLATFORM_BXT || \
                     (devinfo)->platform == INTEL_PLATFORM_GLK))

#else

#define intel_device_info_is_9lp(devinfo) \
   ((devinfo)->platform == INTEL_PLATFORM_BXT || \
    (devinfo)->platform == INTEL_PLATFORM_GLK)

#endif

#define GFX_IP_VER(major, minor) (((major) << 16) | (minor))

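/* The helpers below read the device topology bitmasks. slice_masks has one
 * bit per slice; subslice_masks and eu_masks are flat byte arrays addressed
 * with the *_stride fields, one bit per subslice/EU packed 8 to a byte, as
 * the index math below shows. */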
static inline bool
intel_device_info_slice_available(const struct intel_device_info *devinfo,
                                  int slice)
{
   assert(slice < INTEL_DEVICE_MAX_SLICES);
   return (devinfo->slice_masks & (1U << slice)) != 0;
}

static inline bool
intel_device_info_subslice_available(const struct intel_device_info *devinfo,
                                     int slice, int subslice)
{
   return (devinfo->subslice_masks[slice * devinfo->subslice_slice_stride +
                                   subslice / 8] & (1U << (subslice % 8))) != 0;
}

static inline bool
intel_device_info_eu_available(const struct intel_device_info *devinfo,
                               int slice, int subslice, int eu)
{
   unsigned subslice_offset = slice * devinfo->eu_slice_stride +
                              subslice * devinfo->eu_subslice_stride;

   return (devinfo->eu_masks[subslice_offset + eu / 8] & (1U << (eu % 8))) != 0;
}

static inline uint32_t
intel_device_info_subslice_total(const struct intel_device_info *devinfo)
{
   uint32_t total = 0;

   for (size_t i = 0; i < ARRAY_SIZE(devinfo->subslice_masks); i++) {
      total += __builtin_popcount(devinfo->subslice_masks[i]);
   }

   return total;
}

static inline uint32_t
intel_device_info_eu_total(const struct intel_device_info *devinfo)
{
   uint32_t total = 0;

   for (size_t i = 0; i < ARRAY_SIZE(devinfo->eu_masks); i++)
      total += __builtin_popcount(devinfo->eu_masks[i]);

   return total;
}
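
/* A minimal usage sketch (assuming the devinfo max_* topology bounds): walk
 * every enabled EU with the availability helpers above.
 *
 *    for (int s = 0; s < devinfo->max_slices; s++)
 *       for (int ss = 0; ss < devinfo->max_subslices_per_slice; ss++)
 *          for (int eu = 0; eu < devinfo->max_eus_per_subslice; eu++)
 *             if (intel_device_info_subslice_available(devinfo, s, ss) &&
 *                 intel_device_info_eu_available(devinfo, s, ss, eu))
 *                ... per-EU work ...
 */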

/**
 * Computes the exclusive upper bound of dual-subslice IDs usable on this
 * device.
 *
 * Use this number for any calculation based on the slice/dual-subslice ID
 * read from the SR0.0 EU register. Depending on fusing, the maximum
 * dual-subslice ID can be larger than the total number of dual-subslices
 * actually present on the device.
 *
 * For example, on a GPU with 16 dual-subslices the maximum dual-subslice ID
 * is 15, so this function returns the exclusive bound: 16.
 */
static inline unsigned
intel_device_info_dual_subslice_id_bound(const struct intel_device_info *devinfo)
{
   /* Start from the last slice/subslice so we find the answer faster. */
   for (int s = devinfo->max_slices - 1; s >= 0; s--) {
      for (int ss = devinfo->max_subslices_per_slice - 1; ss >= 0; ss--) {
         if (intel_device_info_subslice_available(devinfo, s, ss))
            return s * devinfo->max_subslices_per_slice + ss + 1;
      }
   }
   unreachable("Invalid topology");
   return 0;
}
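
/* A typical (illustrative) use is sizing arrays indexed by the ID read from
 * SR0.0, e.g.
 *
 *    uint32_t *counters =
 *       calloc(intel_device_info_dual_subslice_id_bound(devinfo),
 *              sizeof(uint32_t));
 */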

int intel_device_name_to_pci_device_id(const char *name);

static inline uint64_t
intel_device_info_timebase_scale(const struct intel_device_info *devinfo,
                                 uint64_t gpu_timestamp)
{
   /* Scale the two 32-bit halves separately to avoid overflowing 64 bits. */
   uint64_t upper_ts = gpu_timestamp >> 32;
   uint64_t lower_ts = gpu_timestamp & 0xffffffff;
   uint64_t upper_scaled_ts = upper_ts * 1000000000ull / devinfo->timestamp_frequency;
   uint64_t lower_scaled_ts = lower_ts * 1000000000ull / devinfo->timestamp_frequency;
   return (upper_scaled_ts << 32) + lower_scaled_ts;
}
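
/* Why the split above works (illustrative arithmetic): the 32-bit lower half
 * times 1000000000 is at most ~4.3e9 * 1e9 ≈ 4.3e18, which still fits in a
 * uint64_t (max ~1.8e19), whereas scaling a full 64-bit timestamp in one
 * multiplication could overflow. The cost is a little precision lost to
 * truncating each division separately. */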

static inline bool
intel_vram_all_mappable(const struct intel_device_info *devinfo)
{
   return devinfo->mem.vram.unmappable.size == 0;
}

bool intel_get_device_info_from_fd(int fh, struct intel_device_info *devinfo, int min_ver, int max_ver);
bool intel_get_device_info_from_pci_id(int pci_id,
                                       struct intel_device_info *devinfo);
bool intel_get_device_info_for_build(int pci_id,
                                     struct intel_device_info *devinfo);

/* Only updates intel_device_info::regions::...::free fields. The
 * class/instance/size should remain the same over time.
 */
bool intel_device_info_update_memory_info(struct intel_device_info *devinfo,
                                          int fd);

void intel_device_info_topology_reset_masks(struct intel_device_info *devinfo);
void intel_device_info_topology_update_counts(struct intel_device_info *devinfo);
void intel_device_info_update_pixel_pipes(struct intel_device_info *devinfo, uint8_t *subslice_masks);
void intel_device_info_update_l3_banks(struct intel_device_info *devinfo);
uint32_t intel_device_info_get_eu_count_first_subslice(const struct intel_device_info *devinfo);
void intel_device_info_update_cs_workgroup_threads(struct intel_device_info *devinfo);
bool intel_device_info_compute_system_memory(struct intel_device_info *devinfo, bool update);
void intel_device_info_update_after_hwconfig(struct intel_device_info *devinfo);

#ifdef GFX_VERx10
#define intel_needs_workaround(devinfo, id) \
   (INTEL_WA_ ## id ## _GFX_VER && \
    BITSET_TEST(devinfo->workarounds, INTEL_WA_##id))
#else
#define intel_needs_workaround(devinfo, id) \
   BITSET_TEST(devinfo->workarounds, INTEL_WA_##id)
#endif
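
/* Illustrative usage (the ID below is a placeholder, not a real workaround
 * number):
 *
 *    if (intel_needs_workaround(devinfo, 1234567890))
 *       ... apply the workaround ...
 *
 * With GFX_VERx10 defined, the INTEL_WA_<id>_GFX_VER term lets per-generation
 * builds constant-fold away checks for workarounds that can never apply to
 * the generation being compiled, skipping the runtime BITSET_TEST.
 */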

enum intel_wa_steppings intel_device_info_wa_stepping(struct intel_device_info *devinfo);

uint32_t intel_device_info_get_max_slm_size(const struct intel_device_info *devinfo);
uint32_t intel_device_info_get_max_preferred_slm_size(const struct intel_device_info *devinfo);

#ifdef __cplusplus
}
#endif

#endif /* INTEL_DEVICE_INFO_H */