/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef ARCH_CPU_H
#define ARCH_CPU_H

#include <types.h>
#include <arch/cpuid.h> /* IWYU pragma: export */

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */

static inline unsigned int cpuid_get_max_func(void)
{
	return cpuid_eax(0);
}

#define X86_VENDOR_INVALID    0
#define X86_VENDOR_INTEL      1
#define X86_VENDOR_CYRIX      2
#define X86_VENDOR_AMD        3
#define X86_VENDOR_UMC        4
#define X86_VENDOR_NEXGEN     5
#define X86_VENDOR_CENTAUR    6
#define X86_VENDOR_RISE       7
#define X86_VENDOR_TRANSMETA  8
#define X86_VENDOR_NSC        9
#define X86_VENDOR_SIS       10
#define X86_VENDOR_HYGON     11
#define X86_VENDOR_ANY     0xfe
#define X86_VENDOR_UNKNOWN 0xff

#define CPUID_FEATURE_PAE (1 << 6)
#define CPUID_FEATURE_PSE36 (1 << 17)
#define CPUID_FEATURE_HTT (1 << 28)

/* Structured Extended Feature Flags */
#define CPUID_STRUCT_EXTENDED_FEATURE_FLAGS 0x7

// Intel leaf 0x4, AMD leaf 0x8000001d EAX

#define CPUID_CACHE(x, res) \
	(((res) >> CPUID_CACHE_##x##_SHIFT) & CPUID_CACHE_##x##_MASK)

#define CPUID_CACHE_SHARING_CACHE_SHIFT 14
#define CPUID_CACHE_SHARING_CACHE_MASK 0xfff
#define CPUID_CACHE_SHARING_CACHE(res) CPUID_CACHE(SHARING_CACHE, (res).eax)

#define CPUID_CACHE_FULL_ASSOC_SHIFT 9
#define CPUID_CACHE_FULL_ASSOC_MASK 0x1
#define CPUID_CACHE_FULL_ASSOC(res) CPUID_CACHE(FULL_ASSOC, (res).eax)

#define CPUID_CACHE_SELF_INIT_SHIFT 8
#define CPUID_CACHE_SELF_INIT_MASK 0x1
#define CPUID_CACHE_SELF_INIT(res) CPUID_CACHE(SELF_INIT, (res).eax)

#define CPUID_CACHE_LEVEL_SHIFT 5
#define CPUID_CACHE_LEVEL_MASK 0x7
#define CPUID_CACHE_LEVEL(res) CPUID_CACHE(LEVEL, (res).eax)

#define CPUID_CACHE_TYPE_SHIFT 0
#define CPUID_CACHE_TYPE_MASK 0x1f
#define CPUID_CACHE_TYPE(res) CPUID_CACHE(TYPE, (res).eax)

// Intel leaf 0x4, AMD leaf 0x8000001d EBX

#define CPUID_CACHE_WAYS_OF_ASSOC_SHIFT 22
#define CPUID_CACHE_WAYS_OF_ASSOC_MASK 0x3ff
#define CPUID_CACHE_WAYS_OF_ASSOC(res) CPUID_CACHE(WAYS_OF_ASSOC, (res).ebx)

#define CPUID_CACHE_PHYS_LINE_SHIFT 12
#define CPUID_CACHE_PHYS_LINE_MASK 0x3ff
#define CPUID_CACHE_PHYS_LINE(res) CPUID_CACHE(PHYS_LINE, (res).ebx)

#define CPUID_CACHE_COHER_LINE_SHIFT 0
#define CPUID_CACHE_COHER_LINE_MASK 0xfff
#define CPUID_CACHE_COHER_LINE(res) CPUID_CACHE(COHER_LINE, (res).ebx)

// Intel leaf 0x4, AMD leaf 0x8000001d ECX

#define CPUID_CACHE_NO_OF_SETS_SHIFT 0
#define CPUID_CACHE_NO_OF_SETS_MASK 0xffffffff
#define CPUID_CACHE_NO_OF_SETS(res) CPUID_CACHE(NO_OF_SETS, (res).ecx)
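
/*
 * Illustrative sketch (not part of the API): decoding one cache's geometry
 * from Intel leaf 0x4, sub-leaf 0, with the accessors above. cpuid_ext()
 * and struct cpuid_result come from <arch/cpuid.h>; note that the hardware
 * stores each of these fields minus one.
 *
 *	struct cpuid_result res = cpuid_ext(0x4, 0);
 *	size_t ways  = CPUID_CACHE_WAYS_OF_ASSOC(res) + 1;
 *	size_t parts = CPUID_CACHE_PHYS_LINE(res) + 1;
 *	size_t line  = CPUID_CACHE_COHER_LINE(res) + 1;
 *	size_t sets  = CPUID_CACHE_NO_OF_SETS(res) + 1;
 *	size_t size  = ways * parts * line * sets;
 */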

// Intel leaf 0x5
#define CPUID_FEATURE_MONITOR_MWAIT		(1 << 0)
#define CPUID_FEATURE_INTERUPT_BREAK_EVENT	(1 << 1)

unsigned int cpu_cpuid_extended_level(void);
int cpu_have_cpuid(void);
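
/*
 * Note: the classic way to implement cpu_have_cpuid() is to attempt to
 * toggle the X86_EFLAGS_ID bit defined above; if the bit can be flipped,
 * the CPUID instruction is supported.
 */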

static inline bool cpu_is_amd(void)
{
	return CONFIG(CPU_AMD_PI) || CONFIG(SOC_AMD_COMMON);
}

static inline bool cpu_is_intel(void)
{
	return CONFIG(CPU_INTEL_COMMON) || CONFIG(SOC_INTEL_COMMON);
}

struct device;

#define CPUID_FROM_FMS(family, model, stepping) ( \
	/* bits 31..28: reserved, set to 0 */ \
	((family) > 0xf ? ((family) - 0xf) & 0xff : 0) << 20 | \
	((model) >> 4 & 0xf) << 16 | \
	/* bits 15..14: reserved, set to 0 */ \
	/* bits 13..12: processor type, set to 0 */ \
	((family) > 0xf ? 0xf : (family) & 0xf) << 8 | \
	((model) & 0xf) << 4 | \
	((stepping) & 0xf) << 0)
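
/*
 * For example, CPUID_FROM_FMS(6, 0x9e, 9) evaluates to 0x906e9: the
 * extended model 0x9 lands in bits 19..16, family 0x6 in bits 11..8,
 * base model 0xe in bits 7..4 and stepping 0x9 in bits 3..0.
 */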

#define CPUID_EXACT_MATCH_MASK				0xffffffff
#define CPUID_ALL_STEPPINGS_MASK			0xfffffff0
#define CPUID_ALL_STEPPINGS_AND_BASE_MODELS_MASK	0xffffff00

static inline bool cpuid_match(uint32_t a, uint32_t b, uint32_t mask)
{
	return (a & mask) == (b & mask);
}
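
/*
 * Illustrative use: match the running CPU against a known signature while
 * ignoring the stepping (cpu_get_cpuid() is declared further below):
 *
 *	if (cpuid_match(cpu_get_cpuid(), CPUID_FROM_FMS(6, 0x9e, 0),
 *			CPUID_ALL_STEPPINGS_MASK))
 *		...
 */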

#define CPU_TABLE_END	{ X86_VENDOR_INVALID, 0, 0 }

struct cpu_device_id {
	unsigned int vendor;
	uint32_t device;
	uint32_t device_match_mask;
};

struct cpu_driver {
	struct device_operations *ops;
	const struct cpu_device_id *id_table;
};
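
/*
 * Illustrative sketch of a driver table (hypothetical values; assumes the
 * __cpu_driver linker-set attribute from <device/device.h> and an existing
 * struct device_operations named cpu_dev_ops):
 *
 *	static const struct cpu_device_id cpu_table[] = {
 *		{ X86_VENDOR_INTEL, CPUID_FROM_FMS(6, 0x9e, 0),
 *		  CPUID_ALL_STEPPINGS_MASK },
 *		CPU_TABLE_END
 *	};
 *
 *	static const struct cpu_driver driver __cpu_driver = {
 *		.ops      = &cpu_dev_ops,
 *		.id_table = cpu_table,
 *	};
 */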

struct cpu_driver *find_cpu_driver(struct device *cpu);

struct thread;

struct cpu_info {
	struct device *cpu;
	size_t index;
};

/*
 * This structure describes the data allocated in the %gs segment for each CPU.
 * In order to read from this structure you will need to use assembly to
 * reference the segment.
 *
 * e.g., Reading the cpu_info pointer:
 *     %%gs:0
 */
struct per_cpu_segment_data {
	/*
	 * Instead of keeping a `struct cpu_info` here, we keep a pointer to
	 * the cpu_info struct located in %ds. This avoids the need for
	 * special access functions to read the fields of cpu_info.
	 */
	struct cpu_info *cpu_info;
};

enum cb_err set_cpu_info(unsigned int index, struct device *cpu);

static inline struct cpu_info *cpu_info(void)
{
	struct cpu_info *ci = NULL;

	__asm__ __volatile__("mov %%gs:%c[offset], %[ci]"
		: [ci] "=r" (ci)
		: [offset] "i" (offsetof(struct per_cpu_segment_data, cpu_info))
	);

	return ci;
}

static inline unsigned long cpu_index(void)
{
	struct cpu_info *ci;
	ci = cpu_info();
	return ci->index;
}

struct cpuinfo_x86 {
	uint8_t	x86;		/* CPU family */
	uint8_t	x86_vendor;	/* CPU vendor */
	uint8_t	x86_model;
	uint8_t	x86_mask;
};

static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
{
	c->x86 = (tfms >> 8) & 0xf;
	c->x86_model = (tfms >> 4) & 0xf;
	c->x86_mask = tfms & 0xf;
	if (c->x86 == 0xf)
		c->x86 += (tfms >> 20) & 0xff;
	if (c->x86 >= 0x6)
		c->x86_model += ((tfms >> 16) & 0xF) << 4;
}
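
/*
 * For example, tfms = 0x000906e9 (CPUID leaf 1 EAX) decodes to x86 = 0x6,
 * x86_model = 0x9e and x86_mask = 0x9: for family 6 and above the extended
 * model nibble is prepended to the base model.
 */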

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void cpu_relax(void)
{
	__asm__ __volatile__("rep;nop" : : : "memory");
}

#define asmlinkage __attribute__((regparm(0)))

/*
 * car_stage_entry() is the symbol jumped to for each stage after the
 * bootblock when using cache-as-ram.
 */
asmlinkage void car_stage_entry(void);

/*
 * Get the processor ID using CPUID with EAX=1;
 * returns the value left in the EAX register.
 */
uint32_t cpu_get_cpuid(void);

/*
 * Get the processor feature flags using CPUID with EAX=1;
 * returns the value left in the ECX register.
 */
uint32_t cpu_get_feature_flags_ecx(void);

/*
 * Get the processor feature flags using CPUID with EAX=1;
 * returns the value left in the EDX register.
 */
uint32_t cpu_get_feature_flags_edx(void);
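
/*
 * Illustrative use: test a leaf 1 EDX feature bit defined above, e.g.
 * hyper-threading support:
 *
 *	if (cpu_get_feature_flags_edx() & CPUID_FEATURE_HTT)
 *		...
 */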

#define DETERMINISTIC_CACHE_PARAMETERS_CPUID_IA	0x04
#define DETERMINISTIC_CACHE_PARAMETERS_CPUID_AMD	0x8000001d

enum cache_level {
	CACHE_L1D = 0,
	CACHE_L1I = 1,
	CACHE_L2 = 2,
	CACHE_L3 = 3,
	CACHE_LINV = 0xFF,
};

enum cpu_type {
	CPUID_COMMAND_UNSUPPORTED = 0,
	CPUID_TYPE_AMD = 1,
	CPUID_TYPE_INTEL = 2,
	CPUID_TYPE_INVALID = 0xFF,
};

struct cpu_cache_info {
	uint8_t type;
	uint8_t level;
	size_t num_ways;
	size_t num_sets;
	size_t line_size;
	size_t size;
	size_t physical_partitions;
	size_t num_cores_shared;
	bool fully_associative;
};

enum cpu_type cpu_check_deterministic_cache_cpuid_supported(void);

/* cpu_get_cache_ways_assoc_info returns the cache's ways of associativity. */
size_t cpu_get_cache_ways_assoc_info(const struct cpu_cache_info *info);

/*
 * cpu_get_cache_type returns the cache type.
 * The type is one of 0: no cache, 1: data cache, 2: instruction cache,
 * 3: unified cache; the remaining values are reserved.
 */
uint8_t cpu_get_cache_type(const struct cpu_cache_info *info);

/*
 * cpu_get_cache_level returns the cache level.
 * The level is one of 0: reserved, 1: L1, 2: L2, 3: L3; the remaining
 * values are reserved.
 */
uint8_t cpu_get_cache_level(const struct cpu_cache_info *info);

/* cpu_get_cache_phy_partition_info returns the number of physical cache line partitions. */
size_t cpu_get_cache_phy_partition_info(const struct cpu_cache_info *info);

/* cpu_get_cache_line_size returns the cache line size in bytes. */
size_t cpu_get_cache_line_size(const struct cpu_cache_info *info);

/* cpu_get_cache_sets returns the number of cache sets. */
size_t cpu_get_cache_sets(const struct cpu_cache_info *info);

/* cpu_is_cache_full_assoc checks whether the cache is fully associative. */
bool cpu_is_cache_full_assoc(const struct cpu_cache_info *info);

/* cpu_get_max_cache_share returns the number of cores sharing this cache. */
size_t cpu_get_max_cache_share(const struct cpu_cache_info *info);

/* get_cache_size calculates the total cache size in bytes. */
size_t get_cache_size(const struct cpu_cache_info *info);

/*
 * Returns the sub-states supported by the specified CPU
 * C-state level.
 *
 * Level 0 corresponds to the lowest C-state (C0).
 * Higher levels are processor specific.
 */
uint8_t cpu_get_c_substate_support(const int state);
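
/*
 * Illustrative note (assuming the usual MWAIT enumeration in CPUID leaf 5):
 * EDX packs a 4-bit sub-state count per C-state, so
 * cpu_get_c_substate_support(1) would report the number of sub-states for
 * the C-state one above C0.
 */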

/*
 * fill_cpu_cache_info gathers all required cache information and fills it
 * into the given cpu_cache_info structure by calling CPUID with EAX=leaf
 * and ECX=cache level.
 */
bool fill_cpu_cache_info(uint8_t level, struct cpu_cache_info *info);
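
/*
 * Illustrative sketch (printk() from <console/console.h>; the level argument
 * is the CPUID ECX sub-leaf index, which enumerates caches in hardware
 * order):
 *
 *	struct cpu_cache_info info;
 *	if (fill_cpu_cache_info(0, &info))
 *		printk(BIOS_DEBUG, "cache: %zu bytes, %zu ways\n",
 *		       get_cache_size(&info),
 *		       cpu_get_cache_ways_assoc_info(&info));
 */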

/*
 * Determines whether the number of cache sets is a power of two.
 *
 * Cache designs often favor power-of-two set counts for efficient indexing
 * and addressing. This function checks if the provided cache configuration
 * adheres to this practice.
 */
bool is_cache_sets_power_of_two(void);

#if CONFIG(RESERVED_PHYSICAL_ADDRESS_BITS_SUPPORT)
unsigned int get_reserved_phys_addr_bits(void);
#else
/* Default implementation */
static inline unsigned int get_reserved_phys_addr_bits(void)
{
	return 0;
}
#endif

#endif /* ARCH_CPU_H */