/* SPDX-License-Identifier: GPL-2.0-only */

#include <cpu/cpu.h>
#include <device/device.h>
#include <cpu/x86/topology.h>

#define CPUID_EXTENDED_CPU_TOPOLOGY2 0x1f

#define CPUID_EXTENDED_CPU_TOPOLOGY 0x0b
#define LEVEL_TYPE_CORE 2
#define LEVEL_TYPE_SMT 1

#define CPUID_CPU_TOPOLOGY(x, val) \
	(((val) >> CPUID_CPU_TOPOLOGY_##x##_SHIFT) & CPUID_CPU_TOPOLOGY_##x##_MASK)

#define CPUID_CPU_TOPOLOGY_LEVEL_TYPE_SHIFT 0x8
#define CPUID_CPU_TOPOLOGY_LEVEL_TYPE_MASK 0xff
#define CPUID_CPU_TOPOLOGY_LEVEL(res) CPUID_CPU_TOPOLOGY(LEVEL_TYPE, (res).ecx)

#define CPUID_CPU_TOPOLOGY_LEVEL_BITS_SHIFT 0x0
#define CPUID_CPU_TOPOLOGY_LEVEL_BITS_MASK 0x1f
#define CPUID_CPU_TOPOLOGY_THREAD_BITS(res) CPUID_CPU_TOPOLOGY(LEVEL_BITS, (res).eax)
#define CPUID_CPU_TOPOLOGY_CORE_BITS(res, threadbits) \
	((CPUID_CPU_TOPOLOGY(LEVEL_BITS, (res).eax)) - threadbits)
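
/*
 * Illustrative use of the accessors above (example register values, not taken
 * from any datasheet): with ECX = 0x0201 for a sub-leaf,
 * CPUID_CPU_TOPOLOGY_LEVEL() yields (0x0201 >> 8) & 0xff = 2, i.e.
 * LEVEL_TYPE_CORE; with EAX = 0x4, CPUID_CPU_TOPOLOGY_THREAD_BITS() yields
 * 0x4 & 0x1f = 4.
 */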

/* Return the level shift for the highest supported level (the package) */
static enum cb_err get_cpu_package_bits(uint32_t *package_bits)
{
	struct cpuid_result cpuid_regs;
	int level_num, cpu_id_op = 0;
	const uint32_t cpuid_max_func = cpuid_get_max_func();

	/*
	 * Not all CPUs support this; those won't get their topology filled in here.
	 * CPU-specific code can do this, however.
	 */
	if (cpuid_max_func >= CPUID_EXTENDED_CPU_TOPOLOGY2)
		cpu_id_op = CPUID_EXTENDED_CPU_TOPOLOGY2;
	else if (cpuid_max_func >= CPUID_EXTENDED_CPU_TOPOLOGY)
		cpu_id_op = CPUID_EXTENDED_CPU_TOPOLOGY;
	else
		return CB_ERR;
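
	/*
	 * Example of the selection above (hypothetical CPU): if
	 * cpuid_get_max_func() returns 0x16, leaf 0x1f is out of range but
	 * leaf 0x0b is available, so cpu_id_op ends up as
	 * CPUID_EXTENDED_CPU_TOPOLOGY (0x0b).
	 */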

	*package_bits = level_num = 0;
	cpuid_regs = cpuid_ext(cpu_id_op, level_num);

	/*
	 * Sub-leaf index 0 enumerates the SMT level; some AMD CPUs leave this CPUID
	 * leaf reserved, so bail out. CPU-specific code can fill in the topology later.
	 */
	if (CPUID_CPU_TOPOLOGY_LEVEL(cpuid_regs) != LEVEL_TYPE_SMT)
		return CB_ERR;

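	/*
	 * Illustrative walk of the loop below (hypothetical enumeration, not
	 * from any datasheet): sub-leaf 0 reports the SMT level with
	 * EAX[4:0] = 1, sub-leaf 1 reports the core level with EAX[4:0] = 5,
	 * and sub-leaf 2 reports level type 0 (invalid). The loop then leaves
	 * *package_bits = 5, so the package ID is the initial APIC ID >> 5.
	 */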
	do {
		*package_bits = (CPUID_CPU_TOPOLOGY(LEVEL_BITS, (cpuid_regs).eax));
		level_num++;
		cpuid_regs = cpuid_ext(cpu_id_op, level_num);
	/* Stop when the level type is invalid, i.e. 0. */
	} while (CPUID_CPU_TOPOLOGY_LEVEL(cpuid_regs));

	return CB_SUCCESS;
}

void set_cpu_node_id_leaf_1f_b(struct device *cpu)
{
	static uint32_t package_bits;
	static enum cb_err package_bits_ret;
	static bool done = false;

	if (!done) {
		package_bits_ret = get_cpu_package_bits(&package_bits);
		done = true;
	}

	const uint32_t apicid = cpu->path.apic.initial_lapicid;

	/*
	 * If neither leaf_1f nor leaf_b exists, don't update the node_id.
	 */
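	/*
	 * Example (assumed package_bits = 5, not a value from any datasheet):
	 * an initial APIC ID of 0x2b yields node_id = 0x2b >> 5 = 1.
	 */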
	if (package_bits_ret == CB_SUCCESS)
		cpu->path.apic.node_id = (apicid >> package_bits);
}

/* Get number of bits for core ID and SMT ID */
static enum cb_err get_cpu_core_thread_bits(uint32_t *core_bits, uint32_t *thread_bits)
{
	struct cpuid_result cpuid_regs;
	int level_num, cpu_id_op = 0;
	const uint32_t cpuid_max_func = cpuid_get_max_func();

	/*
	 * Not all CPUs support this; those won't get their topology filled in here.
	 * CPU-specific code can do this, however.
	 */
	if (cpuid_max_func < CPUID_EXTENDED_CPU_TOPOLOGY)
		return CB_ERR;

	cpu_id_op = CPUID_EXTENDED_CPU_TOPOLOGY;

	*core_bits = level_num = 0;
	cpuid_regs = cpuid_ext(cpu_id_op, level_num);

	/*
	 * Sub-leaf index 0 enumerates the SMT level; some AMD CPUs leave this CPUID
	 * leaf reserved, so bail out. CPU-specific code can fill in the topology later.
	 */
	if (CPUID_CPU_TOPOLOGY_LEVEL(cpuid_regs) != LEVEL_TYPE_SMT)
		return CB_ERR;

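	/*
	 * Illustrative values (hypothetical enumeration): if sub-leaf 0 (SMT)
	 * reports EAX[4:0] = 1 and sub-leaf 1 (core) reports EAX[4:0] = 5, the
	 * code below sets *thread_bits = 1 and *core_bits = 5 - 1 = 4.
	 */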
	*thread_bits = CPUID_CPU_TOPOLOGY_THREAD_BITS(cpuid_regs);
	do {
		level_num++;
		cpuid_regs = cpuid_ext(cpu_id_op, level_num);
		if (CPUID_CPU_TOPOLOGY_LEVEL(cpuid_regs) == LEVEL_TYPE_CORE) {
			*core_bits = CPUID_CPU_TOPOLOGY_CORE_BITS(cpuid_regs, *thread_bits);
			break;
		}
	/* Stop when the level type is invalid, i.e. 0 */
	} while (CPUID_CPU_TOPOLOGY_LEVEL(cpuid_regs));

	return CB_SUCCESS;
}

static void set_cpu_topology(struct device *cpu, unsigned int node,
		      unsigned int package, unsigned int core,
		      unsigned int thread)
{
	cpu->path.apic.node_id = node;
	cpu->path.apic.package_id = package;
	cpu->path.apic.core_id = core;
	cpu->path.apic.thread_id = thread;
}

void set_cpu_topology_from_leaf_b(struct device *cpu)
{
	static uint32_t core_bits, thread_bits;
	static enum cb_err core_thread_bits_ret;
	static bool done = false;
	if (!done) {
		core_thread_bits_ret = get_cpu_core_thread_bits(&core_bits, &thread_bits);
		done = true;
	}

	const uint32_t apicid = cpu->path.apic.initial_lapicid;
	uint32_t package_id, core_id, thread_id;
	/*
	 * If leaf_b does not exist, set the following best-guess defaults:
	 * - 1 package
	 * - no SMT
	 * - core_id = apicid
	 * CPU-specific code can always update these later on.
	 */
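	/*
	 * Worked example for the else-branch below (hypothetical layout with
	 * thread_bits = 1 and core_bits = 4): for apicid = 0x2b (0b101011),
	 *   thread_id  = 0x2b & 0x1        = 1
	 *   core_id    = (0x2b >> 1) & 0xf = 5
	 *   package_id = 0x2b >> 5         = 1
	 */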
	if (core_thread_bits_ret != CB_SUCCESS) {
		package_id = 0;
		core_id = apicid;
		thread_id = 0;
	} else {
		package_id = apicid >> (thread_bits + core_bits);
		core_id = (apicid >> thread_bits) & ((1 << core_bits) - 1);
		thread_id = apicid & ((1 << thread_bits) - 1);
	}

	set_cpu_topology(cpu, 0, package_id, core_id, thread_id);
}