1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  acpi_numa.c - ACPI NUMA support
4  *
5  *  Copyright (C) 2002 Takayoshi Kochi <[email protected]>
6  */
7 
8 #define pr_fmt(fmt) "ACPI: " fmt
9 
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/types.h>
14 #include <linux/errno.h>
15 #include <linux/acpi.h>
16 #include <linux/memblock.h>
17 #include <linux/numa.h>
18 #include <linux/nodemask.h>
19 #include <linux/topology.h>
20 #include <linux/numa_memblks.h>
21 
/* Bitmap of logical node IDs already handed out by acpi_map_pxm_to_node(). */
static nodemask_t nodes_found_map = NODE_MASK_NONE;

/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

/* SRAT table revision; revision <= 1 restricts memory PXMs to 8 bits. */
unsigned char acpi_srat_revision __initdata;
/* Negative once SRAT use has been disabled (see disable_srat()/srat_disabled()). */
static int acpi_numa __initdata;

/* Highest PXM described by the SRAT, before CFMWS fake PXMs are added. */
static int last_real_pxm;
34 
/* Disable SRAT-based NUMA setup; srat_disabled() returns true afterwards. */
void __init disable_srat(void)
{
	acpi_numa = -1;
}
39 
pxm_to_node(int pxm)40 int pxm_to_node(int pxm)
41 {
42 	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
43 		return NUMA_NO_NODE;
44 	return pxm_to_node_map[pxm];
45 }
46 EXPORT_SYMBOL(pxm_to_node);
47 
node_to_pxm(int node)48 int node_to_pxm(int node)
49 {
50 	if (node < 0)
51 		return PXM_INVAL;
52 	return node_to_pxm_map[node];
53 }
54 
__acpi_map_pxm_to_node(int pxm,int node)55 static void __acpi_map_pxm_to_node(int pxm, int node)
56 {
57 	if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
58 		pxm_to_node_map[pxm] = node;
59 	if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
60 		node_to_pxm_map[node] = pxm;
61 }
62 
acpi_map_pxm_to_node(int pxm)63 int acpi_map_pxm_to_node(int pxm)
64 {
65 	int node;
66 
67 	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
68 		return NUMA_NO_NODE;
69 
70 	node = pxm_to_node_map[pxm];
71 
72 	if (node == NUMA_NO_NODE) {
73 		node = first_unset_node(nodes_found_map);
74 		if (node >= MAX_NUMNODES)
75 			return NUMA_NO_NODE;
76 		__acpi_map_pxm_to_node(pxm, node);
77 		node_set(node, nodes_found_map);
78 	}
79 
80 	return node;
81 }
82 EXPORT_SYMBOL(acpi_map_pxm_to_node);
83 
#ifdef CONFIG_NUMA_EMU
/*
 * Take max_nid - 1 fake-numa nodes into account in both
 * pxm_to_node_map()/node_to_pxm_map[] tables.
 *
 * NOTE(review): the loops below cover fake nodes 0..max_nid inclusive;
 * confirm whether "max_nid - 1" above should read "max_nid + 1".
 *
 * Returns 0 on success (or when there is nothing to do), -1 on error.
 */
int __init fix_pxm_node_maps(int max_nid)
{
	static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
	static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
	int i, j, index = -1, count = 0;
	nodemask_t nodes_to_enable;

	if (numa_off)
		return -1;

	/* no or incomplete node/PXM mapping set, nothing to do */
	if (srat_disabled())
		return 0;

	/* find fake nodes PXM mapping */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] != PXM_INVAL) {
			for (j = 0; j <= max_nid; j++) {
				if ((emu_nid_to_phys[j] == i) &&
				    WARN(node_to_pxm_map_copy[j] != PXM_INVAL,
					 "Node %d is already bound to PXM %d\n",
					 j, node_to_pxm_map_copy[j]))
					return -1;
				if (emu_nid_to_phys[j] == i) {
					node_to_pxm_map_copy[j] =
						node_to_pxm_map[i];
					if (j > index)
						index = j;
					count++;
				}
			}
		}
	}
	if (index == -1) {
		pr_debug("No node/PXM mapping has been set\n");
		/* nothing more to be done */
		return 0;
	}
	if (WARN(index != max_nid, "%d max nid when expected %d\n",
		      index, max_nid))
		return -1;

	nodes_clear(nodes_to_enable);

	/* map phys nodes not used for fake nodes */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] != PXM_INVAL) {
			for (j = 0; j <= max_nid; j++)
				if (emu_nid_to_phys[j] == i)
					break;
			/* fake nodes PXM mapping has been done */
			if (j <= max_nid)
				continue;
			/* find first hole */
			for (j = 0;
			     j < MAX_NUMNODES &&
				 node_to_pxm_map_copy[j] != PXM_INVAL;
			     j++)
			;
			if (WARN(j == MAX_NUMNODES,
			    "Number of nodes exceeds MAX_NUMNODES\n"))
				return -1;
			node_to_pxm_map_copy[j] = node_to_pxm_map[i];
			node_set(j, nodes_to_enable);
			count++;
		}
	}

	/* creating reverse mapping in pxm_to_node_map[] */
	for (i = 0; i < MAX_NUMNODES; i++)
		if (node_to_pxm_map_copy[i] != PXM_INVAL &&
		    pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)
			pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i;

	/* overwrite with new mapping */
	for (i = 0; i < MAX_NUMNODES; i++) {
		node_to_pxm_map[i] = node_to_pxm_map_copy[i];
		pxm_to_node_map[i] = pxm_to_node_map_copy[i];
	}

	/* enable other nodes found in PXM for hotplug */
	nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed);

	pr_debug("found %d total number of nodes\n", count);
	return 0;
}
#endif
178 
/*
 * Log one SRAT subtable at debug level; unknown entry types are reported
 * with a warning so firmware issues are visible.
 */
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
	switch (header->type) {
	case ACPI_SRAT_TYPE_CPU_AFFINITY:
		{
			struct acpi_srat_cpu_affinity *p =
			    (struct acpi_srat_cpu_affinity *)header;
			pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
				 p->apic_id, p->local_sapic_eid,
				 p->proximity_domain_lo,
				 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
				 "enabled" : "disabled");
		}
		break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
		{
			struct acpi_srat_mem_affinity *p =
			    (struct acpi_srat_mem_affinity *)header;
			pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
				 (unsigned long long)p->base_address,
				 (unsigned long long)p->length,
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
				 "enabled" : "disabled",
				 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
				 " hot-pluggable" : "",
				 (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
				 " non-volatile" : "");
		}
		break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
		{
			struct acpi_srat_x2apic_cpu_affinity *p =
			    (struct acpi_srat_x2apic_cpu_affinity *)header;
			pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
				 p->apic_id,
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_CPU_ENABLED) ?
				 "enabled" : "disabled");
		}
		break;

	case ACPI_SRAT_TYPE_GICC_AFFINITY:
		{
			struct acpi_srat_gicc_affinity *p =
			    (struct acpi_srat_gicc_affinity *)header;
			pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
				 p->acpi_processor_uid,
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_GICC_ENABLED) ?
				 "enabled" : "disabled");
		}
		break;

	case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
	{
		struct acpi_srat_generic_affinity *p =
			(struct acpi_srat_generic_affinity *)header;

		/* Handle type 0 is a PCI segment/BDF pair (per ACPI spec). */
		if (p->device_handle_type == 0) {
			/*
			 * For pci devices this may be the only place they
			 * are assigned a proximity domain
			 */
			pr_debug("SRAT Generic Initiator(Seg:%u BDF:%u) in proximity domain %d %s\n",
				 *(u16 *)(&p->device_handle[0]),
				 *(u16 *)(&p->device_handle[2]),
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
				"enabled" : "disabled");
		} else {
			/*
			 * In this case we can rely on the device having a
			 * proximity domain reference
			 */
			pr_debug("SRAT Generic Initiator(HID=%.8s UID=%.4s) in proximity domain %d %s\n",
				(char *)(&p->device_handle[0]),
				(char *)(&p->device_handle[8]),
				p->proximity_domain,
				(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
				"enabled" : "disabled");
		}
	}
	break;

	case ACPI_SRAT_TYPE_RINTC_AFFINITY:
		{
			struct acpi_srat_rintc_affinity *p =
			    (struct acpi_srat_rintc_affinity *)header;
			pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
				 p->acpi_processor_uid,
				 p->proximity_domain,
				 (p->flags & ACPI_SRAT_RINTC_ENABLED) ?
				 "enabled" : "disabled");
		}
		break;

	default:
		pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
			header->type);
		break;
	}
}
285 
286 /*
287  * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
288  * up the NUMA heuristics which wants the local node to have a smaller
289  * distance than the others.
290  * Do some quick checks here and only use the SLIT if it passes.
291  */
slit_valid(struct acpi_table_slit * slit)292 static int __init slit_valid(struct acpi_table_slit *slit)
293 {
294 	int i, j;
295 	int d = slit->locality_count;
296 	for (i = 0; i < d; i++) {
297 		for (j = 0; j < d; j++) {
298 			u8 val = slit->entry[d*i + j];
299 			if (i == j) {
300 				if (val != LOCAL_DISTANCE)
301 					return 0;
302 			} else if (val <= LOCAL_DISTANCE)
303 				return 0;
304 		}
305 	}
306 	return 1;
307 }
308 
/* Report a broken SRAT and disable all further SRAT-based NUMA setup. */
void __init bad_srat(void)
{
	pr_err("SRAT: SRAT not used.\n");
	disable_srat();
}
314 
/* True once SRAT use has been disabled via disable_srat()/bad_srat(). */
int __init srat_disabled(void)
{
	return acpi_numa < 0;
}
319 
/*
 * Weak stub, overridden by architectures with numa_memblks support.
 * Reports that no memblk intersects [start, end).
 */
__weak int __init numa_fill_memblks(u64 start, u64 end)
{
	return NUMA_NO_MEMBLK;
}
324 
325 /*
326  * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
327  * I/O localities since SRAT does not list them.  I/O localities are
328  * not supported at this point.
329  */
acpi_parse_slit(struct acpi_table_header * table)330 static int __init acpi_parse_slit(struct acpi_table_header *table)
331 {
332 	struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
333 	int i, j;
334 
335 	if (!slit_valid(slit)) {
336 		pr_info("SLIT table looks invalid. Not used.\n");
337 		return -EINVAL;
338 	}
339 
340 	for (i = 0; i < slit->locality_count; i++) {
341 		const int from_node = pxm_to_node(i);
342 
343 		if (from_node == NUMA_NO_NODE)
344 			continue;
345 
346 		for (j = 0; j < slit->locality_count; j++) {
347 			const int to_node = pxm_to_node(j);
348 
349 			if (to_node == NUMA_NO_NODE)
350 				continue;
351 
352 			numa_set_distance(from_node, to_node,
353 				slit->entry[slit->locality_count * i + j]);
354 		}
355 	}
356 
357 	return 0;
358 }
359 
/* Count of SRAT memory affinity entries successfully registered. */
static int parsed_numa_memblks __initdata;

/*
 * Callback for SRAT memory affinity entries: map the PXM to a node,
 * register the range with the NUMA core and mark hotpluggable ranges
 * in memblock.  On a malformed entry the whole SRAT is discarded via
 * bad_srat(), but 0 is still returned so table parsing continues.
 */
static int __init
acpi_parse_memory_affinity(union acpi_subtable_headers *header,
			   const unsigned long table_end)
{
	struct acpi_srat_mem_affinity *ma;
	u64 start, end;
	u32 hotpluggable;
	int node, pxm;

	ma = (struct acpi_srat_mem_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	if (srat_disabled())
		return 0;
	if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
		pr_err("SRAT: Unexpected header length: %d\n",
		       ma->header.length);
		goto out_err_bad_srat;
	}
	/* Disabled entries are valid but carry no usable range. */
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return 0;
	hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
		(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);

	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	/* SRAT revision <= 1 only defines the low 8 bits of the PXM. */
	if (acpi_srat_revision <= 1)
		pxm &= 0xff;

	node = acpi_map_pxm_to_node(pxm);
	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains.\n");
		goto out_err_bad_srat;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n",
		       node, (unsigned long long) start,
		       (unsigned long long) end - 1);
		goto out_err_bad_srat;
	}

	node_set(node, numa_nodes_parsed);

	pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
		node, pxm,
		(unsigned long long) start, (unsigned long long) end - 1,
		hotpluggable ? " hotplug" : "",
		ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");

	/* Mark hotplug range in memblock. */
	if (hotpluggable && memblock_mark_hotplug(start, ma->length))
		pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
			(unsigned long long)start, (unsigned long long)end - 1);

	max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));

	parsed_numa_memblks++;

	return 0;

out_err_bad_srat:
	/* Just disable SRAT, but do not fail and ignore errors. */
	bad_srat();

	return 0;
}
431 
/*
 * Callback for CEDT CFMWS entries.  @arg points at the next free fake
 * PXM value; each CFMWS window with no SRAT coverage consumes one and
 * becomes a new NUMA node.
 */
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
				   void *arg, const unsigned long table_end)
{
	struct acpi_cedt_cfmws *cfmws;
	int *fake_pxm = arg;
	u64 start, end;
	int node;

	cfmws = (struct acpi_cedt_cfmws *)header;
	start = cfmws->base_hpa;
	end = cfmws->base_hpa + cfmws->window_size;

	/*
	 * The SRAT may have already described NUMA details for all,
	 * or a portion of, this CFMWS HPA range. Extend the memblks
	 * found for any portion of the window to cover the entire
	 * window.
	 */
	if (!numa_fill_memblks(start, end))
		return 0;

	/* No SRAT description. Create a new node. */
	node = acpi_map_pxm_to_node(*fake_pxm);

	if (node == NUMA_NO_NODE) {
		pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
		return -EINVAL;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		/* CXL driver must handle the NUMA_NO_NODE case */
		pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
			node, start, end);
	}
	node_set(node, numa_nodes_parsed);

	/* Set the next available fake_pxm value */
	(*fake_pxm)++;
	return 0;
}
472 
/* Weak stub for architectures without x2APIC affinity support. */
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}
478 
479 static int __init
acpi_parse_x2apic_affinity(union acpi_subtable_headers * header,const unsigned long end)480 acpi_parse_x2apic_affinity(union acpi_subtable_headers *header,
481 			   const unsigned long end)
482 {
483 	struct acpi_srat_x2apic_cpu_affinity *processor_affinity;
484 
485 	processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;
486 
487 	acpi_table_print_srat_entry(&header->common);
488 
489 	/* let architecture-dependent part to do it */
490 	acpi_numa_x2apic_affinity_init(processor_affinity);
491 
492 	return 0;
493 }
494 
495 static int __init
acpi_parse_processor_affinity(union acpi_subtable_headers * header,const unsigned long end)496 acpi_parse_processor_affinity(union acpi_subtable_headers *header,
497 			      const unsigned long end)
498 {
499 	struct acpi_srat_cpu_affinity *processor_affinity;
500 
501 	processor_affinity = (struct acpi_srat_cpu_affinity *)header;
502 
503 	acpi_table_print_srat_entry(&header->common);
504 
505 	/* let architecture-dependent part to do it */
506 	acpi_numa_processor_affinity_init(processor_affinity);
507 
508 	return 0;
509 }
510 
511 static int __init
acpi_parse_gicc_affinity(union acpi_subtable_headers * header,const unsigned long end)512 acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
513 			 const unsigned long end)
514 {
515 	struct acpi_srat_gicc_affinity *processor_affinity;
516 
517 	processor_affinity = (struct acpi_srat_gicc_affinity *)header;
518 
519 	acpi_table_print_srat_entry(&header->common);
520 
521 	/* let architecture-dependent part to do it */
522 	acpi_numa_gicc_affinity_init(processor_affinity);
523 
524 	return 0;
525 }
526 
#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
/*
 * Callback for SRAT Generic Initiator entries: map the proximity domain
 * to a node and mark the node as hosting a generic initiator.
 */
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	struct acpi_srat_generic_affinity *gi_affinity;
	int node;

	gi_affinity = (struct acpi_srat_generic_affinity *)header;
	/* NOTE(review): cast of @header cannot yield NULL here; check kept as-is. */
	if (!gi_affinity)
		return -EINVAL;
	acpi_table_print_srat_entry(&header->common);

	if (!(gi_affinity->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
		return -EINVAL;

	node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains.\n");
		return -EINVAL;
	}
	node_set(node, numa_nodes_parsed);
	node_set_state(node, N_GENERIC_INITIATOR);

	return 0;
}
#else
/* No Generic Initiator support on this architecture: ignore the entry. */
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	return 0;
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
561 
562 static int __init
acpi_parse_rintc_affinity(union acpi_subtable_headers * header,const unsigned long end)563 acpi_parse_rintc_affinity(union acpi_subtable_headers *header,
564 			  const unsigned long end)
565 {
566 	struct acpi_srat_rintc_affinity *rintc_affinity;
567 
568 	rintc_affinity = (struct acpi_srat_rintc_affinity *)header;
569 	acpi_table_print_srat_entry(&header->common);
570 
571 	/* let architecture-dependent part to do it */
572 	acpi_numa_rintc_affinity_init(rintc_affinity);
573 
574 	return 0;
575 }
576 
/* Callback for acpi_table_parse(): only records the SRAT revision. */
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat = (struct acpi_table_srat *)table;

	acpi_srat_revision = srat->header.revision;

	/* Real work done in acpi_table_parse_srat below. */

	return 0;
}
587 
/* Parse all SRAT subtables of type @id with @handler. */
static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
		      acpi_tbl_entry_handler handler, unsigned int max_entries)
{
	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					    sizeof(struct acpi_table_srat), id,
					    handler, max_entries);
}
596 
/*
 * Parse the SRAT, SLIT and CEDT/CFMWS tables to build the NUMA topology.
 * Returns 0 on success, -EINVAL when ACPI is disabled, a negative error
 * from SRAT memory parsing, or -ENOENT when no memory affinity entries
 * were found.
 */
int __init acpi_numa_init(void)
{
	int i, fake_pxm, cnt = 0;

	if (acpi_disabled)
		return -EINVAL;

	/*
	 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
	 * SRAT cpu entries could have different order with that in MADT.
	 * So go over all cpu entries in SRAT to get apicid to node mapping.
	 */

	/* SRAT: System Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		struct acpi_subtable_proc srat_proc[5];

		/* One handler per processor-affinity subtable type. */
		memset(srat_proc, 0, sizeof(srat_proc));
		srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
		srat_proc[0].handler = acpi_parse_processor_affinity;
		srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
		srat_proc[1].handler = acpi_parse_x2apic_affinity;
		srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY;
		srat_proc[2].handler = acpi_parse_gicc_affinity;
		srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
		srat_proc[3].handler = acpi_parse_gi_affinity;
		srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY;
		srat_proc[4].handler = acpi_parse_rintc_affinity;

		acpi_table_parse_entries_array(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat),
					srat_proc, ARRAY_SIZE(srat_proc), 0);

		/* Memory affinity entries are handled in a second pass. */
		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity, 0);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	/*
	 * CXL Fixed Memory Window Structures (CFMWS) must be parsed
	 * after the SRAT. Create NUMA Nodes for CXL memory ranges that
	 * are defined in the CFMWS and not already defined in the SRAT.
	 * Initialize a fake_pxm as the first available PXM to emulate.
	 */

	/* fake_pxm is the next unused PXM value after SRAT parsing */
	for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] > fake_pxm)
			fake_pxm = node_to_pxm_map[i];
	}
	last_real_pxm = fake_pxm;
	fake_pxm++;
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
			      &fake_pxm);

	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}
660 
acpi_node_backed_by_real_pxm(int nid)661 bool acpi_node_backed_by_real_pxm(int nid)
662 {
663 	int pxm = node_to_pxm(nid);
664 
665 	return pxm <= last_real_pxm;
666 }
667 EXPORT_SYMBOL_GPL(acpi_node_backed_by_real_pxm);
668 
/*
 * Evaluate _PXM on @h, walking up the ACPI namespace until an ancestor
 * provides one.  Returns the PXM value, or -1 if none is found.
 */
static int acpi_get_pxm(acpi_handle h)
{
	acpi_handle handle = h;

	for (;;) {
		unsigned long long pxm;
		acpi_handle parent;

		if (ACPI_SUCCESS(acpi_evaluate_integer(handle, "_PXM",
						       NULL, &pxm)))
			return pxm;
		/* Stop when there is no parent left to ask. */
		if (ACPI_FAILURE(acpi_get_parent(handle, &parent)))
			return -1;
		handle = parent;
	}
}
685 
/*
 * Resolve the logical NUMA node for an ACPI device handle by finding the
 * nearest _PXM and translating it; NUMA_NO_NODE when none applies.
 */
int acpi_get_node(acpi_handle handle)
{
	return pxm_to_node(acpi_get_pxm(handle));
}
EXPORT_SYMBOL(acpi_get_node);
695