/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/bsd/helpers.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci_def.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <types.h>

/**
 * Given a Local APIC ID, find the device structure.
 *
 * @param apic_id The Local APIC ID number.
 * @return Pointer to the device structure (if found), NULL otherwise.
 */
struct device *dev_find_lapic(unsigned int apic_id)
{
	struct device *dev;
	struct device *result = NULL;

	for (dev = all_devices; dev; dev = dev->next) {
		if (dev->path.type == DEVICE_PATH_APIC &&
		    dev->path.apic.apic_id == apic_id) {
			result = dev;
			break;
		}
	}
	return result;
}
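
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * look up the device structure for a CPU by its Local APIC ID. The
 * variable `lapic_id` is a placeholder for a value obtained elsewhere
 * (e.g. read from the LAPIC ID register).
 *
 *	struct device *cpu = dev_find_lapic(lapic_id);
 *	if (cpu)
 *		printk(BIOS_DEBUG, "Found CPU: %s\n", dev_path(cpu));
 */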

/**
 * Find a device of a given vendor and type.
 *
 * @param vendor A PCI vendor ID (e.g. 0x8086 for Intel).
 * @param device A PCI device ID.
 * @param from Pointer to the device structure, used as a starting point in
 *             the linked list of all_devices, which can be 0 to start at the
 *             head of the list (i.e. all_devices).
 * @return Pointer to the device struct, or NULL if no (further) match is found.
 */
struct device *dev_find_device(u16 vendor, u16 device, struct device *from)
{
	if (!from)
		from = all_devices;
	else
		from = from->next;

	while (from && (from->vendor != vendor || from->device != device))
		from = from->next;

	return from;
}
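
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * walk every instance of a given PCI vendor/device pair by passing the
 * previous result back in as `from`. The IDs below are placeholders.
 *
 *	struct device *dev = NULL;
 *	while ((dev = dev_find_device(0x8086, 0x1234, dev)) != NULL)
 *		printk(BIOS_DEBUG, "Match: %s\n", dev_path(dev));
 */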

/**
 * Find a device of a given class.
 *
 * @param class Class of the device.
 * @param from Pointer to the device structure, used as a starting point in
 *             the linked list of all_devices, which can be 0 to start at the
 *             head of the list (i.e. all_devices).
 * @return Pointer to the device struct, or NULL if no (further) match is found.
 */
struct device *dev_find_class(unsigned int class, struct device *from)
{
	if (!from)
		from = all_devices;
	else
		from = from->next;

	while (from && (from->class & 0xffffff00) != class)
		from = from->next;

	return from;
}
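
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * the low byte of dev->class (the programming interface) is masked off,
 * so callers pass the base-class/sub-class value shifted left by 8 bits.
 * For example, to find a VGA-class device (assuming PCI_CLASS_DISPLAY_VGA
 * from <device/pci_ids.h>):
 *
 *	struct device *vga = dev_find_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
 */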

/**
 * Encode the device path into 3 bytes for logging to CMOS.
 *
 * @param dev The device path to encode.
 * @return Device path encoded into lower 3 bytes of dword.
 */
u32 dev_path_encode(const struct device *dev)
{
	u32 ret;

	if (!dev)
		return 0;

	/* Store the device type in 3rd byte. */
	ret = dev->path.type << 16;

	/* Encode the device specific path in the low word. */
	switch (dev->path.type) {
	case DEVICE_PATH_ROOT:
		break;
	case DEVICE_PATH_PCI:
		ret |= dev->upstream->segment_group << 16 |
		       dev->upstream->secondary << 8 | dev->path.pci.devfn;
		break;
	case DEVICE_PATH_PNP:
		ret |= dev->path.pnp.port << 8 | dev->path.pnp.device;
		break;
	case DEVICE_PATH_I2C:
		ret |= dev->path.i2c.mode_10bit << 8 | dev->path.i2c.device;
		break;
	case DEVICE_PATH_APIC:
		ret |= dev->path.apic.apic_id;
		break;
	case DEVICE_PATH_DOMAIN:
		ret |= dev->path.domain.domain;
		break;
	case DEVICE_PATH_CPU_CLUSTER:
		ret |= dev->path.cpu_cluster.cluster;
		break;
	case DEVICE_PATH_CPU:
		ret |= dev->path.cpu.id;
		break;
	case DEVICE_PATH_CPU_BUS:
		ret |= dev->path.cpu_bus.id;
		break;
	case DEVICE_PATH_IOAPIC:
		ret |= dev->path.ioapic.ioapic_id;
		break;
	case DEVICE_PATH_GENERIC:
		ret |= dev->path.generic.subid << 8 | dev->path.generic.id;
		break;
	case DEVICE_PATH_SPI:
		ret |= dev->path.spi.cs;
		break;
	case DEVICE_PATH_USB:
		ret |= dev->path.usb.port_type << 8 | dev->path.usb.port_id;
		break;
	case DEVICE_PATH_GPIO:
		ret |= dev->path.gpio.id;
		break;
	case DEVICE_PATH_MDIO:
		ret |= dev->path.mdio.addr;
		break;
	case DEVICE_PATH_NONE:
	case DEVICE_PATH_MMIO:  /* don't care */
	default:
		break;
	}

	return ret;
}

/*
 * Warning: This function uses a static buffer. Don't call it more than once
 * from the same print statement!
 */
const char *dev_path(const struct device *dev)
{
	static char buffer[DEVICE_PATH_MAX];

	buffer[0] = '\0';
	if (!dev) {
		strcpy(buffer, "<null>");
	} else {
		switch (dev->path.type) {
		case DEVICE_PATH_NONE:
			strcpy(buffer, "NONE");
			break;
		case DEVICE_PATH_ROOT:
			strcpy(buffer, "Root Device");
			break;
		case DEVICE_PATH_PCI:
			snprintf(buffer, sizeof(buffer),
				 "PCI: %02x:%02x:%02x.%01x",
				 dev->upstream->segment_group,
				 dev->upstream->secondary,
				 PCI_SLOT(dev->path.pci.devfn),
				 PCI_FUNC(dev->path.pci.devfn));
			break;
		case DEVICE_PATH_PNP:
			snprintf(buffer, sizeof(buffer), "PNP: %04x.%01x",
				 dev->path.pnp.port, dev->path.pnp.device);
			break;
		case DEVICE_PATH_I2C:
			snprintf(buffer, sizeof(buffer), "I2C: %02x:%02x",
				 dev->upstream->secondary,
				 dev->path.i2c.device);
			break;
		case DEVICE_PATH_APIC:
			snprintf(buffer, sizeof(buffer), "APIC: %02x",
				 dev->path.apic.apic_id);
			break;
		case DEVICE_PATH_IOAPIC:
			snprintf(buffer, sizeof(buffer), "IOAPIC: %02x",
				 dev->path.ioapic.ioapic_id);
			break;
		case DEVICE_PATH_DOMAIN:
			snprintf(buffer, sizeof(buffer), "DOMAIN: %08x",
				 dev->path.domain.domain);
			break;
		case DEVICE_PATH_CPU_CLUSTER:
			snprintf(buffer, sizeof(buffer), "CPU_CLUSTER: %01x",
				 dev->path.cpu_cluster.cluster);
			break;
		case DEVICE_PATH_CPU:
			snprintf(buffer, sizeof(buffer),
				 "CPU: %02x", dev->path.cpu.id);
			break;
		case DEVICE_PATH_CPU_BUS:
			snprintf(buffer, sizeof(buffer),
				 "CPU_BUS: %02x", dev->path.cpu_bus.id);
			break;
		case DEVICE_PATH_GENERIC:
			snprintf(buffer, sizeof(buffer),
				 "GENERIC: %d.%d", dev->path.generic.id,
				 dev->path.generic.subid);
			break;
		case DEVICE_PATH_SPI:
			snprintf(buffer, sizeof(buffer), "SPI: %02x",
				 dev->path.spi.cs);
			break;
		case DEVICE_PATH_USB:
			snprintf(buffer, sizeof(buffer), "USB%u port %u",
				 dev->path.usb.port_type, dev->path.usb.port_id);
			break;
		case DEVICE_PATH_MMIO:
			snprintf(buffer, sizeof(buffer), "MMIO: %08lx",
				 dev->path.mmio.addr);
			break;
		case DEVICE_PATH_GPIO:
			snprintf(buffer, sizeof(buffer), "GPIO: %d", dev->path.gpio.id);
			break;
		case DEVICE_PATH_MDIO:
			snprintf(buffer, sizeof(buffer), "MDIO: %02x", dev->path.mdio.addr);
			break;
		default:
			printk(BIOS_ERR, "Unknown device path type: %d\n",
			       dev->path.type);
			break;
		}
	}
	return buffer;
}
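
/*
 * Illustrative note (editor's sketch, not part of the original file):
 * because dev_path() returns a pointer to a single static buffer, two
 * calls in one printk() would print the same string twice. Copy the
 * first result if both paths are needed. `parent` and `child` below are
 * placeholder device pointers.
 *
 *	char parent_path[DEVICE_PATH_MAX];
 *	snprintf(parent_path, sizeof(parent_path), "%s", dev_path(parent));
 *	printk(BIOS_DEBUG, "%s -> %s\n", parent_path, dev_path(child));
 */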

const char *dev_name(const struct device *dev)
{
	if (dev->name)
		return dev->name;
	else if (dev->chip_ops && dev->chip_ops->name)
		return dev->chip_ops->name;
	else
		return "unknown";
}

/* Returns the domain device for the given device, or NULL if there is none. */
const struct device *dev_get_domain(const struct device *dev)
{
	/* Walk up the tree until the domain is found */
	while (dev && dev->upstream && !is_root_device(dev)) {
		if (dev->path.type == DEVICE_PATH_DOMAIN)
			return dev;
		dev = dev->upstream->dev;
	}

	return NULL;
}
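
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * retrieve the domain number a device lives under. Assumes `dev` is a
 * device somewhere below a domain device in the tree.
 *
 *	const struct device *domain = dev_get_domain(dev);
 *	if (domain)
 *		printk(BIOS_DEBUG, "Domain: %x\n", domain->path.domain.domain);
 */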

/**
 * Allocate 64 more resources to the free list.
 *
 * @return 1 on success, 0 if the allocation failed.
 */
static int allocate_more_resources(void)
{
	int i;
	struct resource *new_res_list;

	new_res_list = malloc(64 * sizeof(*new_res_list));

	if (new_res_list == NULL)
		return 0;

	memset(new_res_list, 0, 64 * sizeof(*new_res_list));

	for (i = 0; i < 64 - 1; i++)
		new_res_list[i].next = &new_res_list[i+1];

	free_resources = new_res_list;
	return 1;
}

/**
 * Remove resource res from the device's list and add it to the free list.
 *
 * @param dev The device whose resource list res is removed from.
 * @param res The resource to move to the free list.
 * @param prev The resource preceding res in the device's list, or NULL if
 *             res is the head of the list.
 */
static void free_resource(struct device *dev, struct resource *res,
			  struct resource *prev)
{
	if (prev)
		prev->next = res->next;
	else
		dev->resource_list = res->next;

	res->next = free_resources;
	free_resources = res;
}

/**
 * See if we have unused but allocated resource structures.
 *
 * If so remove the allocation.
 *
 * @param dev The device to find the resource on.
 */
void compact_resources(struct device *dev)
{
	struct resource *res, *next, *prev = NULL;

	/* Move all of the free resources to the end */
	for (res = dev->resource_list; res; res = next) {
		next = res->next;
		if (!res->flags)
			free_resource(dev, res, prev);
		else
			prev = res;
	}
}

/**
 * See if a resource structure already exists for a given index.
 *
 * @param dev The device to find the resource on.
 * @param index The index of the resource on the device.
 * @return The resource, if it already exists; NULL otherwise.
 */
struct resource *probe_resource(const struct device *dev, unsigned int index)
{
	struct resource *res;

	/* See if there is a resource with the appropriate index */
	for (res = dev->resource_list; res; res = res->next) {
		if (res->index == index)
			break;
	}

	return res;
}

/**
 * See if a resource structure already exists for a given index and if not
 * allocate one.
 *
 * Then initialize the resource to default values.
 *
 * @param dev The device to find the resource on.
 * @param index The index of the resource on the device.
 * @return The resource for the given index.
 */
struct resource *new_resource(struct device *dev, unsigned int index)
{
	struct resource *resource, *tail;

	/* First move all of the free resources to the end. */
	compact_resources(dev);

	/* See if there is a resource with the appropriate index. */
	resource = probe_resource(dev, index);
	if (!resource) {
		if (free_resources == NULL && !allocate_more_resources())
			die("Couldn't allocate more resources.");

		resource = free_resources;
		free_resources = free_resources->next;
		memset(resource, 0, sizeof(*resource));
		resource->next = NULL;
		tail = dev->resource_list;
		if (tail) {
			while (tail->next)
				tail = tail->next;
			tail->next = resource;
		} else {
			dev->resource_list = resource;
		}
	}

	/* Initialize the resource values. */
	if (!(resource->flags & IORESOURCE_FIXED)) {
		resource->flags = 0;
		resource->base = 0;
	}
	resource->size  = 0;
	resource->limit = 0;
	resource->index = index;
	resource->align = 0;
	resource->gran  = 0;

	return resource;
}
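
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * a driver's read_resources() hook typically reports a fixed MMIO window
 * this way. The index 0x10 and the base/size values are placeholders.
 *
 *	static void example_read_resources(struct device *dev)
 *	{
 *		struct resource *res = new_resource(dev, 0x10);
 *
 *		res->base = 0xfed40000;
 *		res->size = 0x5000;
 *		res->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
 *			     IORESOURCE_STORED | IORESOURCE_ASSIGNED;
 *	}
 */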

/**
 * Return an existing resource structure for a given index.
 *
 * @param dev The device to find the resource on.
 * @param index The index of the resource on the device.
 * @return The resource, if it exists; otherwise the function dies and does
 *         not return.
 */
struct resource *find_resource(const struct device *dev, unsigned int index)
{
	struct resource *resource;

	/* See if there is a resource with the appropriate index. */
	resource = probe_resource(dev, index);
	if (!resource)
		die("%s missing resource: %02x\n", dev_path(dev), index);
	return resource;
}

/**
 * Round a number up to the next multiple of gran.
 *
 * @param val The starting value.
 * @param gran Granularity we are aligning the number to, expressed as a
 *             power of two (the number of low-order address bits that must
 *             be zero).
 * @return The aligned value.
 */
static resource_t align_up(resource_t val, unsigned long gran)
{
	resource_t mask;
	mask = (1ULL << gran) - 1ULL;
	val += mask;
	val &= ~mask;
	return val;
}

/**
 * Round a number down to the previous multiple of gran.
 *
 * @param val The starting value.
 * @param gran Granularity we are aligning the number to, expressed as a
 *             power of two (the number of low-order address bits that must
 *             be zero).
 * @return The aligned value.
 */
static resource_t align_down(resource_t val, unsigned long gran)
{
	resource_t mask;
	mask = (1ULL << gran) - 1ULL;
	val &= ~mask;
	return val;
}
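
/*
 * Illustrative example (editor's note, not part of the original file):
 * gran is a bit count, not a byte count. With gran = 12 (4 KiB
 * granularity):
 *
 *	align_up(0x1234, 12)   == 0x2000
 *	align_down(0x1234, 12) == 0x1000
 */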

/**
 * Compute the maximum address that is part of a resource.
 *
 * @param resource The resource whose limit is desired.
 * @return The end.
 */
resource_t resource_end(const struct resource *resource)
{
	resource_t base, end;

	/* Get the base address. */
	base = resource->base;

	/*
	 * For a non-bridge resource, granularity and alignment are the same.
	 * For a bridge resource, align is the largest alignment needed below
	 * the bridge, while the granularity is simply how many low bits of
	 * the address cannot be set.
	 */

	/* Get the end (rounded up). */
	end = base + align_up(resource->size, resource->gran) - 1;

	return end;
}

/**
 * Compute the maximum legal value for resource->base.
 *
 * @param resource The resource whose maximum is desired.
 * @return The maximum.
 */
resource_t resource_max(const struct resource *resource)
{
	resource_t max;

	max = align_down(resource->limit - resource->size + 1, resource->align);

	return max;
}
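
/*
 * Illustrative example (editor's note, not part of the original file):
 * for a resource with limit = 0xffff, size = 0x100 and align = 8
 * (256-byte alignment), the highest base that still fits below the
 * limit is:
 *
 *	resource_max(res) == align_down(0xffff - 0x100 + 1, 8) == 0xff00
 */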

/**
 * Return the resource type of a resource.
 *
 * @param resource The resource type to decode.
 * @return A short string describing the resource type, built in a static
 *         buffer.
 */
const char *resource_type(const struct resource *resource)
{
	static char buffer[RESOURCE_TYPE_MAX];
	snprintf(buffer, sizeof(buffer), "%s%s%s%s",
		 ((resource->flags & IORESOURCE_READONLY) ? "ro" : ""),
		 ((resource->flags & IORESOURCE_PREFETCH) ? "pref" : ""),
		 ((resource->flags == 0) ? "unused" :
		  (resource->flags & IORESOURCE_IO) ? "io" :
		  (resource->flags & IORESOURCE_DRQ) ? "drq" :
		  (resource->flags & IORESOURCE_IRQ) ? "irq" :
		  (resource->flags & IORESOURCE_MEM) ? "mem" : "??????"),
		 ((resource->flags & IORESOURCE_PCI64) ? "64" : ""));
	return buffer;
}

/**
 * Print the resource that was just stored.
 *
 * @param dev The device the stored resource lives on.
 * @param resource The resource that was just stored.
 * @param comment A comment string appended to the log line.
 */
void report_resource_stored(struct device *dev, const struct resource *resource,
			    const char *comment)
{
	char buf[10];
	unsigned long long base, end;

	if (!(resource->flags & IORESOURCE_STORED))
		return;

	base = resource->base;
	end = resource_end(resource);
	buf[0] = '\0';

	if (dev->downstream && (resource->flags & IORESOURCE_PCI_BRIDGE)) {
		snprintf(buf, sizeof(buf),
			 "seg %02x bus %02x ", dev->downstream->segment_group,
			 dev->downstream->secondary);
	}
	printk(BIOS_DEBUG, "%s %02lx <- [0x%016llx - 0x%016llx] size 0x%08llx "
	       "gran 0x%02x %s%s%s\n", dev_path(dev), resource->index,
		base, end, resource->size, resource->gran, buf,
		resource_type(resource), comment);
}

void search_bus_resources(struct bus *bus, unsigned long type_mask,
			  unsigned long type, resource_search_t search,
			  void *gp)
{
	struct device *curdev;

	for (curdev = bus->children; curdev; curdev = curdev->sibling) {
		struct resource *res;

		/* Ignore disabled devices. */
		if (!curdev->enabled)
			continue;

		for (res = curdev->resource_list; res; res = res->next) {
			/* If it isn't the right kind of resource ignore it. */
			if ((res->flags & type_mask) != type)
				continue;

			/* If it is a subtractive resource recurse. */
			if (res->flags & IORESOURCE_SUBTRACTIVE) {
				if (curdev->downstream)
					search_bus_resources(curdev->downstream, type_mask, type,
							     search, gp);
				continue;
			}
			search(gp, curdev, res);
		}
	}
}

void search_global_resources(unsigned long type_mask, unsigned long type,
			     resource_search_t search, void *gp)
{
	struct device *curdev;

	for (curdev = all_devices; curdev; curdev = curdev->next) {
		struct resource *res;

		/* Ignore disabled devices. */
		if (!curdev->enabled)
			continue;

		for (res = curdev->resource_list; res; res = res->next) {
			/* If it isn't the right kind of resource ignore it. */
			if ((res->flags & type_mask) != type)
				continue;

			/* If it is a subtractive resource ignore it. */
			if (res->flags & IORESOURCE_SUBTRACTIVE)
				continue;

			/* If the resource is not assigned ignore it. */
			if (!(res->flags & IORESOURCE_ASSIGNED))
				continue;

			search(gp, curdev, res);
		}
	}
}
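
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * a resource_search_t callback receives the opaque pointer passed as `gp`
 * plus each matching device and resource. For example, counting every
 * assigned memory resource in the system (the callback and counter are
 * placeholders defined by the caller):
 *
 *	static void count_mem(void *gp, struct device *dev, struct resource *res)
 *	{
 *		(*(unsigned int *)gp)++;
 *	}
 *
 *	unsigned int count = 0;
 *	search_global_resources(IORESOURCE_MEM, IORESOURCE_MEM,
 *				count_mem, &count);
 */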

void dev_set_enabled(struct device *dev, int enable)
{
	if (dev->enabled == enable)
		return;

	dev->enabled = enable;
	if (dev->ops && dev->ops->enable)
		dev->ops->enable(dev);
	else if (dev->chip_ops && dev->chip_ops->enable_dev)
		dev->chip_ops->enable_dev(dev);
}

void disable_children(struct bus *bus)
{
	struct device *child;

	for (child = bus->children; child; child = child->sibling) {
		if (child->downstream)
			disable_children(child->downstream);
		dev_set_enabled(child, 0);
	}
}

/*
 * Returns true if the device is an enabled bridge that has at least
 * one enabled device on its secondary bus that is not of type NONE.
 */
bool dev_is_active_bridge(struct device *dev)
{
	struct device *child;

	if (!dev || !dev->enabled)
		return 0;

	if (!dev->downstream || !dev->downstream->children)
		return 0;

	for (child = dev->downstream->children; child; child = child->sibling) {
		if (child->path.type == DEVICE_PATH_NONE)
			continue;
		if (child->enabled)
			return 1;
	}

	return 0;
}

static void resource_tree(const struct device *root, int debug_level, int depth)
{
	int i = 0;
	struct device *child;
	struct resource *res;
	char indent[30];	/* If your tree has more levels, it's wrong. */

	for (i = 0; i < depth + 1 && i < 29; i++)
		indent[i] = ' ';
	indent[i] = '\0';

	printk(BIOS_DEBUG, "%s%s", indent, dev_path(root));
	if (root->downstream && root->downstream->children)
		printk(BIOS_DEBUG, " child on link 0 %s",
			  dev_path(root->downstream->children));
	printk(BIOS_DEBUG, "\n");

	for (res = root->resource_list; res; res = res->next) {
		printk(debug_level, "%s%s resource base %llx size %llx "
			  "align %d gran %d limit %llx flags %lx index %lx\n",
			  indent, dev_path(root), res->base, res->size,
			  res->align, res->gran, res->limit, res->flags,
			  res->index);
	}

	if (!root->downstream)
		return;

	for (child = root->downstream->children; child; child = child->sibling)
		resource_tree(child, debug_level, depth + 1);
}

void print_resource_tree(const struct device *root, int debug_level,
			 const char *msg)
{
	/* Bail if root is null. */
	if (!root) {
		printk(debug_level, "%s passed NULL for root!\n", __func__);
		return;
	}

	/* Bail if not printing to screen. */
	if (!printk(debug_level, "Show resources in subtree (%s)...%s\n",
		       dev_path(root), msg))
		return;

	resource_tree(root, debug_level, 0);
}

void show_devs_tree(const struct device *dev, int debug_level, int depth)
{
	char depth_str[20];
	int i;
	struct device *sibling;

	for (i = 0; i < depth; i++)
		depth_str[i] = ' ';
	depth_str[i] = '\0';

	printk(debug_level, "%s%s: enabled %d\n",
		  depth_str, dev_path(dev), dev->enabled);

	if (!dev->downstream)
		return;

	for (sibling = dev->downstream->children; sibling; sibling = sibling->sibling)
		show_devs_tree(sibling, debug_level, depth + 1);
}

void show_all_devs_tree(int debug_level, const char *msg)
{
	/* Bail if not printing to screen. */
	if (!printk(debug_level, "Show all devs in tree form... %s\n", msg))
		return;
	show_devs_tree(all_devices, debug_level, 0);
}

void show_devs_subtree(struct device *root, int debug_level, const char *msg)
{
	/* Bail if not printing to screen. */
	if (!printk(debug_level, "Show all devs in subtree %s... %s\n",
		       dev_path(root), msg))
		return;
	printk(debug_level, "%s\n", msg);
	show_devs_tree(root, debug_level, 0);
}

void show_all_devs(int debug_level, const char *msg)
{
	struct device *dev;

	/* Bail if not printing to screen. */
	if (!printk(debug_level, "Show all devs... %s\n", msg))
		return;
	for (dev = all_devices; dev; dev = dev->next) {
		printk(debug_level, "%s: enabled %d\n",
			  dev_path(dev), dev->enabled);
	}
}

void show_one_resource(int debug_level, struct device *dev,
		       struct resource *resource, const char *comment)
{
	char buf[10];
	unsigned long long base, end;
	base = resource->base;
	end = resource_end(resource);
	buf[0] = '\0';

	printk(debug_level, "%s %02lx <- [0x%016llx - 0x%016llx] "
		  "size 0x%08llx gran 0x%02x %s%s%s\n", dev_path(dev),
		  resource->index, base, end, resource->size, resource->gran,
		  buf, resource_type(resource), comment);
}

void show_all_devs_resources(int debug_level, const char *msg)
{
	struct device *dev;

	if (!printk(debug_level, "Show all devs with resources... %s\n", msg))
		return;

	for (dev = all_devices; dev; dev = dev->next) {
		struct resource *res;
		printk(debug_level, "%s: enabled %d\n",
			  dev_path(dev), dev->enabled);
		for (res = dev->resource_list; res; res = res->next)
			show_one_resource(debug_level, dev, res, "");
	}
}

const struct resource *resource_range_idx(struct device *dev, unsigned long index,
				uint64_t base, uint64_t size, unsigned long flags)
{
	struct resource *resource;
	if (!size)
		return NULL;

	resource = new_resource(dev, index);
	resource->base = base;

	if (flags & IORESOURCE_FIXED)
		resource->size = size;
	if (flags & IORESOURCE_BRIDGE)
		resource->limit = base + size - 1;

	resource->flags = IORESOURCE_ASSIGNED;
	resource->flags |= flags;

	printk(BIOS_SPEW, "dev: %s, index: 0x%lx, base: 0x%llx, size: 0x%llx\n",
	       dev_path(dev), resource->index, resource->base, resource->size);

	return resource;
}

const struct resource *lower_ram_end(struct device *dev, unsigned long index, uint64_t end)
{
	return ram_from_to(dev, index, 0, end);
}

const struct resource *upper_ram_end(struct device *dev, unsigned long index, uint64_t end)
{
	if (end <= 4ull * GiB)
		return NULL;

	printk(BIOS_INFO, "Available memory above 4GB: %lluM\n", (end - 4ull * GiB) / MiB);

	return ram_from_to(dev, index, 4ull * GiB, end);
}
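
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * a domain's read_resources() hook may report system DRAM below and
 * above the 4 GiB boundary with one call each. `top_of_ram_below_4g`,
 * `top_of_ram` and the starting index are placeholders; ram_from_to()
 * is assumed to be the helper these wrappers call.
 *
 *	int idx = 0;
 *	lower_ram_end(dev, idx++, top_of_ram_below_4g);
 *	upper_ram_end(dev, idx++, top_of_ram);
 */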

void mmconf_resource(struct device *dev, unsigned long index)
{
	struct resource *resource = new_resource(dev, index);
	resource->base = CONFIG_ECAM_MMCONF_BASE_ADDRESS;
	resource->size = CONFIG_ECAM_MMCONF_LENGTH;
	resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
		IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;

	printk(BIOS_DEBUG, "Adding PCIe enhanced config space BAR 0x%08lx-0x%08lx.\n",
			(unsigned long)(resource->base),
			(unsigned long)(resource->base + resource->size));
}

void tolm_test(void *gp, struct device *dev, struct resource *new)
{
	struct resource **best_p = gp;
	struct resource *best;

	best = *best_p;

	/*
	 * If the resource has not been allocated any space, i.e. its size is
	 * zero, then do not consider it in the tolm calculation.
	 */
	if (new->size == 0)
		return;

	if (!best || (best->base > new->base))
		best = new;

	*best_p = best;
}

u32 find_pci_tolm(struct bus *bus)
{
	struct resource *min = NULL;
	u32 tolm;
	unsigned long mask_match = IORESOURCE_MEM | IORESOURCE_ASSIGNED;

	search_bus_resources(bus, mask_match, mask_match, tolm_test, &min);

	tolm = 0xffffffffUL;

	if (min && tolm > min->base)
		tolm = min->base;

	return tolm;
}

/* Count of enabled CPUs */
int dev_count_cpu(void)
{
	struct device *cpu;
	int count = 0;

	for (cpu = all_devices; cpu; cpu = cpu->next) {
		if (!is_enabled_cpu(cpu))
			continue;
		count++;
	}

	return count;
}

/* Get device path name */
const char *dev_path_name(enum device_path_type type)
{
	static const char *const type_names[] = DEVICE_PATH_NAMES;
	const char *type_name = "Unknown";

	/* Translate the type value into a string */
	if (type < ARRAY_SIZE(type_names))
		type_name = type_names[type];
	return type_name;
}

bool dev_path_hotplug(const struct device *dev)
{
	for (dev = dev->upstream->dev; dev != dev->upstream->dev; dev = dev->upstream->dev) {
		if (dev->hotplug_port)
			return true;
	}
	return false;
}

void log_resource(const char *type, const struct device *dev, const struct resource *res,
			const char *srcfile, const int line)
{
	printk(BIOS_SPEW, "%s:%d res: %s, dev: %s, index: 0x%lx, base: 0x%llx, "
			  "end: 0x%llx, size_kb: 0x%llx\n",
			  srcfile, line, type, dev_path(dev), res->index, res->base,
			  resource_end(res), res->size / KiB);
}

bool is_cpu(const struct device *cpu)
{
	return cpu->path.type == DEVICE_PATH_APIC &&
	       cpu->upstream->dev->path.type == DEVICE_PATH_CPU_CLUSTER;
}

bool is_enabled_cpu(const struct device *cpu)
{
	return is_cpu(cpu) && cpu->enabled;
}

bool is_pci(const struct device *pci)
{
	return pci->path.type == DEVICE_PATH_PCI;
}

bool is_enabled_pci(const struct device *pci)
{
	return is_pci(pci) && pci->enabled;
}

bool is_pci_dev_on_bus(const struct device *pci, unsigned int bus)
{
	return is_pci(pci) && pci->upstream->segment_group == 0
		&& pci->upstream->secondary == bus;
}

bool is_pci_bridge(const struct device *pci)
{
	return is_pci(pci) && ((pci->hdr_type & 0x7f) == PCI_HEADER_TYPE_BRIDGE);
}