xref: /aosp_15_r20/external/coreboot/src/soc/intel/xeon_sp/chip_gen1.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 
3 #include <acpi/acpigen_pci.h>
4 #include <assert.h>
5 #include <console/console.h>
6 #include <device/pci.h>
7 #include <device/pci_ids.h>
8 #include <soc/pci_devs.h>
9 #include <intelblocks/acpi.h>
10 #include <intelblocks/vtd.h>
11 #include <soc/acpi.h>
12 #include <soc/chip_common.h>
13 #include <soc/soc_util.h>
14 #include <soc/util.h>
15 
domain_to_stack_res(const struct device * dev)16 static const STACK_RES *domain_to_stack_res(const struct device *dev)
17 {
18 	assert(dev->path.type == DEVICE_PATH_DOMAIN);
19 	const union xeon_domain_path dn = {
20 		.domain_path = dev->path.domain.domain
21 	};
22 
23 	const IIO_UDS *hob = get_iio_uds();
24 	assert(hob != NULL);
25 
26 	return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
27 }
28 
/*
 * Declare the resource windows owned by an IIO PCIe domain.
 *
 * All windows come from the FSP-published stack resource descriptor.
 * Resource indices are handed out sequentially, so which index a given
 * window lands on depends on which ranges the HOB reports as non-empty.
 */
static void iio_pci_domain_read_resources(struct device *dev)
{
	const STACK_RES *sr = domain_to_stack_res(dev);

	if (!sr)
		return;

	int index = 0;

	if (is_domain0(dev)) {
		/* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
		struct resource *res = new_resource(dev, index++);
		res->base = 0;
		res->size = 0x1000;
		res->limit = 0xfff;
		res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
	}

	/* IO window; the *_from_to() helpers take an exclusive end, hence +1 */
	if (sr->PciResourceIoBase < sr->PciResourceIoLimit)
		domain_io_window_from_to(dev, index++,
				sr->PciResourceIoBase, sr->PciResourceIoLimit + 1);

	/* 32-bit MMIO window */
	if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit)
		domain_mem_window_from_to(dev, index++,
				sr->PciResourceMem32Base, sr->PciResourceMem32Limit + 1);

	/* 64-bit MMIO window */
	if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit)
		domain_mem_window_from_to(dev, index++,
				sr->PciResourceMem64Base, sr->PciResourceMem64Limit + 1);

	/* Declare domain reserved MMIO */
	/* NOTE(review): assumes everything from the end of the VT-d BAR up to
	   the 32-bit window limit is reserved (not allocatable) — confirm
	   against the platform memory map. */
	uint64_t reserved_mmio = sr->VtdBarAddress + vtd_probe_bar_size(pcidev_on_root(0, 0));
	if ((reserved_mmio >= sr->PciResourceMem32Base) &&
	    (reserved_mmio <= sr->PciResourceMem32Limit))
		mmio_range(dev, index++, reserved_mmio,
			sr->PciResourceMem32Limit - reserved_mmio + 1);
}
66 
/*
 * Used by IIO stacks for PCIe bridges. Each such stack contains one PCI
 * host bridge; all the bus numbers on the IIO stack can be used for it.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name        = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt	   = pci_domain_fill_ssdt,
#endif
};
81 
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources, hence
 * the noop read/set resource handlers.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name        = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt	   = pci_domain_fill_ssdt,
#endif
};
96 
create_pcie_domains(const union xeon_domain_path dp,struct bus * upstream,const STACK_RES * sr,const size_t pci_segment_group)97 static void create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
98 				    const STACK_RES *sr, const size_t pci_segment_group)
99 {
100 	create_domain(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
101 			   &iio_pcie_domain_ops, pci_segment_group);
102 }
103 
104 /*
105  * On the first Xeon-SP generations there are no separate UBOX stacks,
106  * and the UBOX devices reside on the first and second IIO. Starting
107  * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
108  */
create_ubox_domains(const union xeon_domain_path dp,struct bus * upstream,const STACK_RES * sr,const size_t pci_segment_group)109 static void create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
110 				    const STACK_RES *sr, const size_t pci_segment_group)
111 {
112 	/* Only expect 2 UBOX buses here */
113 	assert(sr->BusBase + 1 == sr->BusLimit);
114 
115 	create_domain(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
116 			   &ubox_pcie_domain_ops, pci_segment_group);
117 	create_domain(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
118 			   &ubox_pcie_domain_ops, pci_segment_group);
119 }
120 
/*
 * Declared unconditionally: create_xeonsp_domains() below references this
 * function even when CONFIG(SOC_INTEL_HAS_CXL) is disabled (the guarded
 * branch is compiled but dead), so a prototype must always be visible.
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
			const STACK_RES *sr, const size_t pci_segment_group);
123 
124 #if CONFIG(SOC_INTEL_HAS_CXL)
/*
 * Declare the resource windows owned by the CXL portion of an IIO stack.
 *
 * The CXL windows are the parts of the stack's IO/MMIO ranges that lie
 * below the corresponding PCI resource ranges, i.e. [IoBase,
 * PciResourceIoBase) and so on. The *_from_to() helpers take an
 * exclusive end, so the PCI base itself is not included.
 */
static void iio_cxl_domain_read_resources(struct device *dev)
{
	const STACK_RES *sr = domain_to_stack_res(dev);

	if (!sr)
		return;

	int index = 0;

	/* IO window below the PCI IO range */
	if (sr->IoBase < sr->PciResourceIoBase)
		domain_io_window_from_to(dev, index++,
				sr->IoBase, sr->PciResourceIoBase);

	/* 32-bit MMIO window below the PCI 32-bit range */
	if (sr->Mmio32Base < sr->PciResourceMem32Base)
		domain_mem_window_from_to(dev, index++,
				sr->Mmio32Base, sr->PciResourceMem32Base);

	/* 64-bit MMIO window below the PCI 64-bit range */
	if (sr->Mmio64Base < sr->PciResourceMem64Base)
		domain_mem_window_from_to(dev, index++,
				sr->Mmio64Base, sr->PciResourceMem64Base);
}
146 
/* Domain operations for the CXL part of a CXL-capable IIO stack. */
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name        = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt	   = pci_domain_fill_ssdt,
#endif
};
157 
create_cxl_domains(const union xeon_domain_path dp,struct bus * bus,const STACK_RES * sr,const size_t pci_segment_group)158 void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
159 			    const STACK_RES *sr, const size_t pci_segment_group)
160 {
161 	assert(sr->BusBase + 1 <= sr->BusLimit);
162 
163 	/* 1st domain contains PCIe RCiEPs */
164 	create_domain(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
165 			   &iio_pcie_domain_ops, pci_segment_group);
166 	/* 2nd domain contains CXL 1.1 end-points */
167 	create_domain(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
168 			   &iio_cxl_domain_ops, pci_segment_group);
169 }
170 #endif //CONFIG(SOC_INTEL_HAS_CXL)
171 
create_xeonsp_domains(const union xeon_domain_path dp,struct bus * bus,const STACK_RES * sr,const size_t pci_segment_group)172 void create_xeonsp_domains(const union xeon_domain_path dp, struct bus *bus,
173 				const STACK_RES *sr, const size_t pci_segment_group)
174 {
175 	if (is_ubox_stack_res(sr))
176 		create_ubox_domains(dp, bus, sr, pci_segment_group);
177 	else if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(sr))
178 		create_cxl_domains(dp, bus, sr, pci_segment_group);
179 	else if (is_pcie_iio_stack_res(sr))
180 		create_pcie_domains(dp, bus, sr, pci_segment_group);
181 	else if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(sr))
182 		create_ioat_domains(dp, bus, sr, pci_segment_group);
183 }
184 
185 /*
186  * Route PAM segment access to DRAM
187  * Only call this code from socket0!
188  */
unlock_pam_regions(void)189 void unlock_pam_regions(void)
190 {
191 	uint32_t pam0123_unlock_dram = 0x33333330;
192 	uint32_t pam456_unlock_dram = 0x00333333;
193 	/* Get UBOX(1) for socket0 */
194 	uint32_t bus1 = socket0_get_ubox_busno(PCU_IIO_STACK);
195 
196 	/* Assume socket0 owns PCI segment 0 */
197 	pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
198 		SAD_ALL_PAM0123_CSR, pam0123_unlock_dram);
199 	pci_io_write_config32(PCI_DEV(bus1, SAD_ALL_DEV, SAD_ALL_FUNC),
200 		SAD_ALL_PAM456_CSR, pam456_unlock_dram);
201 
202 	uint32_t reg1 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
203 		SAD_ALL_FUNC), SAD_ALL_PAM0123_CSR);
204 	uint32_t reg2 = pci_io_read_config32(PCI_DEV(bus1, SAD_ALL_DEV,
205 		SAD_ALL_FUNC), SAD_ALL_PAM456_CSR);
206 	printk(BIOS_DEBUG, "%s:%s pam0123_csr: 0x%x, pam456_csr: 0x%x\n",
207 		__FILE__, __func__, reg1, reg2);
208 }
209