xref: /aosp_15_r20/external/coreboot/src/soc/intel/common/block/systemagent/systemagent.c (revision b9411a12aaaa7e1e6a6fb7c5e057f44ee179a49c)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #include <acpi/acpigen.h>
4 #include <cbmem.h>
5 #include <console/console.h>
6 #include <cpu/cpu.h>
7 #include <device/device.h>
8 #include <device/pci.h>
9 #include <device/pci_ids.h>
10 #include <intelblocks/acpi.h>
11 #include <intelblocks/cfg.h>
12 #include <intelblocks/systemagent.h>
13 #include <smbios.h>
14 #include <soc/iomap.h>
15 #include <soc/nvs.h>
16 #include <soc/pci_devs.h>
17 #include <soc/systemagent.h>
18 #include <types.h>
19 #include "systemagent_def.h"
20 
21 /* SoC override function */
/*
 * Weak default: SoC-specific system agent init hook, called from
 * sa_soc_systemagent_init(). SoCs override this to program their own
 * host-bridge settings; the default does nothing.
 */
__weak void soc_systemagent_init(struct device *dev)
{
	/* no-op */
}
26 
/*
 * Weak default: hook for the SoC to report its fixed MMIO ranges during
 * systemagent_read_resources(). resource_cnt is the running resource
 * index; overrides must advance it for every resource they add.
 */
__weak void soc_add_fixed_mmio_resources(struct device *dev,
		int *resource_cnt)
{
	/* no-op */
}
32 
/*
 * Weak default: hook for the SoC to report configurable (runtime-programmed)
 * MMIO ranges during systemagent_read_resources(). resource_cnt is the
 * running resource index; overrides must advance it per added resource.
 */
__weak void soc_add_configurable_mmio_resources(struct device *dev,
		int *resource_cnt)
{
	/* no-op */
}
38 
/*
 * Weak default: report the uncore PRMRR base and mask.
 * Returns 0 on success, negative on failure. The default implementation
 * does not touch *base/*mask and always reports failure; SoCs that
 * support PRMRR must override it.
 */
__weak int soc_get_uncore_prmmr_base_and_mask(uint64_t *base,
		uint64_t *mask)
{
	/* return failure for this dummy API */
	return -1;
}
45 
/*
 * Weak default: ACPI table writer hook for the system agent (wired to
 * device_operations.write_acpi_tables below). The default adds no tables
 * and returns `current` unchanged, as the write_acpi_tables contract
 * requires.
 */
__weak unsigned long sa_write_acpi_tables(const struct device *dev,
					  unsigned long current,
					  struct acpi_rsdp *rsdp)
{
	return current;
}
52 
/*
 * Weak default: maximum DRAM capacity per channel in MiB, given the
 * CAPID0_A DDR-size capability field. SoCs override this to decode
 * capid0_a_ddrsz; the default ignores it and reports 32 GiB.
 */
__weak uint32_t soc_systemagent_max_chan_capacity_mib(u8 capid0_a_ddrsz)
{
	return 32768;	/* 32 GiB per channel */
}
57 
sa_get_ecc_type(const uint32_t capid0_a)58 static uint8_t sa_get_ecc_type(const uint32_t capid0_a)
59 {
60 	return capid0_a & CAPID_ECCDIS ? MEMORY_ARRAY_ECC_NONE : MEMORY_ARRAY_ECC_SINGLE_BIT;
61 }
62 
sa_slots_per_channel(const uint32_t capid0_a)63 static size_t sa_slots_per_channel(const uint32_t capid0_a)
64 {
65 	return !(capid0_a & CAPID_DDPCD) + 1;
66 }
67 
sa_number_of_channels(const uint32_t capid0_a)68 static size_t sa_number_of_channels(const uint32_t capid0_a)
69 {
70 	return !(capid0_a & CAPID_PDCD) + 1;
71 }
72 
sa_soc_systemagent_init(struct device * dev)73 static void sa_soc_systemagent_init(struct device *dev)
74 {
75 	soc_systemagent_init(dev);
76 
77 	struct memory_info *m = cbmem_find(CBMEM_ID_MEMINFO);
78 	if (m == NULL)
79 		return;
80 
81 	const uint32_t capid0_a = pci_read_config32(dev, CAPID0_A);
82 
83 	m->ecc_type = sa_get_ecc_type(capid0_a);
84 	m->max_capacity_mib = soc_systemagent_max_chan_capacity_mib(CAPID_DDRSZ(capid0_a)) *
85 			      sa_number_of_channels(capid0_a);
86 	m->number_of_devices = sa_slots_per_channel(capid0_a) *
87 			       sa_number_of_channels(capid0_a);
88 }
89 
90 /*
91  * Add all known fixed MMIO ranges that hang off the host bridge/memory
92  * controller device.
93  */
/*
 * Add all known fixed MMIO ranges that hang off the host bridge/memory
 * controller device.
 *
 * @dev                 host bridge device the resources are attached to
 * @resource_cnt        in/out running resource index; advanced by `count`
 * @sa_fixed_resources  table of base/size/description descriptors
 * @count               number of entries in the table
 */
void sa_add_fixed_mmio_resources(struct device *dev, int *resource_cnt,
	const struct sa_mmio_descriptor *sa_fixed_resources, size_t count)
{
	/* size_t index avoids the signed/unsigned comparison against count. */
	size_t i;
	int index = *resource_cnt;

	for (i = 0; i < count; i++) {
		uintptr_t base;
		size_t size;

		size = sa_fixed_resources[i].size;
		base = sa_fixed_resources[i].base;

		printk(BIOS_DEBUG,
			"SA MMIO resource: %-8s ->  base = 0x%08llx, size = 0x%08llx\n",
			sa_fixed_resources[i].description, sa_fixed_resources[i].base,
			sa_fixed_resources[i].size);

		mmio_range(dev, index++, base, size);
	}

	*resource_cnt = index;
}
117 
118 /*
119  * DRAM memory mapped register
120  *
121  * TOUUD: This 64 bit register defines the Top of Upper Usable DRAM
122  * TOLUD: This 32 bit register defines the Top of Low Usable DRAM
123  * BGSM: This register contains the base address of stolen DRAM memory for GTT
124  * TSEG: This register contains the base address of TSEG DRAM memory
125  */
static const struct sa_mem_map_descriptor sa_memory_map[MAX_MAP_ENTRIES] = {
	/*
	 * NOTE: entry order must match the SA_TOUUD_REG/SA_TOLUD_REG/...
	 * indices used by sa_add_dram_resources() and ssdt_set_above_4g_pci().
	 */
	{ TOUUD, true, "TOUUD" },	/* only 64-bit register in the set */
	{ TOLUD, false, "TOLUD" },
	{ BGSM, false, "BGSM" },
	{ TSEG, false, "TSEG" },
};
132 
133 /* Read DRAM memory map register value through PCI configuration space */
/* Read DRAM memory map register value through PCI configuration space */
static void sa_read_map_entry(const struct device *dev,
		const struct sa_mem_map_descriptor *entry, uint64_t *result)
{
	/* The low dword is always present; 64-bit entries add a high dword. */
	uint64_t val = pci_read_config32(dev, entry->reg);

	if (entry->is_64_bit)
		val |= (uint64_t)pci_read_config32(dev, entry->reg + 4) << 32;

	/* All registers are on a 1MiB granularity. */
	*result = ALIGN_DOWN(val, 1 * MiB);
}
150 
/* Read every register described by sa_memory_map into values[]. */
static void sa_get_mem_map(struct device *dev, uint64_t *values)
{
	for (size_t idx = 0; idx < MAX_MAP_ENTRIES; idx++)
		sa_read_map_entry(dev, &sa_memory_map[idx], &values[idx]);
}
157 
158 /*
159  * These are the host memory ranges that should be added:
160  * - 0 -> 0xa0000: cacheable
161  * - 0xc0000 -> top_of_ram : cacheable
162  * - top_of_ram -> TOLUD: not cacheable with standard MTRRs and reserved
163  * - 4GiB -> TOUUD: cacheable
164  *
165  * The default SMRAM space is reserved so that the range doesn't
166  * have to be saved during S3 Resume. Once marked reserved the OS
167  * cannot use the memory. This is a bit of an odd place to reserve
168  * the region, but the CPU devices don't have dev_ops->read_resources()
169  * called on them.
170  *
171  * The range 0xa0000 -> 0xc0000 does not have any resources
172  * associated with it to handle legacy VGA memory. If this range
173  * is not omitted the mtrr code will setup the area as cacheable
174  * causing VGA access to not work.
175  *
176  * Don't need to mark the entire top_of_ram till TOLUD range (used
177  * for stolen memory like GFX and ME, PTT, DPR, PRMRR, TSEG etc) as
178  * cacheable for OS usage as coreboot already done with mpinit w/ smm
179  * relocation early.
180  *
181  * It should be noted that cacheable entry types need to be added in
182  * order. The reason is that the current MTRR code assumes this and
183  * falls over itself if it isn't.
184  *
185  * The resource index starts low and should not meet or exceed
186  * PCI_BASE_ADDRESS_0.
187  */
static void sa_add_dram_resources(struct device *dev, int *resource_count)
{
	uint64_t sa_map_values[MAX_MAP_ENTRIES];
	int index = *resource_count;

	const uintptr_t top_of_ram = cbmem_top();

	/*
	 * Cacheable RAM entries first, in ascending address order — the
	 * MTRR code depends on this ordering (see the comment above).
	 */

	/* 0 -> 0xa0000 */
	ram_from_to(dev, index++, 0, 0xa0000);

	/* 0xc0000 -> top_of_ram */
	ram_from_to(dev, index++, 0xc0000, top_of_ram);

	/* Fetch TOUUD/TOLUD/BGSM/TSEG from the host bridge registers. */
	sa_get_mem_map(dev, &sa_map_values[0]);

	/*
	 * top_of_ram -> TOLUD: This contains TSEG which needs to be uncacheable
	 * for proper operation of the smihandler.
	 */
	mmio_from_to(dev, index++, top_of_ram, sa_map_values[SA_TOLUD_REG]);

	/* 4GiB -> TOUUD */
	upper_ram_end(dev, index++, sa_map_values[SA_TOUUD_REG]);

	/*
	 * Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_from_to(dev, index++, 0xa0000, 0xc0000);
	reserved_ram_from_to(dev, index++, 0xc0000, 1 * MiB);

	/* Hand the advanced index back to the caller. */
	*resource_count = index;
}
223 
/*
 * An IMR is active when bit 31 of its base register is set.
 * Use an unsigned literal: `1 << 31` left-shifts into the sign bit of a
 * signed int, which is undefined behavior in C.
 */
static bool is_imr_enabled(uint32_t imr_base_reg)
{
	return !!(imr_base_reg & (1U << 31));
}
228 
/*
 * Report one IMR as a reserved-RAM resource.
 *
 * @base/@mask are the raw MCHBAR IMR base/mask register values.
 */
static void imr_resource(struct device *dev, int idx, uint32_t base,
			 uint32_t mask)
{
	uint32_t base_k, size_k;
	/* Bits 28:0 encode the base address bits 38:10, hence the KiB unit. */
	base_k = (base & 0x0fffffff);
	/* Bits 28:0 encode the AND mask used for comparison, in KiB. */
	size_k = ((~mask & 0x0fffffff) + 1);
	/*
	 * IMRs sit in lower DRAM. Mark them cacheable, otherwise we run
	 * out of MTRRs. Memory reserved by IMRs is not usable for host
	 * so mark it reserved.
	 *
	 * Widen to 64 bits before scaling by KiB: base_k/size_k can encode
	 * ranges above 4 GiB, and a 32-bit multiply would silently wrap.
	 */
	reserved_ram_range(dev, idx, (uint64_t)base_k * KiB,
			   (uint64_t)size_k * KiB);
}
244 
245 /*
246  * Add IMR ranges that hang off the host bridge/memory
247  * controller device in case CONFIG(SA_ENABLE_IMR) is selected by SoC.
248  */
sa_add_imr_resources(struct device * dev,int * resource_cnt)249 static void sa_add_imr_resources(struct device *dev, int *resource_cnt)
250 {
251 	size_t i, imr_offset;
252 	uint32_t base, mask;
253 	int index = *resource_cnt;
254 
255 	for (i = 0; i < MCH_NUM_IMRS; i++) {
256 		imr_offset = i * MCH_IMR_PITCH;
257 		base = MCHBAR32(imr_offset + MCH_IMR0_BASE);
258 		mask = MCHBAR32(imr_offset + MCH_IMR0_MASK);
259 
260 		if (is_imr_enabled(base))
261 			imr_resource(dev, index++, base, mask);
262 	}
263 
264 	*resource_cnt = index;
265 }
266 
systemagent_read_resources(struct device * dev)267 static void systemagent_read_resources(struct device *dev)
268 {
269 	int index = 0;
270 
271 	/* Read standard PCI resources. */
272 	pci_dev_read_resources(dev);
273 
274 	/* Add all fixed MMIO resources. */
275 	soc_add_fixed_mmio_resources(dev, &index);
276 
277 	/* Add all configurable MMIO resources. */
278 	soc_add_configurable_mmio_resources(dev, &index);
279 
280 	/* Calculate and add DRAM resources. */
281 	sa_add_dram_resources(dev, &index);
282 	if (CONFIG(SA_ENABLE_IMR))
283 		/* Add the isolated memory ranges (IMRs). */
284 		sa_add_imr_resources(dev, &index);
285 }
286 
enable_power_aware_intr(void)287 void enable_power_aware_intr(void)
288 {
289 	uint8_t pair;
290 
291 	/* Enable Power Aware Interrupt Routing */
292 	pair = MCHBAR8(MCH_PAIR);
293 	pair &= ~0x7;	/* Clear 2:0 */
294 	pair |= 0x4;	/* Fixed Priority */
295 	MCHBAR8(MCH_PAIR) = pair;
296 }
297 
sa_lock_pam(void)298 void sa_lock_pam(void)
299 {
300 	const struct device *dev = pcidev_path_on_root(SA_DEVFN_ROOT);
301 	if (!CONFIG(HAVE_PAM0_REGISTER) || !dev)
302 		return;
303 
304 	pci_or_config8(dev, PAM0, PAM_LOCK);
305 }
306 
ssdt_set_above_4g_pci(const struct device * dev)307 void ssdt_set_above_4g_pci(const struct device *dev)
308 {
309 	if (dev->path.type != DEVICE_PATH_DOMAIN)
310 		return;
311 
312 	uint64_t touud;
313 	sa_read_map_entry(pcidev_path_on_root(SA_DEVFN_ROOT), &sa_memory_map[SA_TOUUD_REG],
314 			  &touud);
315 	const uint64_t len = POWER_OF_2(soc_phys_address_size()) - touud;
316 
317 	const char *scope = acpi_device_path(dev);
318 	acpigen_write_scope(scope);
319 	acpigen_write_name_qword("A4GB", touud);
320 	acpigen_write_name_qword("A4GS", len);
321 	acpigen_pop_len();
322 
323 	printk(BIOS_DEBUG, "PCI space above 4GB MMIO is at 0x%llx, len = 0x%llx\n", touud, len);
324 }
325 
sa_get_mmcfg_size(void)326 uint64_t sa_get_mmcfg_size(void)
327 {
328 	const uint32_t pciexbar_reg = pci_read_config32(__pci_0_00_0, PCIEXBAR);
329 
330 	if (!(pciexbar_reg & (1 << 0))) {
331 		printk(BIOS_ERR, "%s : PCIEXBAR disabled\n", __func__);
332 		return 0;
333 	}
334 
335 	switch ((pciexbar_reg & PCIEXBAR_LENGTH_MASK) >> PCIEXBAR_LENGTH_MASK_LSB) {
336 	case PCIEXBAR_LENGTH_4096MB:
337 		return 4ULL * GiB;
338 	case PCIEXBAR_LENGTH_2048MB:
339 		return 2ULL * GiB;
340 	case PCIEXBAR_LENGTH_1024MB:
341 		return 1 * GiB;
342 	case PCIEXBAR_LENGTH_512MB:
343 		return 512 * MiB;
344 	case PCIEXBAR_LENGTH_256MB:
345 		return 256 * MiB;
346 	case PCIEXBAR_LENGTH_128MB:
347 		return 128 * MiB;
348 	case PCIEXBAR_LENGTH_64MB:
349 		return 64 * MiB;
350 	default:
351 		printk(BIOS_ERR, "%s : PCIEXBAR - invalid length (0x%x)\n", __func__,
352 			(pciexbar_reg & PCIEXBAR_LENGTH_MASK) >> PCIEXBAR_LENGTH_MASK_LSB);
353 		return 0x0;
354 	}
355 }
356 
sa_get_dsm_size(void)357 uint64_t sa_get_dsm_size(void)
358 {
359 	const uint32_t size_field = (pci_read_config32(__pci_0_00_0, GGC) & DSM_LENGTH_MASK)
360 					 >> DSM_LENGTH_MASK_LSB;
361 	if (size_field <= 0x10) { /* 0x0 - 0x10 */
362 		return size_field * 32 * MiB;
363 	} else if ((size_field >= 0xF0) && (size_field >= 0xFE)) {
364 		return ((uint64_t)size_field - 0xEF) * 4 * MiB;
365 	} else {
366 		switch (size_field) {
367 		case 0x20:
368 			return 1 * GiB;
369 		case 0x30:
370 			return 1536 * MiB;
371 		case 0x40:
372 			return 2 * (uint64_t)GiB;
373 		default:
374 			printk(BIOS_ERR, "%s : DSM - invalid length (0x%x)\n", __func__, size_field);
375 			return 0x0;
376 		}
377 	}
378 }
379 
sa_get_gsm_size(void)380 uint64_t sa_get_gsm_size(void)
381 {
382 	const uint32_t size_field = (pci_read_config32(__pci_0_00_0, GGC) & GSM_LENGTH_MASK)
383 					 >> GSM_LENGTH_MASK_LSB;
384 	switch (size_field) {
385 	case 0x0:
386 	default:
387 		return 0;
388 	case 0x1:
389 		return 2 * MiB;
390 	case 0x2:
391 		return 4 * MiB;
392 	case 0x3:
393 		return 8 * MiB;
394 	}
395 }
396 
sa_get_dpr_size(void)397 uint64_t sa_get_dpr_size(void)
398 {
399 	const uint32_t size_field = (pci_read_config32(__pci_0_00_0, DPR) & DPR_LENGTH_MASK)
400 					 >> DPR_LENGTH_MASK_LSB;
401 	return (uint64_t)size_field * MiB;
402 }
403 
/* Device operations for the host bridge (system agent) PCI device. */
struct device_operations systemagent_ops = {
	.read_resources   = systemagent_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init             = sa_soc_systemagent_init,
	.ops_pci          = &pci_dev_ops_pci,
#if CONFIG(HAVE_ACPI_TABLES)
	.write_acpi_tables = sa_write_acpi_tables,
#endif
};
414 
/*
 * PCI device IDs of the Intel host bridges (system agents) this driver
 * binds to, one entry per supported SoC/SKU. The list is 0-terminated.
 */
static const unsigned short systemagent_ids[] = {
	PCI_DID_INTEL_PTL_ID,
	PCI_DID_INTEL_LNL_M_ID,
	PCI_DID_INTEL_LNL_M_ID_1,
	PCI_DID_INTEL_MTL_M_ID,
	PCI_DID_INTEL_MTL_P_ID_1,
	PCI_DID_INTEL_MTL_P_ID_2,
	PCI_DID_INTEL_MTL_P_ID_3,
	PCI_DID_INTEL_MTL_P_ID_4,
	PCI_DID_INTEL_MTL_P_ID_5,
	PCI_DID_INTEL_GLK_NB,
	PCI_DID_INTEL_APL_NB,
	PCI_DID_INTEL_CNL_ID_U,
	PCI_DID_INTEL_CNL_ID_Y,
	PCI_DID_INTEL_WHL_ID_W_2,
	PCI_DID_INTEL_WHL_ID_W_4,
	PCI_DID_INTEL_CFL_ID_U,
	PCI_DID_INTEL_CFL_ID_U_2,
	PCI_DID_INTEL_CFL_ID_H,
	PCI_DID_INTEL_CFL_ID_H_4,
	PCI_DID_INTEL_CFL_ID_H_8,
	PCI_DID_INTEL_CFL_ID_S,
	PCI_DID_INTEL_CFL_ID_S_DT_2,
	PCI_DID_INTEL_CFL_ID_S_DT_4,
	PCI_DID_INTEL_CFL_ID_S_DT_8,
	PCI_DID_INTEL_CFL_ID_S_WS_4,
	PCI_DID_INTEL_CFL_ID_S_WS_6,
	PCI_DID_INTEL_CFL_ID_S_WS_8,
	PCI_DID_INTEL_CFL_ID_S_S_4,
	PCI_DID_INTEL_CFL_ID_S_S_6,
	PCI_DID_INTEL_CFL_ID_S_S_8,
	PCI_DID_INTEL_CML_ULT,
	PCI_DID_INTEL_CML_ULT_2_2,
	PCI_DID_INTEL_CML_ULT_6_2,
	PCI_DID_INTEL_CML_ULX,
	PCI_DID_INTEL_CML_S,
	PCI_DID_INTEL_CML_S_G0G1_P0P1_6_2,
	PCI_DID_INTEL_CML_S_P0P1_8_2,
	PCI_DID_INTEL_CML_S_P0P1_10_2,
	PCI_DID_INTEL_CML_S_G0G1_4,
	PCI_DID_INTEL_CML_S_G0G1_2,
	PCI_DID_INTEL_CML_H,
	PCI_DID_INTEL_CML_H_4_2,
	PCI_DID_INTEL_CML_H_8_2,
	PCI_DID_INTEL_TGL_ID_U_2_2,
	PCI_DID_INTEL_TGL_ID_U_4_2,
	PCI_DID_INTEL_TGL_ID_Y_2_2,
	PCI_DID_INTEL_TGL_ID_Y_4_2,
	PCI_DID_INTEL_TGL_ID_H_6_1,
	PCI_DID_INTEL_TGL_ID_H_8_1,
	PCI_DID_INTEL_EHL_ID_0,
	PCI_DID_INTEL_EHL_ID_1,
	PCI_DID_INTEL_EHL_ID_1A,
	PCI_DID_INTEL_EHL_ID_2,
	PCI_DID_INTEL_EHL_ID_2_1,
	PCI_DID_INTEL_EHL_ID_3,
	PCI_DID_INTEL_EHL_ID_3A,
	PCI_DID_INTEL_EHL_ID_4,
	PCI_DID_INTEL_EHL_ID_5,
	PCI_DID_INTEL_EHL_ID_6,
	PCI_DID_INTEL_EHL_ID_7,
	PCI_DID_INTEL_EHL_ID_8,
	PCI_DID_INTEL_EHL_ID_9,
	PCI_DID_INTEL_EHL_ID_10,
	PCI_DID_INTEL_EHL_ID_11,
	PCI_DID_INTEL_EHL_ID_12,
	PCI_DID_INTEL_EHL_ID_13,
	PCI_DID_INTEL_EHL_ID_14,
	PCI_DID_INTEL_EHL_ID_15,
	PCI_DID_INTEL_JSL_ID_1,
	PCI_DID_INTEL_JSL_ID_2,
	PCI_DID_INTEL_JSL_ID_3,
	PCI_DID_INTEL_JSL_ID_4,
	PCI_DID_INTEL_JSL_ID_5,
	PCI_DID_INTEL_JSL_ID_6,
	PCI_DID_INTEL_ADL_S_ID_1,
	PCI_DID_INTEL_ADL_S_ID_2,
	PCI_DID_INTEL_ADL_S_ID_3,
	PCI_DID_INTEL_ADL_S_ID_4,
	PCI_DID_INTEL_ADL_S_ID_5,
	PCI_DID_INTEL_ADL_S_ID_6,
	PCI_DID_INTEL_ADL_S_ID_7,
	PCI_DID_INTEL_ADL_S_ID_8,
	PCI_DID_INTEL_ADL_S_ID_9,
	PCI_DID_INTEL_ADL_S_ID_10,
	PCI_DID_INTEL_ADL_S_ID_11,
	PCI_DID_INTEL_ADL_S_ID_12,
	PCI_DID_INTEL_ADL_S_ID_13,
	PCI_DID_INTEL_ADL_S_ID_14,
	PCI_DID_INTEL_ADL_S_ID_15,
	PCI_DID_INTEL_ADL_P_ID_1,
	PCI_DID_INTEL_ADL_P_ID_3,
	PCI_DID_INTEL_ADL_P_ID_4,
	PCI_DID_INTEL_ADL_P_ID_5,
	PCI_DID_INTEL_ADL_P_ID_6,
	PCI_DID_INTEL_ADL_P_ID_7,
	PCI_DID_INTEL_ADL_P_ID_8,
	PCI_DID_INTEL_ADL_P_ID_9,
	PCI_DID_INTEL_ADL_P_ID_10,
	PCI_DID_INTEL_ADL_M_ID_1,
	PCI_DID_INTEL_ADL_M_ID_2,
	PCI_DID_INTEL_ADL_N_ID_1,
	PCI_DID_INTEL_ADL_N_ID_2,
	PCI_DID_INTEL_ADL_N_ID_3,
	PCI_DID_INTEL_ADL_N_ID_4,
	PCI_DID_INTEL_ADL_N_ID_5,
	PCI_DID_INTEL_RPL_HX_ID_1,
	PCI_DID_INTEL_RPL_HX_ID_2,
	PCI_DID_INTEL_RPL_HX_ID_3,
	PCI_DID_INTEL_RPL_HX_ID_4,
	PCI_DID_INTEL_RPL_HX_ID_5,
	PCI_DID_INTEL_RPL_HX_ID_6,
	PCI_DID_INTEL_RPL_HX_ID_7,
	PCI_DID_INTEL_RPL_HX_ID_8,
	PCI_DID_INTEL_RPL_S_ID_1,
	PCI_DID_INTEL_RPL_S_ID_2,
	PCI_DID_INTEL_RPL_S_ID_3,
	PCI_DID_INTEL_RPL_S_ID_4,
	PCI_DID_INTEL_RPL_S_ID_5,
	PCI_DID_INTEL_RPL_P_ID_1,
	PCI_DID_INTEL_RPL_P_ID_2,
	PCI_DID_INTEL_RPL_P_ID_3,
	PCI_DID_INTEL_RPL_P_ID_4,
	PCI_DID_INTEL_RPL_P_ID_5,
	PCI_DID_INTEL_RPL_P_ID_6,
	PCI_DID_INTEL_RPL_P_ID_7,
	PCI_DID_INTEL_RPL_P_ID_8,
	0	/* terminator */
};
544 
/* Bind systemagent_ops to any Intel host bridge matching systemagent_ids. */
static const struct pci_driver systemagent_driver __pci_driver = {
	.ops     = &systemagent_ops,
	.vendor  = PCI_VID_INTEL,
	.devices = systemagent_ids
};
550