/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <commonlib/helpers.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <device/pciexp.h>

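/*
 * Background note: each PCIe extended capability starts with a 32-bit
 * header. Bits 15:0 hold the capability ID, bits 19:16 the capability
 * version, and bits 31:20 the offset of the next capability. The
 * `& 0xffc` below masks the next pointer down to a DWORD-aligned
 * config-space offset.
 */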
static unsigned int ext_cap_id(unsigned int cap)
{
	return cap & 0xffff;
}

static unsigned int ext_cap_next_offset(unsigned int cap)
{
	return cap >> 20 & 0xffc;
}

static unsigned int find_ext_cap_offset(const struct device *dev, unsigned int cap_id,
					unsigned int offset)
{
	unsigned int this_cap_offset = offset;

	while (this_cap_offset >= PCIE_EXT_CAP_OFFSET) {
		const unsigned int this_cap = pci_read_config32(dev, this_cap_offset);

		/* Bail out when this request is unsupported */
		if (this_cap == 0xffffffff)
			break;

		if (ext_cap_id(this_cap) == cap_id)
			return this_cap_offset;

		this_cap_offset = ext_cap_next_offset(this_cap);
	}

	return 0;
}

/*
 * Search for an extended capability with the ID `cap`.
 *
 * Returns the offset of the first matching extended
 * capability if found, or 0 otherwise.
 *
 * A new search is started with `offset == 0`.
 * To continue a search, the prior return value
 * should be passed as `offset`.
 */
unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap,
				      unsigned int offset)
{
	unsigned int next_cap_offset;

	if (offset)
		next_cap_offset = ext_cap_next_offset(pci_read_config32(dev, offset));
	else
		next_cap_offset = PCIE_EXT_CAP_OFFSET;

	return find_ext_cap_offset(dev, cap, next_cap_offset);
}
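
/*
 * Usage sketch (hypothetical caller, not part of this file): walk every
 * instance of a given extended capability on a device:
 *
 *	unsigned int offset = 0;
 *	while ((offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset))) {
 *		... inspect the capability at `offset` ...
 *	}
 */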

/*
 * Search for a vendor-specific extended capability,
 * with the vendor-specific ID `cap`.
 *
 * Returns the offset of the vendor-specific header,
 * i.e. the offset of the extended capability + 4,
 * or 0 if none is found.
 *
 * A new search is started with `offset == 0`.
 * To continue a search, the prior return value
 * should be passed as `offset`.
 */
unsigned int pciexp_find_ext_vendor_cap(const struct device *dev, unsigned int cap,
					unsigned int offset)
{
	/* Reconstruct capability offset from vendor-specific header offset. */
	if (offset >= 4)
		offset -= 4;

	for (;;) {
		offset = pciexp_find_extended_cap(dev, PCI_EXT_CAP_ID_VNDR, offset);
		if (!offset)
			return 0;

		const unsigned int vndr_cap = pci_read_config32(dev, offset + 4);
		if ((vndr_cap & 0xffff) == cap)
			return offset + 4;
	}
}
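
/*
 * Note: the dword read at `offset + 4` above is the Vendor-Specific
 * Header, which carries the VSEC ID in bits 15:0 (compared against
 * `cap`), the VSEC revision in bits 19:16 and the VSEC length in
 * bits 31:20.
 */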

/**
 * Find a PCIe device with a given serial number, and a given VID if applicable
 *
 * @param serial The serial number of the device.
 * @param vid Vendor ID of the device, may be 0 if not applicable.
 * @param from Pointer to the device structure, used as a starting point in
 *             the linked list of all_devices, which can be 0 to start at the
 *             head of the list (i.e. all_devices).
 * @return Pointer to the device struct.
 */
struct device *pcie_find_dsn(const uint64_t serial, const uint16_t vid,
			struct device *from)
{
	union dsn {
		struct {
			uint32_t dsn_low;
			uint32_t dsn_high;
		};
		uint64_t dsn;
	} dsn;
	unsigned int cap;
	uint16_t vendor_id;

	if (!from)
		from = all_devices;
	else
		from = from->next;

	while (from) {
		if (from->path.type == DEVICE_PATH_PCI) {
			cap = pciexp_find_extended_cap(from, PCI_EXT_CAP_ID_DSN, 0);
			/*
			 * For a PCIe device, find the extended capability for the
			 * serial number. The capability header is 4 bytes, followed
			 * by the lower 4 bytes of the serial number, then the
			 * higher 4 bytes.
			 */
			if (cap != 0) {
				dsn.dsn_low = pci_read_config32(from, cap + 4);
				dsn.dsn_high = pci_read_config32(from, cap + 8);
				vendor_id = pci_read_config16(from, PCI_VENDOR_ID);
				if ((dsn.dsn == serial) && (vid == 0 || vendor_id == vid))
					return from;
			}
		}

		from = from->next;
	}

	return from;
}

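/*
 * Usage sketch (hypothetical serial value): enumerate every device with
 * a given serial number, regardless of vendor:
 *
 *	struct device *d = NULL;
 *	while ((d = pcie_find_dsn(0x0011223344556677ULL, 0, d)))
 *		printk(BIOS_DEBUG, "DSN match: %s\n", dev_path(d));
 */
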
static bool pcie_is_root_port(struct device *dev)
{
	unsigned int pcie_pos, pcie_type;

	pcie_pos = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!pcie_pos)
		return false;

	pcie_type = pci_read_config16(dev, pcie_pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_TYPE;
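	/* The Device/Port Type field is bits 7:4 of the PCIe Capabilities
	   register, hence the shift here and below. */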
	pcie_type >>= 4;

	return (pcie_type == PCI_EXP_TYPE_ROOT_PORT);
}

static bool pcie_is_endpoint(struct device *dev)
{
	unsigned int pcie_pos, pcie_type;

	pcie_pos = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!pcie_pos)
		return false;

	pcie_type = pci_read_config16(dev, pcie_pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_TYPE;
	pcie_type >>= 4;

	return ((pcie_type == PCI_EXP_TYPE_ENDPOINT) || (pcie_type == PCI_EXP_TYPE_LEG_END));
}

/*
 * Re-train a PCIe link
 */
#define PCIE_TRAIN_RETRY 10000
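/* 10,000 polls spaced 100us apart bound each wait below to roughly 1s. */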
static int pciexp_retrain_link(struct device *dev, unsigned int cap)
{
	unsigned int try;
	u16 lnk;

	/*
	 * Implementation note (page 633) in PCIe Specification 3.0 suggests
	 * polling the Link Training bit in the Link Status register until the
	 * value returned is 0 before setting the Retrain Link bit to 1.
	 * This is meant to avoid a race condition when using the
	 * Retrain Link mechanism.
	 */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			break;
		udelay(100);
	}
	if (try == 0) {
		printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
		return -1;
	}

	/* Start link retraining */
	lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
	lnk |= PCI_EXP_LNKCTL_RL;
	pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);

	/* Wait for training to complete */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			return 0;
		udelay(100);
	}

	printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
	return -1;
}

static bool pciexp_is_ccc_active(struct device *root, unsigned int root_cap,
				 struct device *endp, unsigned int endp_cap)
{
	u16 root_ccc, endp_ccc;

	root_ccc = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_CCC;
	endp_ccc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_CCC;
	if (root_ccc && endp_ccc) {
		printk(BIOS_INFO, "PCIe: Common Clock Configuration already enabled\n");
		return true;
	}
	return false;
}

/*
 * Check the Slot Clock Configuration for root port and endpoint
 * and enable Common Clock Configuration if possible.  If CCC is
 * enabled the link must be retrained.
 */
static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
				       struct device *endp, unsigned int endp_cap)
{
	u16 root_scc, endp_scc, lnkctl;

	/* No need to enable common clock if it is already active. */
	if (pciexp_is_ccc_active(root, root_cap, endp, endp_cap))
		return;

	/* Get Slot Clock Configuration for root port */
	root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
	root_scc &= PCI_EXP_LNKSTA_SLC;

	/* Get Slot Clock Configuration for endpoint */
	endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
	endp_scc &= PCI_EXP_LNKSTA_SLC;

	/* Enable Common Clock Configuration and retrain */
	if (root_scc && endp_scc) {
		printk(BIOS_INFO, "Enabling Common Clock Configuration\n");

		/* Set in endpoint */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set in root port */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Retrain link if CCC was enabled */
		pciexp_retrain_link(root, root_cap);
	}
}

static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
{
	/* Check if per-port Clock Power Management is supported by the endpoint. */
	u32 endp_ca;
	u16 lnkctl;
	endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
	if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
		printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
		return;
	}
	lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
	lnkctl |= PCI_EXP_EN_CLK_PM;
	pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
}

static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
{
	return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
}

static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
{
	return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
}

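/*
 * Enable LTR on `dev` if supported. The PCIe spec requires that LTR not
 * be enabled on a device unless it is already enabled on the upstream
 * component, hence the parent checks below. `parent` may be NULL when
 * the caller treats `dev` itself as the root of the hierarchy.
 */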
static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
			       struct device *dev, unsigned int cap)
{
	if (!_pciexp_ltr_supported(dev, cap)) {
		printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
		return false;
	}

	if (_pciexp_ltr_enabled(dev, cap))
		return true;

	if (parent &&
	    (!_pciexp_ltr_supported(parent, parent_cap) ||
	     !_pciexp_ltr_enabled(parent, parent_cap)))
		return false;

	pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
	printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
	return true;
}

static void pciexp_enable_ltr(struct device *dev)
{
	const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	/*
	 * If we have get_ltr_max_latencies(), treat `dev` as the root.
	 * If not, let _pciexp_enable_ltr() query the parent's state.
	 */
	struct device *parent = NULL;
	unsigned int parent_cap = 0;
	if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
		parent = dev->upstream->dev;
		if (parent->path.type != DEVICE_PATH_PCI)
			return;
		parent_cap = pci_find_capability(parent, PCI_CAP_ID_PCIE);
		if (!parent_cap)
			return;
	}

	(void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
}

bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
{
	/* Walk the hierarchy up to find get_ltr_max_latencies(). */
	do {
		if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
			break;
		if (dev->upstream->dev == dev || dev->upstream->dev->path.type != DEVICE_PATH_PCI)
			return false;
		dev = dev->upstream->dev;
	} while (true);

	dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
	return true;
}

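/*
 * Note: the 16-bit LTR latency registers written below encode a 10-bit
 * latency value in bits 9:0 and a 3-bit scale in bits 12:10; the
 * get_ltr_max_latencies() callback is expected to return values already
 * in this encoded form.
 */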
static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
				 struct device *dev, unsigned int cap)
{
	if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
		return;

	const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID, 0);
	if (!ltr_cap)
		return;

	u16 max_snoop, max_nosnoop;
	if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
		return;

	pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
	pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
	printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
}

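/*
 * Merge the root port's L1 Sub-States Capabilities register (*data,
 * read from offset 4 of the L1SS extended capability) with the
 * endpoint's: bits 3:0 are the supported-states mask, bits 15:8 the
 * Common Mode Restore Time in us, bits 17:16 the Port T_POWER_ON scale
 * (2, 10 or 100 us, see mult[] below) and bits 23:19 the Port
 * T_POWER_ON value. The larger restore time and T_POWER_ON of the
 * two is kept.
 */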
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
	unsigned int *data)
{
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	if (power_on_value * mult[power_on_scale] <
		endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}

static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
	unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is initialized above from the root port and
		 * must be reconciled with every endpoint to find the common
		 * subset. If any endpoint doesn't support L1 Sub-States,
		 * abort enabling this feature.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						&rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
		root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
		endp_power_on_value, power_on_scale);

	pci_update_config32(root, root_cap + 0x08, ~0xff00,
		(comm_mode_rst_time << 8));

	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
		(endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
		~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
		(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
		(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	pci_update_config32(root, root_cap + 0x08, ~0x1f,
		L1SubStateSupport);

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
			(endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
			~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			(0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			(2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
			L1SubStateSupport);
	}
}

static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
{
	unsigned int root_cap, end_cap;

	/* Do it for function 0 only */
	if (dev->path.pci.devfn & 0x7)
		return;

	root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID, 0);
	if (!root_cap)
		return;

	end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID, 0);
	if (!end_cap) {
		if (dev->vendor != PCI_VID_INTEL)
			return;

		end_cap = pciexp_find_ext_vendor_cap(dev, 0xcafe, 0);
		if (!end_cap)
			return;
	}

	pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
}

/*
 * Determine the ASPM L0s or L1 exit latency for a link
 * by checking both root port and endpoint and returning
 * the highest latency value.
 */
static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap,
			       enum aspm_type type)
{
	int root_lat = 0, endp_lat = 0;
	u32 root_lnkcap, endp_lnkcap;

	root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
	endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);

	/* ASPM support is advertised in LNKCAP bits 11:10 (bit 10 = L0s,
	 * bit 11 = L1); the aspm_type values map there via 1 << (type + 9). */
	if (!(root_lnkcap & (1 << (type + 9))) ||
	    !(endp_lnkcap & (1 << (type + 9))))
		return -1;

	/* Find the one with higher latency */
	switch (type) {
	case PCIE_ASPM_L0S:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		break;
	case PCIE_ASPM_L1:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		break;
	default:
		return -1;
	}

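	/*
	 * Comparing the raw 3-bit encodings is valid here: both exit
	 * latency encodings increase monotonically with time (e.g. for
	 * L0s, 0b000 = less than 64 ns up to 0b111 = more than 4 us),
	 * so the larger field value is always the larger latency.
	 */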
	return (endp_lat > root_lat) ? endp_lat : root_lat;
}

/*
 * Enable ASPM on PCIe root port and endpoint.
 */
static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
					 struct device *endp, unsigned int endp_cap)
{
	const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
	enum aspm_type apmc = PCIE_ASPM_NONE;
	int exit_latency, ok_latency;
	u16 lnkctl;
	u32 devcap;

	if (endp->disable_pcie_aspm)
		return;

	/* Get endpoint device capabilities for acceptable limits */
	devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);

	/* Enable L0s if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L0S);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L0S;

	/* Enable L1 if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L1);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L1;

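	/*
	 * The ASPM Control field is LNKCTL bits 1:0 (01b = L0s, 10b = L1,
	 * 11b = both), which matches the aspm_type enum values, so apmc
	 * can be OR'd into the register directly and also indexes
	 * aspm_type_str[] above.
	 */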
	if (apmc != PCIE_ASPM_NONE) {
		/* Set APMC in root port first */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set APMC in endpoint device next */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
	}

	printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
}

static void pciexp_dev_set_max_payload_size(struct device *dev, unsigned int max_payload)
{
	u16 devctl;
	unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	if (!pcie_cap)
		return;

	devctl = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCTL);
	devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	/*
	 * Should never overflow to higher bits, due to how max_payload is
	 * guarded in this file.
	 */
	devctl |= max_payload << 5;
	pci_write_config16(dev, pcie_cap + PCI_EXP_DEVCTL, devctl);
}

static unsigned int pciexp_dev_get_current_max_payload_size(struct device *dev)
{
	u16 devctl;
	unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	if (!pcie_cap)
		return 0;

	devctl = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCTL);
	devctl &= PCI_EXP_DEVCTL_PAYLOAD;
	return (devctl >> 5);
}

static unsigned int pciexp_dev_get_max_payload_size_cap(struct device *dev)
{
	u16 devcap;
	unsigned int pcie_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	if (!pcie_cap)
		return 0;

	devcap = pci_read_config16(dev, pcie_cap + PCI_EXP_DEVCAP);
	return (devcap & PCI_EXP_DEVCAP_PAYLOAD);
}

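/*
 * Max_Payload_Size fields encode the size as 128 << N bytes: N = 0 is
 * 128 bytes, up to N = 5 for 4096 bytes, which is why the log messages
 * below print 1 << (N + 7).
 */
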
/*
 * Set max payload size of a parent based on max payload size capability of the child.
 */
static void pciexp_configure_max_payload_size(struct device *parent, struct device *child)
{
	/* Get max payload size currently configured for the child
	   (programmed to its capability in pciexp_tune_dev) */
	unsigned int child_max_payload, parent_max_payload, max_payload;

	child_max_payload = pciexp_dev_get_current_max_payload_size(child);
	/* Get max payload size configured by parent */
	parent_max_payload = pciexp_dev_get_current_max_payload_size(parent);
	/* Set max payload to smaller of the reported device capability or parent config. */
	max_payload = MIN(child_max_payload, parent_max_payload);

	if (max_payload > 5) {
		/* Values 6 and 7 are reserved in PCIe 3.0 specs. */
		printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
		       max_payload);
		max_payload = 5;
	}

	if (max_payload != parent_max_payload) {
		pciexp_dev_set_max_payload_size(parent, max_payload);
		printk(BIOS_INFO, "%s: Max_Payload_Size adjusted to %d\n", dev_path(parent),
		       (1 << (max_payload + 7)));
	}
}

/*
 * Clear Lane Error Status at the end of PCIe link training.
 * Lane errors are normal during link training, so the status is
 * cleared here if PCIEXP_LANE_ERR_STAT_CLEAR is set. At this point
 * the link has been used, but only very briefly.
 */
static void clear_lane_error_status(struct device *dev)
{
	u32 reg32;
	u16 pos;

	pos = pciexp_find_extended_cap(dev, PCI_EXP_SEC_CAP_ID, 0);
	if (pos == 0)
		return;

	reg32 = pci_read_config32(dev, pos + PCI_EXP_SEC_LANE_ERR_STATUS);
	if (reg32 == 0)
		return;

	printk(BIOS_DEBUG, "%s: Clear Lane Error Status.\n", dev_path(dev));
	printk(BIOS_DEBUG, "LaneErrStat:0x%x\n", reg32);
	pci_write_config32(dev, pos + PCI_EXP_SEC_LANE_ERR_STATUS, reg32);
}

static void pciexp_tune_dev(struct device *dev)
{
	struct device *root = dev->upstream->dev;
	unsigned int root_cap, cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
	if (!root_cap)
		return;

	/* Check for and enable Common Clock */
	if (CONFIG(PCIEXP_COMMON_CLOCK))
		pciexp_enable_common_clock(root, root_cap, dev, cap);

	/* Check if per-port CLKREQ is supported by the endpoint */
	if (CONFIG(PCIEXP_CLK_PM))
		pciexp_enable_clock_power_pm(dev, cap);

	/* Enable L1 Sub-State when both root port and endpoint support it */
	if (CONFIG(PCIEXP_L1_SUB_STATE))
		pciexp_config_L1_sub_state(root, dev);

	/* Check for and enable ASPM */
	if (CONFIG(PCIEXP_ASPM))
		pciexp_enable_aspm(root, root_cap, dev, cap);

	/* Clear PCIe Lane Error Status */
	if (CONFIG(PCIEXP_LANE_ERR_STAT_CLEAR))
		clear_lane_error_status(root);

	/* Set the Max Payload Size to the maximum supported capability for this device */
	if (pcie_is_endpoint(dev))
		pciexp_dev_set_max_payload_size(dev, pciexp_dev_get_max_payload_size_cap(dev));

	/* Limit the parent's Max Payload Size if needed */
	pciexp_configure_max_payload_size(root, dev);

	pciexp_configure_ltr(root, root_cap, dev, cap);
}

static void pciexp_sync_max_payload_size(struct bus *bus, unsigned int max_payload)
{
	struct device *child;

	/* Set the max payload for children on the bus and their children, etc. */
	for (child = bus->children; child; child = child->sibling) {
		if (!is_pci(child))
			continue;

		pciexp_dev_set_max_payload_size(child, max_payload);

		if (child->downstream)
			pciexp_sync_max_payload_size(child->downstream, max_payload);
	}
}

void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
			     unsigned int max_devfn)
{
	struct device *child;
	unsigned int max_payload;

	pciexp_enable_ltr(bus->dev);

	/*
	 * Set the Max Payload Size to the maximum supported capability for this bridge.
	 * This value will be used in pciexp_tune_dev to limit the Max Payload Size
	 * if needed.
	 */
	max_payload = pciexp_dev_get_max_payload_size_cap(bus->dev);
	pciexp_dev_set_max_payload_size(bus->dev, max_payload);

	pci_scan_bus(bus, min_devfn, max_devfn);

	for (child = bus->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_PCI)
			continue;
		if ((child->path.pci.devfn < min_devfn) ||
		    (child->path.pci.devfn > max_devfn)) {
			continue;
		}
		pciexp_tune_dev(child);
	}

	/*
	 * Now the root port's Max Payload Size should be set to the highest
	 * possible value supported by all devices under a given root port.
	 * Propagate that value down from the root port to all devices, so the
	 * Max Payload Size is equal on all devices, as some devices may have
	 * different capabilities and the programmed value depends on the
	 * order of device population in the subtree.
	 */
	if (pcie_is_root_port(bus->dev)) {
		max_payload = pciexp_dev_get_current_max_payload_size(bus->dev);

		printk(BIOS_INFO, "%s: Setting Max_Payload_Size to %d for devices under this"
				  " root port\n", dev_path(bus->dev), 1 << (max_payload + 7));

		pciexp_sync_max_payload_size(bus, max_payload);
	}
}

void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
}

/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	.set_subsystem = 0,
};

struct device_operations default_pciexp_ops_bus = {
	.read_resources   = pci_bus_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus         = pciexp_scan_bridge,
	.reset_bus        = pci_bus_reset,
	.ops_pci          = &pciexp_bus_ops_pci,
};

static void pciexp_hotplug_dummy_read_resources(struct device *dev)
{
	struct resource *resource;

	/* Add extra memory space */
	resource = new_resource(dev, 0x10);
	resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffff;
	resource->flags |= IORESOURCE_MEM;

	/* Add extra prefetchable memory space */
	resource = new_resource(dev, 0x14);
	resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffffffffffff;
	resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Set resource flag requesting allocation above 4G boundary. */
	if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
		resource->flags |= IORESOURCE_ABOVE_4G;

	/* Add extra I/O space */
	resource = new_resource(dev, 0x18);
	resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffff;
	resource->flags |= IORESOURCE_IO;
}

static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources   = pciexp_hotplug_dummy_read_resources,
	.set_resources    = noop_set_resources,
};

void pciexp_hotplug_scan_bridge(struct device *dev)
{
	dev->hotplug_port = 1;
	dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;

	/* Normal PCIe Scan */
	pciexp_scan_bridge(dev);

	/* Add dummy slot to preserve resources, must happen after bus scan */
	struct device *dummy;
	struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
	dummy = alloc_dev(dev->downstream, &dummy_path);
	dummy->ops = &pciexp_hotplug_dummy_ops;
}

struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources   = pci_bus_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus         = pciexp_hotplug_scan_bridge,
	.reset_bus        = pci_bus_reset,
	.ops_pci          = &pciexp_bus_ops_pci,
};