1 /*
2 * PCI / PCI-X / PCI-Express support for 4xx parts
3 *
4 * Copyright 2007 Ben. Herrenschmidt <[email protected]>, IBM Corp.
5 *
6 * Most PCI Express code is coming from Stefan Roese implementation for
7 * arch/ppc in the Denx tree, slightly reworked by me.
8 *
9 * Copyright 2007 DENX Software Engineering, Stefan Roese <[email protected]>
10 *
11 * Some of that comes itself from a previous implementation for 440SPE only
12 * by Roland Dreier:
13 *
14 * Copyright (c) 2005 Cisco Systems. All rights reserved.
15 * Roland Dreier <[email protected]>
16 *
17 */
18
19 #undef DEBUG
20
21 #include <linux/kernel.h>
22 #include <linux/pci.h>
23 #include <linux/init.h>
24 #include <linux/of.h>
25 #include <linux/of_address.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28
29 #include <asm/io.h>
30 #include <asm/pci-bridge.h>
31 #include <asm/machdep.h>
32 #include <asm/dcr.h>
33 #include <asm/dcr-regs.h>
34 #include <mm/mmu_decl.h>
35
36 #include "pci.h"
37
/* Set once the first host bridge establishes the global DMA offset;
 * every subsequent bridge must report the same offset or probing fails.
 */
static int dma_offset_set;

#define U64_TO_U32_LOW(val)	((u32)((val) & 0x00000000ffffffffULL))
#define U64_TO_U32_HIGH(val)	((u32)((val) >> 32))

/* Split a resource_size_t into 32-bit halves.  When resources are only
 * 32 bits wide the high half is simply 0 (and the shift above would be
 * undefined, hence the sizeof test).
 */
#define RES_TO_U32_LOW(val)	\
	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
#define RES_TO_U32_HIGH(val)	\
	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
47
ppc440spe_revA(void)48 static inline int ppc440spe_revA(void)
49 {
50 /* Catch both 440SPe variants, with and without RAID6 support */
51 if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
52 return 1;
53 else
54 return 0;
55 }
56
fixup_ppc4xx_pci_bridge(struct pci_dev * dev)57 static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
58 {
59 struct pci_controller *hose;
60 struct resource *r;
61
62 if (dev->devfn != 0 || dev->bus->self != NULL)
63 return;
64
65 hose = pci_bus_to_host(dev->bus);
66 if (hose == NULL)
67 return;
68
69 if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
70 !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
71 !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
72 return;
73
74 if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
75 of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
76 hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
77 }
78
79 /* Hide the PCI host BARs from the kernel as their content doesn't
80 * fit well in the resource management
81 */
82 pci_dev_for_each_resource(dev, r) {
83 r->start = r->end = 0;
84 r->flags = 0;
85 }
86
87 printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
88 pci_name(dev));
89 }
90 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
91
/*
 * Parse the bridge node's "dma-ranges" and derive the single inbound
 * DMA window (start/size) the 4xx cells can program, filling *res.
 *
 * On success, publishes the window as the global pci_dram_offset and
 * in the hose's dma_window_base_cur/dma_window_size fields.
 *
 * Returns 0 on success, -ENXIO when the ranges are inconsistent,
 * unaligned, too small for memory, or outside 32-bit space (except on
 * 460sx/476fpe which have 64-bit bus addresses).
 */
static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
					  void __iomem *reg,
					  struct resource *res)
{
	u64 size;
	struct of_range_parser parser;
	struct of_range range;

	/* Default: 2GB prefetchable window at PCI address 0, used as-is
	 * when the node has no parseable dma-ranges (goto out below).
	 */
	res->start = 0;
	size = 0x80000000;
	res->end = size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

	if (of_pci_dma_range_parser_init(&parser, hose->dn))
		goto out;

	/* Pick the first usable memory range; remaining ones are ignored */
	for_each_of_range(&parser, &range) {
		u32 pci_space = range.flags;
		u64 pci_addr = range.bus_addr;
		u64 cpu_addr = range.cpu_addr;
		size = range.size;

		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* We only care about memory */
		if ((pci_space & 0x03000000) != 0x02000000)
			continue;

		/* We currently only support memory at 0, and pci_addr
		 * within 32 bits space
		 */
		if (cpu_addr != 0 || pci_addr > 0xffffffff) {
			printk(KERN_WARNING "%pOF: Ignored unsupported dma range"
			       " 0x%016llx...0x%016llx -> 0x%016llx\n",
			       hose->dn,
			       pci_addr, pci_addr + size - 1, cpu_addr);
			continue;
		}

		/* Check if not prefetchable */
		if (!(pci_space & 0x40000000))
			res->flags &= ~IORESOURCE_PREFETCH;


		/* Use that */
		res->start = pci_addr;
		/* Beware of 32 bits resources: clamp the end so the
		 * resource_size_t arithmetic below doesn't wrap.
		 */
		if (sizeof(resource_size_t) == sizeof(u32) &&
		    (pci_addr + size) > 0x100000000ull)
			res->end = 0xffffffff;
		else
			res->end = res->start + size - 1;
		break;
	}

	/* We only support one global DMA offset */
	if (dma_offset_set && pci_dram_offset != res->start) {
		printk(KERN_ERR "%pOF: dma-ranges(s) mismatch\n", hose->dn);
		return -ENXIO;
	}

	/* Check that we can fit all of memory as we don't support
	 * DMA bounce buffers
	 */
	if (size < total_memory) {
		printk(KERN_ERR "%pOF: dma-ranges too small "
		       "(size=%llx total_memory=%llx)\n",
		       hose->dn, size, (u64)total_memory);
		return -ENXIO;
	}

	/* Check we are a power of 2 size and that base is a multiple of size*/
	if ((size & (size - 1)) != 0  ||
	    (res->start & (size - 1)) != 0) {
		printk(KERN_ERR "%pOF: dma-ranges unaligned\n", hose->dn);
		return -ENXIO;
	}

	/* Check that we are fully contained within 32 bits space if we are not
	 * running on a 460sx or 476fpe which have 64 bit bus addresses.
	 */
	if (res->end > 0xffffffff &&
	    !(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx")
	      || of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) {
		printk(KERN_ERR "%pOF: dma-ranges outside of 32 bits space\n",
		       hose->dn);
		return -ENXIO;
	}
 out:
	/* Publish the (possibly default) window globally */
	dma_offset_set = 1;
	pci_dram_offset = res->start;
	hose->dma_window_base_cur = res->start;
	hose->dma_window_size = resource_size(res);

	printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
	       pci_dram_offset);
	printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n",
	       (unsigned long long)hose->dma_window_base_cur);
	printk(KERN_INFO "DMA window size 0x%016llx\n",
	       (unsigned long long)hose->dma_window_size);
	return 0;
}
196
197 /*
198 * 4xx PCI 2.x part
199 */
200
ppc4xx_setup_one_pci_PMM(struct pci_controller * hose,void __iomem * reg,u64 plb_addr,u64 pci_addr,u64 size,unsigned int flags,int index)201 static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
202 void __iomem *reg,
203 u64 plb_addr,
204 u64 pci_addr,
205 u64 size,
206 unsigned int flags,
207 int index)
208 {
209 u32 ma, pcila, pciha;
210
211 /* Hack warning ! The "old" PCI 2.x cell only let us configure the low
212 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
213 * address are actually hard wired to a value that appears to depend
214 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
215 *
216 * The trick here is we just crop those top bits and ignore them when
217 * programming the chip. That means the device-tree has to be right
218 * for the specific part used (we don't print a warning if it's wrong
219 * but on the other hand, you'll crash quickly enough), but at least
220 * this code should work whatever the hard coded value is
221 */
222 plb_addr &= 0xffffffffull;
223
224 /* Note: Due to the above hack, the test below doesn't actually test
225 * if you address is above 4G, but it tests that address and
226 * (address + size) are both contained in the same 4G
227 */
228 if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
229 size < 0x1000 || (plb_addr & (size - 1)) != 0) {
230 printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
231 return -1;
232 }
233 ma = (0xffffffffu << ilog2(size)) | 1;
234 if (flags & IORESOURCE_PREFETCH)
235 ma |= 2;
236
237 pciha = RES_TO_U32_HIGH(pci_addr);
238 pcila = RES_TO_U32_LOW(pci_addr);
239
240 writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
241 writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
242 writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
243 writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
244
245 return 0;
246 }
247
ppc4xx_configure_pci_PMMs(struct pci_controller * hose,void __iomem * reg)248 static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
249 void __iomem *reg)
250 {
251 int i, j, found_isa_hole = 0;
252
253 /* Setup outbound memory windows */
254 for (i = j = 0; i < 3; i++) {
255 struct resource *res = &hose->mem_resources[i];
256 resource_size_t offset = hose->mem_offset[i];
257
258 /* we only care about memory windows */
259 if (!(res->flags & IORESOURCE_MEM))
260 continue;
261 if (j > 2) {
262 printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn);
263 break;
264 }
265
266 /* Configure the resource */
267 if (ppc4xx_setup_one_pci_PMM(hose, reg,
268 res->start,
269 res->start - offset,
270 resource_size(res),
271 res->flags,
272 j) == 0) {
273 j++;
274
275 /* If the resource PCI address is 0 then we have our
276 * ISA memory hole
277 */
278 if (res->start == offset)
279 found_isa_hole = 1;
280 }
281 }
282
283 /* Handle ISA memory hole if not already covered */
284 if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
285 if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
286 hose->isa_mem_size, 0, j) == 0)
287 printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
288 hose->dn);
289 }
290
ppc4xx_configure_pci_PTMs(struct pci_controller * hose,void __iomem * reg,const struct resource * res)291 static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
292 void __iomem *reg,
293 const struct resource *res)
294 {
295 resource_size_t size = resource_size(res);
296 u32 sa;
297
298 /* Calculate window size */
299 sa = (0xffffffffu << ilog2(size)) | 1;
300 sa |= 0x1;
301
302 /* RAM is always at 0 local for now */
303 writel(0, reg + PCIL0_PTM1LA);
304 writel(sa, reg + PCIL0_PTM1MS);
305
306 /* Map on PCI side */
307 early_write_config_dword(hose, hose->first_busno, 0,
308 PCI_BASE_ADDRESS_1, res->start);
309 early_write_config_dword(hose, hose->first_busno, 0,
310 PCI_BASE_ADDRESS_2, 0x00000000);
311 early_write_config_word(hose, hose->first_busno, 0,
312 PCI_COMMAND, 0x0006);
313 }
314
/*
 * Probe and set up one 4xx PCI 2.x host bridge from its device-tree
 * node: map the internal registers, allocate the pci_controller,
 * configure indirect config-space access, then program the outbound
 * (PMM) and inbound (PTM) windows.  On any failure the controller and
 * register mapping are released again.
 */
static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
{
	/* NYI */
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int primary = 0;

	/* Check if device is enabled */
	if (!of_device_is_available(np)) {
		printk(KERN_INFO "%pOF: Port disabled via device-tree\n", np);
		return;
	}

	/* Fetch config space registers address (reg entry 0) */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%pOF: Can't get PCI config register base !",
		       np);
		return;
	}
	/* Fetch host bridge internal registers address (reg entry 3) */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%pOF: Can't get PCI internal register base !",
		       np);
		return;
	}

	/* Check if primary bridge */
	if (of_property_read_bool(np, "primary"))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
	if (reg == NULL) {
		printk(KERN_ERR "%pOF: Can't map registers !", np);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space: CFGADDR at base, CFGDATA at base + 4 */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);

	/* Disable all windows before reprogramming them */
	writel(0, reg + PCIL0_PMM0MA);
	writel(0, reg + PCIL0_PMM1MA);
	writel(0, reg + PCIL0_PMM2MA);
	writel(0, reg + PCIL0_PTM1MS);
	writel(0, reg + PCIL0_PTM2MS);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pci_PMMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}
400
401 /*
402 * 4xx PCI-X part
403 */
404
ppc4xx_setup_one_pcix_POM(struct pci_controller * hose,void __iomem * reg,u64 plb_addr,u64 pci_addr,u64 size,unsigned int flags,int index)405 static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
406 void __iomem *reg,
407 u64 plb_addr,
408 u64 pci_addr,
409 u64 size,
410 unsigned int flags,
411 int index)
412 {
413 u32 lah, lal, pciah, pcial, sa;
414
415 if (!is_power_of_2(size) || size < 0x1000 ||
416 (plb_addr & (size - 1)) != 0) {
417 printk(KERN_WARNING "%pOF: Resource out of range\n",
418 hose->dn);
419 return -1;
420 }
421
422 /* Calculate register values */
423 lah = RES_TO_U32_HIGH(plb_addr);
424 lal = RES_TO_U32_LOW(plb_addr);
425 pciah = RES_TO_U32_HIGH(pci_addr);
426 pcial = RES_TO_U32_LOW(pci_addr);
427 sa = (0xffffffffu << ilog2(size)) | 0x1;
428
429 /* Program register values */
430 if (index == 0) {
431 writel(lah, reg + PCIX0_POM0LAH);
432 writel(lal, reg + PCIX0_POM0LAL);
433 writel(pciah, reg + PCIX0_POM0PCIAH);
434 writel(pcial, reg + PCIX0_POM0PCIAL);
435 writel(sa, reg + PCIX0_POM0SA);
436 } else {
437 writel(lah, reg + PCIX0_POM1LAH);
438 writel(lal, reg + PCIX0_POM1LAL);
439 writel(pciah, reg + PCIX0_POM1PCIAH);
440 writel(pcial, reg + PCIX0_POM1PCIAL);
441 writel(sa, reg + PCIX0_POM1SA);
442 }
443
444 return 0;
445 }
446
ppc4xx_configure_pcix_POMs(struct pci_controller * hose,void __iomem * reg)447 static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
448 void __iomem *reg)
449 {
450 int i, j, found_isa_hole = 0;
451
452 /* Setup outbound memory windows */
453 for (i = j = 0; i < 3; i++) {
454 struct resource *res = &hose->mem_resources[i];
455 resource_size_t offset = hose->mem_offset[i];
456
457 /* we only care about memory windows */
458 if (!(res->flags & IORESOURCE_MEM))
459 continue;
460 if (j > 1) {
461 printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn);
462 break;
463 }
464
465 /* Configure the resource */
466 if (ppc4xx_setup_one_pcix_POM(hose, reg,
467 res->start,
468 res->start - offset,
469 resource_size(res),
470 res->flags,
471 j) == 0) {
472 j++;
473
474 /* If the resource PCI address is 0 then we have our
475 * ISA memory hole
476 */
477 if (res->start == offset)
478 found_isa_hole = 1;
479 }
480 }
481
482 /* Handle ISA memory hole if not already covered */
483 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
484 if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
485 hose->isa_mem_size, 0, j) == 0)
486 printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
487 hose->dn);
488 }
489
ppc4xx_configure_pcix_PIMs(struct pci_controller * hose,void __iomem * reg,const struct resource * res,int big_pim,int enable_msi_hole)490 static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
491 void __iomem *reg,
492 const struct resource *res,
493 int big_pim,
494 int enable_msi_hole)
495 {
496 resource_size_t size = resource_size(res);
497 u32 sa;
498
499 /* RAM is always at 0 */
500 writel(0x00000000, reg + PCIX0_PIM0LAH);
501 writel(0x00000000, reg + PCIX0_PIM0LAL);
502
503 /* Calculate window size */
504 sa = (0xffffffffu << ilog2(size)) | 1;
505 sa |= 0x1;
506 if (res->flags & IORESOURCE_PREFETCH)
507 sa |= 0x2;
508 if (enable_msi_hole)
509 sa |= 0x4;
510 writel(sa, reg + PCIX0_PIM0SA);
511 if (big_pim)
512 writel(0xffffffff, reg + PCIX0_PIM0SAH);
513
514 /* Map on PCI side */
515 writel(0x00000000, reg + PCIX0_BAR0H);
516 writel(res->start, reg + PCIX0_BAR0L);
517 writew(0x0006, reg + PCIX0_COMMAND);
518 }
519
/*
 * Probe and set up one 4xx PCI-X host bridge from its device-tree
 * node: map the internal registers, allocate the pci_controller,
 * configure indirect config-space access, then program the outbound
 * (POM) and inbound (PIM) windows.  On any failure the controller and
 * register mapping are released again.
 */
static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
{
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int big_pim, msi, primary;

	/* Fetch config space registers address (reg entry 0) */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%pOF: Can't get PCI-X config register base !",
		       np);
		return;
	}
	/* Fetch host bridge internal registers address (reg entry 3) */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%pOF: Can't get PCI-X internal register base !",
		       np);
		return;
	}

	/* Check if it supports large PIMs (440GX) */
	big_pim = of_property_read_bool(np, "large-inbound-windows");

	/* Check if we should enable MSIs inbound hole */
	msi = of_property_read_bool(np, "enable-msi-hole");

	/* Check if primary bridge */
	primary = of_property_read_bool(np, "primary");

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
	if (reg == NULL) {
		printk(KERN_ERR "%pOF: Can't map registers !", np);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space: CFGADDR at base, CFGDATA at base + 4 */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
			   PPC_INDIRECT_TYPE_SET_CFG_TYPE);

	/* Disable all windows before reprogramming them */
	writel(0, reg + PCIX0_POM0SA);
	writel(0, reg + PCIX0_POM1SA);
	writel(0, reg + PCIX0_POM2SA);
	writel(0, reg + PCIX0_PIM0SA);
	writel(0, reg + PCIX0_PIM1SA);
	writel(0, reg + PCIX0_PIM2SA);
	if (big_pim) {
		writel(0, reg + PCIX0_PIM0SAH);
		writel(0, reg + PCIX0_PIM2SAH);
	}

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pcix_POMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}
609
610 #ifdef CONFIG_PPC4xx_PCI_EXPRESS
611
612 /*
613 * 4xx PCI-Express part
614 *
615 * We support 3 parts currently based on the compatible property:
616 *
617 * ibm,plb-pciex-440spe
618 * ibm,plb-pciex-405ex
619 * ibm,plb-pciex-460ex
620 *
621 * Anything else will be rejected for now as they are all subtly
622 * different unfortunately.
623 *
624 */
625
#define MAX_PCIE_BUS_MAPPED	0x40

/* Per-port state for one 4xx PCI Express port */
struct ppc4xx_pciex_port
{
	struct pci_controller	*hose;		/* associated host controller */
	struct device_node	*node;		/* device-tree node */
	unsigned int		index;		/* port number */
	int			endpoint;	/* 1 = endpoint, 0 = root port */
	int			link;		/* 1 once link training succeeded */
	int			has_ibpre;	/* inbound presence detect available */
	unsigned int		sdr_base;	/* SDR register base for this port */
	dcr_host_t		dcrs;		/* mapped DCR range */
	struct resource		cfg_space;	/* config space resource */
	struct resource		utl_regs;	/* UTL register resource */
	void __iomem		*utl_base;	/* mapped UTL registers */
};

/* Array of ports, sized by the SoC's core_init() return value */
static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
static unsigned int ppc4xx_pciex_port_count;
645
/* Per-SoC hooks for PCIe core and port bringup */
struct ppc4xx_pciex_hwops
{
	bool want_sdr;		/* core needs SDR register access */
	int (*core_init)(struct device_node *np);	/* returns port count */
	int (*port_init_hw)(struct ppc4xx_pciex_port *port);
	int (*setup_utl)(struct ppc4xx_pciex_port *port);
	void (*check_link)(struct ppc4xx_pciex_port *port);
};

/* Selected once at probe time from the compatible property */
static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
656
ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port * port,unsigned int sdr_offset,unsigned int mask,unsigned int value,int timeout_ms)657 static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
658 unsigned int sdr_offset,
659 unsigned int mask,
660 unsigned int value,
661 int timeout_ms)
662 {
663 u32 val;
664
665 while(timeout_ms--) {
666 val = mfdcri(SDR0, port->sdr_base + sdr_offset);
667 if ((val & mask) == value) {
668 pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
669 port->index, sdr_offset, timeout_ms, val);
670 return 0;
671 }
672 msleep(1);
673 }
674 return -1;
675 }
676
ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port * port)677 static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
678 {
679 /* Wait for reset to complete */
680 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
681 printk(KERN_WARNING "PCIE%d: PGRST failed\n",
682 port->index);
683 return -1;
684 }
685 return 0;
686 }
687
688
ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port * port)689 static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
690 {
691 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
692
693 /* Check for card presence detect if supported, if not, just wait for
694 * link unconditionally.
695 *
696 * note that we don't fail if there is no link, we just filter out
697 * config space accesses. That way, it will be easier to implement
698 * hotplug later on.
699 */
700 if (!port->has_ibpre ||
701 !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
702 1 << 28, 1 << 28, 100)) {
703 printk(KERN_INFO
704 "PCIE%d: Device detected, waiting for link...\n",
705 port->index);
706 if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
707 0x1000, 0x1000, 2000))
708 printk(KERN_WARNING
709 "PCIE%d: Link up failed\n", port->index);
710 else {
711 printk(KERN_INFO
712 "PCIE%d: link is up !\n", port->index);
713 port->link = 1;
714 }
715 } else
716 printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
717 }
718
719 #ifdef CONFIG_44x
720
721 /* Check various reset bits of the 440SPe PCIe core */
/* Check various reset bits of the 440SPe PCIe core.
 *
 * Verifies that all three ports' RCSSET registers are in the expected
 * post-reset state (rstgu/rstdl asserted, rstpyn/hldplb/rdy/shutdown
 * deasserted).  Returns 0 when everything looks sane, -1 otherwise
 * (each failing field is also logged).
 */
static int __init ppc440spe_pciex_check_reset(struct device_node *np)
{
	u32 valPE0, valPE1, valPE2;
	int err = 0;

	/* SDR0_PEGPLLLCT1 reset */
	if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
		/*
		 * the PCIe core was probably already initialised
		 * by firmware - let's re-reset RCSSET regs
		 *
		 * -- Shouldn't we also re-reset the whole thing ? -- BenH
		 */
		pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
		mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
		mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
		mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
	}

	valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
	valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
	valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);

	/* SDR0_PExRCSSET rstgu - must be asserted on all ports */
	if (!(valPE0 & 0x01000000) ||
	    !(valPE1 & 0x01000000) ||
	    !(valPE2 & 0x01000000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rstdl - must be asserted on all ports */
	if (!(valPE0 & 0x00010000) ||
	    !(valPE1 & 0x00010000) ||
	    !(valPE2 & 0x00010000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rstpyn - must be clear on all ports */
	if ((valPE0 & 0x00001000) ||
	    (valPE1 & 0x00001000) ||
	    (valPE2 & 0x00001000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET hldplb - must be clear on all ports */
	if ((valPE0 & 0x10000000) ||
	    (valPE1 & 0x10000000) ||
	    (valPE2 & 0x10000000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rdy - must be clear on all ports */
	if ((valPE0 & 0x00100000) ||
	    (valPE1 & 0x00100000) ||
	    (valPE2 & 0x00100000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET shutdown - must be clear on all ports */
	if ((valPE0 & 0x00000100) ||
	    (valPE1 & 0x00000100) ||
	    (valPE2 & 0x00000100)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
		err = -1;
	}

	return err;
}
795
796 /* Global PCIe core initializations for 440SPe core */
ppc440spe_pciex_core_init(struct device_node * np)797 static int __init ppc440spe_pciex_core_init(struct device_node *np)
798 {
799 int time_out = 20;
800
801 /* Set PLL clock receiver to LVPECL */
802 dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
803
804 /* Shouldn't we do all the calibration stuff etc... here ? */
805 if (ppc440spe_pciex_check_reset(np))
806 return -ENXIO;
807
808 if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
809 printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
810 "failed (0x%08x)\n",
811 mfdcri(SDR0, PESDR0_PLLLCT2));
812 return -1;
813 }
814
815 /* De-assert reset of PCIe PLL, wait for lock */
816 dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
817 udelay(3);
818
819 while (time_out) {
820 if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
821 time_out--;
822 udelay(1);
823 } else
824 break;
825 }
826 if (!time_out) {
827 printk(KERN_INFO "PCIE: VCO output not locked\n");
828 return -1;
829 }
830
831 pr_debug("PCIE initialization OK\n");
832
833 return 3;
834 }
835
ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port * port)836 static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
837 {
838 u32 val = 1 << 24;
839
840 if (port->endpoint)
841 val = PTYPE_LEGACY_ENDPOINT << 20;
842 else
843 val = PTYPE_ROOT_PORT << 20;
844
845 if (port->index == 0)
846 val |= LNKW_X8 << 12;
847 else
848 val |= LNKW_X4 << 12;
849
850 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
851 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
852 if (ppc440spe_revA())
853 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
854 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
855 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
856 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
857 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
858 if (port->index == 0) {
859 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
860 0x35000000);
861 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
862 0x35000000);
863 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
864 0x35000000);
865 mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
866 0x35000000);
867 }
868 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
869 (1 << 24) | (1 << 16), 1 << 12);
870
871 return ppc4xx_pciex_port_reset_sdr(port);
872 }
873
/* Rev. A port init: common init only, no inbound presence detect */
static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	return ppc440spe_pciex_init_port_hw(port);
}
878
ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port * port)879 static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
880 {
881 int rc = ppc440spe_pciex_init_port_hw(port);
882
883 port->has_ibpre = 1;
884
885 return rc;
886 }
887
/* Program the UTL (Upper Transaction Layer) for a 440SPe rev. A port. */
static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* XXX Check what that value means... I hate magic */
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_OUTTR,   0x08000000);
	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x10000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,   0x53000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x08000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x10000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);

	return 0;
}
907
/* Program the UTL for a 440SPe rev. B port. */
static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* Report CRS to the operating system */
	out_be32(port->utl_base + PEUTL_PBCTL,    0x08000000);

	return 0;
}
915
/* hwops for 440SPe rev. A silicon */
static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc440spe_pciex_core_init,
	.port_init_hw	= ppc440speA_pciex_init_port_hw,
	.setup_utl	= ppc440speA_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};

/* hwops for 440SPe rev. B silicon */
static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc440spe_pciex_core_init,
	.port_init_hw	= ppc440speB_pciex_init_port_hw,
	.setup_utl	= ppc440speB_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};
933
/* 460EX core init: no global setup needed, the SoC has 2 ports */
static int __init ppc460ex_pciex_core_init(struct device_node *np)
{
	/* Nothing to do, return 2 ports */
	return 2;
}
939
/*
 * Per-port hardware init for the 460EX PCIe core: program port type,
 * link width and UTL settings, tune the PHY lanes, reset the PHY and
 * poll for completion, then release the port resets and wait for
 * PGRST.  Port 0 is x1, port 1 is x4.
 *
 * Returns 0 on success, -1 if the port never comes out of reset.
 */
static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;
	u32 utlset1;

	/* Port type: endpoint vs. root port */
	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT << 20;
	else
		val = PTYPE_ROOT_PORT << 20;

	if (port->index == 0) {
		val |= LNKW_X1 << 12;
		utlset1 = 0x20000000;
	} else {
		val |= LNKW_X4 << 12;
		utlset1 = 0x20101101;
	}

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);

	/* Per-lane PHY tuning (CDR control, drive strength, clock),
	 * then kick off a PHY reset.  Port 1 has four lanes.
	 */
	switch (port->index) {
	case 0:
		mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
		mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);

		mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
		break;

	case 1:
		mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);

		mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
		break;
	}

	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
	       (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));

	/* Poll for PHY reset */
	/* XXX FIXME add timeout */
	switch (port->index) {
	case 0:
		while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
			udelay(10);
		break;
	case 1:
		while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
			udelay(10);
		break;
	}

	/* Release rstgu/rstdl, keep rstpyn asserted */
	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
		~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
	       PESDRx_RCSSET_RSTPYN);

	/* 460EX has working inbound presence detect */
	port->has_ibpre = 1;

	return ppc4xx_pciex_port_reset_sdr(port);
}
1015
/* Program the UTL (Upper Transaction Layer) for a 460EX port. */
static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_PBCTL,	0x0800000c);
	out_be32(port->utl_base + PEUTL_OUTTR,	0x08000000);
	out_be32(port->utl_base + PEUTL_INTR,	0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,	0x04000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,	0x00000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,	0x02000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,	0x04000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,	0x80800066);

	return 0;
}
1035
/* hwops for the 460EX/460GT PCIe core */
static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc460ex_pciex_core_init,
	.port_init_hw	= ppc460ex_pciex_init_port_hw,
	.setup_utl	= ppc460ex_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};
1044
apm821xx_pciex_core_init(struct device_node * np)1045 static int __init apm821xx_pciex_core_init(struct device_node *np)
1046 {
1047 /* Return the number of pcie port */
1048 return 1;
1049 }
1050
apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port * port)1051 static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1052 {
1053 u32 val;
1054
1055 /*
1056 * Do a software reset on PCIe ports.
1057 * This code is to fix the issue that pci drivers doesn't re-assign
1058 * bus number for PCIE devices after Uboot
1059 * scanned and configured all the buses (eg. PCIE NIC IntelPro/1000
1060 * PT quad port, SAS LSI 1064E)
1061 */
1062
1063 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
1064 mdelay(10);
1065
1066 if (port->endpoint)
1067 val = PTYPE_LEGACY_ENDPOINT << 20;
1068 else
1069 val = PTYPE_ROOT_PORT << 20;
1070
1071 val |= LNKW_X1 << 12;
1072
1073 mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
1074 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
1075 mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
1076
1077 mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
1078 mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
1079 mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
1080
1081 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
1082 mdelay(50);
1083 mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
1084
1085 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1086 mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
1087 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
1088
1089 /* Poll for PHY reset */
1090 val = PESDR0_460EX_RSTSTA - port->sdr_base;
1091 if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
1092 printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
1093 return -EBUSY;
1094 } else {
1095 mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
1096 (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
1097 ~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
1098 PESDRx_RCSSET_RSTPYN);
1099
1100 port->has_ibpre = 1;
1101 return 0;
1102 }
1103 }
1104
/* Hardware ops for APM821xx: own port init, shared 460EX UTL setup */
static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
	.want_sdr   = true,
	.core_init = apm821xx_pciex_core_init,
	.port_init_hw = apm821xx_pciex_init_port_hw,
	.setup_utl = ppc460ex_pciex_init_utl,
	.check_link = ppc4xx_pciex_check_link_sdr,
};
1112
ppc460sx_pciex_core_init(struct device_node * np)1113 static int __init ppc460sx_pciex_core_init(struct device_node *np)
1114 {
1115 /* HSS drive amplitude */
1116 mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
1117 mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
1118 mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
1119 mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
1120 mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
1121 mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
1122 mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
1123 mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
1124
1125 mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
1126 mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
1127 mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
1128 mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
1129
1130 mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
1131 mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
1132 mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
1133 mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
1134
1135 /* HSS TX pre-emphasis */
1136 mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
1137 mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
1138 mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
1139 mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
1140 mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
1141 mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
1142 mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
1143 mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
1144
1145 mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
1146 mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
1147 mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
1148 mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
1149
1150 mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
1151 mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
1152 mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
1153 mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
1154
1155 /* HSS TX calibration control */
1156 mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
1157 mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
1158 mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
1159
1160 /* HSS TX slew control */
1161 mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
1162 mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
1163 mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
1164
1165 /* Set HSS PRBS enabled */
1166 mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
1167 mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);
1168
1169 udelay(100);
1170
1171 /* De-assert PLLRESET */
1172 dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
1173
1174 /* Reset DL, UTL, GPL before configuration */
1175 mtdcri(SDR0, PESDR0_460SX_RCSSET,
1176 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1177 mtdcri(SDR0, PESDR1_460SX_RCSSET,
1178 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1179 mtdcri(SDR0, PESDR2_460SX_RCSSET,
1180 PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
1181
1182 udelay(100);
1183
1184 /*
1185 * If bifurcation is not enabled, u-boot would have disabled the
1186 * third PCIe port
1187 */
1188 if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
1189 0x00000001)) {
1190 printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n");
1191 printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
1192 return 3;
1193 }
1194
1195 printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
1196 return 2;
1197 }
1198
ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port * port)1199 static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
1200 {
1201
1202 if (port->endpoint)
1203 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1204 0x01000000, 0);
1205 else
1206 dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
1207 0, 0x01000000);
1208
1209 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
1210 (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
1211 PESDRx_RCSSET_RSTPYN);
1212
1213 port->has_ibpre = 1;
1214
1215 return ppc4xx_pciex_port_reset_sdr(port);
1216 }
1217
ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port * port)1218 static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
1219 {
1220 /* Max 128 Bytes */
1221 out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000);
1222 /* Assert VRB and TXE - per datasheet turn off addr validation */
1223 out_be32(port->utl_base + PEUTL_PCTL, 0x80800000);
1224 return 0;
1225 }
1226
ppc460sx_pciex_check_link(struct ppc4xx_pciex_port * port)1227 static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
1228 {
1229 void __iomem *mbase;
1230 int attempt = 50;
1231
1232 port->link = 0;
1233
1234 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1235 if (mbase == NULL) {
1236 printk(KERN_ERR "%pOF: Can't map internal config space !",
1237 port->node);
1238 return;
1239 }
1240
1241 while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
1242 & PECFG_460SX_DLLSTA_LINKUP))) {
1243 attempt--;
1244 mdelay(10);
1245 }
1246 if (attempt)
1247 port->link = 1;
1248 iounmap(mbase);
1249 }
1250
/* Hardware ops for 460SX: uses MMIO-based link check, not the SDR one */
static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
	.want_sdr	= true,
	.core_init	= ppc460sx_pciex_core_init,
	.port_init_hw	= ppc460sx_pciex_init_port_hw,
	.setup_utl	= ppc460sx_pciex_init_utl,
	.check_link	= ppc460sx_pciex_check_link,
};
1258
1259 #endif /* CONFIG_44x */
1260
1261 #ifdef CONFIG_476FPE
ppc_476fpe_pciex_core_init(struct device_node * np)1262 static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
1263 {
1264 return 4;
1265 }
1266
ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port * port)1267 static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
1268 {
1269 u32 timeout_ms = 20;
1270 u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
1271 void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
1272 0x1000);
1273
1274 printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
1275
1276 if (mbase == NULL) {
1277 printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
1278 port->index);
1279 return;
1280 }
1281
1282 while (timeout_ms--) {
1283 val = in_le32(mbase + PECFG_TLDLP);
1284
1285 if ((val & mask) == mask)
1286 break;
1287 msleep(10);
1288 }
1289
1290 if (val & PECFG_TLDLP_PRESENT) {
1291 printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
1292 port->link = 1;
1293 } else
1294 printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
1295
1296 iounmap(mbase);
1297 }
1298
/* Hardware ops for 476FPE/476GTR: no SDR, no per-port HW init or UTL setup */
static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
{
	.core_init	= ppc_476fpe_pciex_core_init,
	.check_link	= ppc_476fpe_pciex_check_link,
};
1304 #endif /* CONFIG_476FPE */
1305
1306 /* Check that the core has been initied and if not, do it */
ppc4xx_pciex_check_core_init(struct device_node * np)1307 static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1308 {
1309 static int core_init;
1310 int count = -ENODEV;
1311
1312 if (core_init++)
1313 return 0;
1314
1315 #ifdef CONFIG_44x
1316 if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
1317 if (ppc440spe_revA())
1318 ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
1319 else
1320 ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
1321 }
1322 if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
1323 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1324 if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1325 ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1326 if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
1327 ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
1328 #endif /* CONFIG_44x */
1329 #ifdef CONFIG_476FPE
1330 if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")
1331 || of_device_is_compatible(np, "ibm,plb-pciex-476gtr"))
1332 ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
1333 #endif
1334 if (ppc4xx_pciex_hwops == NULL) {
1335 printk(KERN_WARNING "PCIE: unknown host type %pOF\n", np);
1336 return -ENODEV;
1337 }
1338
1339 count = ppc4xx_pciex_hwops->core_init(np);
1340 if (count > 0) {
1341 ppc4xx_pciex_ports =
1342 kcalloc(count, sizeof(struct ppc4xx_pciex_port),
1343 GFP_KERNEL);
1344 if (ppc4xx_pciex_ports) {
1345 ppc4xx_pciex_port_count = count;
1346 return 0;
1347 }
1348 printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
1349 return -ENOMEM;
1350 }
1351 return -ENODEV;
1352 }
1353
ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port * port)1354 static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
1355 {
1356 /* We map PCI Express configuration based on the reg property */
1357 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
1358 RES_TO_U32_HIGH(port->cfg_space.start));
1359 dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
1360 RES_TO_U32_LOW(port->cfg_space.start));
1361
1362 /* XXX FIXME: Use size from reg property. For now, map 512M */
1363 dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
1364
1365 /* We map UTL registers based on the reg property */
1366 dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
1367 RES_TO_U32_HIGH(port->utl_regs.start));
1368 dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
1369 RES_TO_U32_LOW(port->utl_regs.start));
1370
1371 /* XXX FIXME: Use size from reg property */
1372 dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
1373
1374 /* Disable all other outbound windows */
1375 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
1376 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
1377 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
1378 dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
1379 }
1380
ppc4xx_pciex_port_init(struct ppc4xx_pciex_port * port)1381 static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
1382 {
1383 int rc = 0;
1384
1385 /* Init HW */
1386 if (ppc4xx_pciex_hwops->port_init_hw)
1387 rc = ppc4xx_pciex_hwops->port_init_hw(port);
1388 if (rc != 0)
1389 return rc;
1390
1391 /*
1392 * Initialize mapping: disable all regions and configure
1393 * CFG and REG regions based on resources in the device tree
1394 */
1395 ppc4xx_pciex_port_init_mapping(port);
1396
1397 if (ppc4xx_pciex_hwops->check_link)
1398 ppc4xx_pciex_hwops->check_link(port);
1399
1400 /*
1401 * Map UTL
1402 */
1403 port->utl_base = ioremap(port->utl_regs.start, 0x100);
1404 BUG_ON(port->utl_base == NULL);
1405
1406 /*
1407 * Setup UTL registers --BenH.
1408 */
1409 if (ppc4xx_pciex_hwops->setup_utl)
1410 ppc4xx_pciex_hwops->setup_utl(port);
1411
1412 /*
1413 * Check for VC0 active or PLL Locked and assert RDY.
1414 */
1415 if (port->sdr_base) {
1416 if (of_device_is_compatible(port->node,
1417 "ibm,plb-pciex-460sx")){
1418 if (port->link && ppc4xx_pciex_wait_on_sdr(port,
1419 PESDRn_RCSSTS,
1420 1 << 12, 1 << 12, 5000)) {
1421 printk(KERN_INFO "PCIE%d: PLL not locked\n",
1422 port->index);
1423 port->link = 0;
1424 }
1425 } else if (port->link &&
1426 ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1427 1 << 16, 1 << 16, 5000)) {
1428 printk(KERN_INFO "PCIE%d: VC0 not active\n",
1429 port->index);
1430 port->link = 0;
1431 }
1432
1433 dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
1434 }
1435
1436 msleep(100);
1437
1438 return 0;
1439 }
1440
ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port * port,struct pci_bus * bus,unsigned int devfn)1441 static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
1442 struct pci_bus *bus,
1443 unsigned int devfn)
1444 {
1445 static int message;
1446
1447 /* Endpoint can not generate upstream(remote) config cycles */
1448 if (port->endpoint && bus->number != port->hose->first_busno)
1449 return PCIBIOS_DEVICE_NOT_FOUND;
1450
1451 /* Check we are within the mapped range */
1452 if (bus->number > port->hose->last_busno) {
1453 if (!message) {
1454 printk(KERN_WARNING "Warning! Probing bus %u"
1455 " out of range !\n", bus->number);
1456 message++;
1457 }
1458 return PCIBIOS_DEVICE_NOT_FOUND;
1459 }
1460
1461 /* The root complex has only one device / function */
1462 if (bus->number == port->hose->first_busno && devfn != 0)
1463 return PCIBIOS_DEVICE_NOT_FOUND;
1464
1465 /* The other side of the RC has only one device as well */
1466 if (bus->number == (port->hose->first_busno + 1) &&
1467 PCI_SLOT(devfn) != 0)
1468 return PCIBIOS_DEVICE_NOT_FOUND;
1469
1470 /* Check if we have a link */
1471 if ((bus->number != port->hose->first_busno) && !port->link)
1472 return PCIBIOS_DEVICE_NOT_FOUND;
1473
1474 return 0;
1475 }
1476
ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port * port,struct pci_bus * bus,unsigned int devfn)1477 static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
1478 struct pci_bus *bus,
1479 unsigned int devfn)
1480 {
1481 int relbus;
1482
1483 /* Remove the casts when we finally remove the stupid volatile
1484 * in struct pci_controller
1485 */
1486 if (bus->number == port->hose->first_busno)
1487 return (void __iomem *)port->hose->cfg_addr;
1488
1489 relbus = bus->number - (port->hose->first_busno + 1);
1490 return (void __iomem *)port->hose->cfg_data +
1491 ((relbus << 20) | (devfn << 12));
1492 }
1493
ppc4xx_pciex_read_config(struct pci_bus * bus,unsigned int devfn,int offset,int len,u32 * val)1494 static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
1495 int offset, int len, u32 *val)
1496 {
1497 struct pci_controller *hose = pci_bus_to_host(bus);
1498 struct ppc4xx_pciex_port *port =
1499 &ppc4xx_pciex_ports[hose->indirect_type];
1500 void __iomem *addr;
1501 u32 gpl_cfg;
1502
1503 BUG_ON(hose != port->hose);
1504
1505 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1506 return PCIBIOS_DEVICE_NOT_FOUND;
1507
1508 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1509
1510 /*
1511 * Reading from configuration space of non-existing device can
1512 * generate transaction errors. For the read duration we suppress
1513 * assertion of machine check exceptions to avoid those.
1514 */
1515 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1516 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1517
1518 /* Make sure no CRS is recorded */
1519 out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
1520
1521 switch (len) {
1522 case 1:
1523 *val = in_8((u8 *)(addr + offset));
1524 break;
1525 case 2:
1526 *val = in_le16((u16 *)(addr + offset));
1527 break;
1528 default:
1529 *val = in_le32((u32 *)(addr + offset));
1530 break;
1531 }
1532
1533 pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
1534 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1535 bus->number, hose->first_busno, hose->last_busno,
1536 devfn, offset, len, addr + offset, *val);
1537
1538 /* Check for CRS (440SPe rev B does that for us but heh ..) */
1539 if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
1540 pr_debug("Got CRS !\n");
1541 if (len != 4 || offset != 0)
1542 return PCIBIOS_DEVICE_NOT_FOUND;
1543 *val = 0xffff0001;
1544 }
1545
1546 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1547
1548 return PCIBIOS_SUCCESSFUL;
1549 }
1550
ppc4xx_pciex_write_config(struct pci_bus * bus,unsigned int devfn,int offset,int len,u32 val)1551 static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
1552 int offset, int len, u32 val)
1553 {
1554 struct pci_controller *hose = pci_bus_to_host(bus);
1555 struct ppc4xx_pciex_port *port =
1556 &ppc4xx_pciex_ports[hose->indirect_type];
1557 void __iomem *addr;
1558 u32 gpl_cfg;
1559
1560 if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
1561 return PCIBIOS_DEVICE_NOT_FOUND;
1562
1563 addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
1564
1565 /*
1566 * Reading from configuration space of non-existing device can
1567 * generate transaction errors. For the read duration we suppress
1568 * assertion of machine check exceptions to avoid those.
1569 */
1570 gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
1571 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
1572
1573 pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
1574 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
1575 bus->number, hose->first_busno, hose->last_busno,
1576 devfn, offset, len, addr + offset, val);
1577
1578 switch (len) {
1579 case 1:
1580 out_8((u8 *)(addr + offset), val);
1581 break;
1582 case 2:
1583 out_le16((u16 *)(addr + offset), val);
1584 break;
1585 default:
1586 out_le32((u32 *)(addr + offset), val);
1587 break;
1588 }
1589
1590 dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
1591
1592 return PCIBIOS_SUCCESSFUL;
1593 }
1594
/* Config-space accessors installed on every 4xx PCIe hose */
static struct pci_ops ppc4xx_pciex_pci_ops =
{
	.read  = ppc4xx_pciex_read_config,
	.write = ppc4xx_pciex_write_config,
};
1600
ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port * port,struct pci_controller * hose,void __iomem * mbase,u64 plb_addr,u64 pci_addr,u64 size,unsigned int flags,int index)1601 static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
1602 struct pci_controller *hose,
1603 void __iomem *mbase,
1604 u64 plb_addr,
1605 u64 pci_addr,
1606 u64 size,
1607 unsigned int flags,
1608 int index)
1609 {
1610 u32 lah, lal, pciah, pcial, sa;
1611
1612 if (!is_power_of_2(size) ||
1613 (index < 2 && size < 0x100000) ||
1614 (index == 2 && size < 0x100) ||
1615 (plb_addr & (size - 1)) != 0) {
1616 printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
1617 return -1;
1618 }
1619
1620 /* Calculate register values */
1621 lah = RES_TO_U32_HIGH(plb_addr);
1622 lal = RES_TO_U32_LOW(plb_addr);
1623 pciah = RES_TO_U32_HIGH(pci_addr);
1624 pcial = RES_TO_U32_LOW(pci_addr);
1625 sa = (0xffffffffu << ilog2(size)) | 0x1;
1626
1627 /* Program register values */
1628 switch (index) {
1629 case 0:
1630 out_le32(mbase + PECFG_POM0LAH, pciah);
1631 out_le32(mbase + PECFG_POM0LAL, pcial);
1632 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
1633 dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
1634 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
1635 /*Enabled and single region */
1636 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1637 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1638 sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
1639 | DCRO_PEGPL_OMRxMSKL_VAL);
1640 else if (of_device_is_compatible(
1641 port->node, "ibm,plb-pciex-476fpe") ||
1642 of_device_is_compatible(
1643 port->node, "ibm,plb-pciex-476gtr"))
1644 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1645 sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
1646 | DCRO_PEGPL_OMRxMSKL_VAL);
1647 else
1648 dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
1649 sa | DCRO_PEGPL_OMR1MSKL_UOT
1650 | DCRO_PEGPL_OMRxMSKL_VAL);
1651 break;
1652 case 1:
1653 out_le32(mbase + PECFG_POM1LAH, pciah);
1654 out_le32(mbase + PECFG_POM1LAL, pcial);
1655 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
1656 dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
1657 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
1658 dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
1659 sa | DCRO_PEGPL_OMRxMSKL_VAL);
1660 break;
1661 case 2:
1662 out_le32(mbase + PECFG_POM2LAH, pciah);
1663 out_le32(mbase + PECFG_POM2LAL, pcial);
1664 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
1665 dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
1666 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
1667 /* Note that 3 here means enabled | IO space !!! */
1668 dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
1669 sa | DCRO_PEGPL_OMR3MSKL_IO
1670 | DCRO_PEGPL_OMRxMSKL_VAL);
1671 break;
1672 }
1673
1674 return 0;
1675 }
1676
ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port * port,struct pci_controller * hose,void __iomem * mbase)1677 static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
1678 struct pci_controller *hose,
1679 void __iomem *mbase)
1680 {
1681 int i, j, found_isa_hole = 0;
1682
1683 /* Setup outbound memory windows */
1684 for (i = j = 0; i < 3; i++) {
1685 struct resource *res = &hose->mem_resources[i];
1686 resource_size_t offset = hose->mem_offset[i];
1687
1688 /* we only care about memory windows */
1689 if (!(res->flags & IORESOURCE_MEM))
1690 continue;
1691 if (j > 1) {
1692 printk(KERN_WARNING "%pOF: Too many ranges\n",
1693 port->node);
1694 break;
1695 }
1696
1697 /* Configure the resource */
1698 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1699 res->start,
1700 res->start - offset,
1701 resource_size(res),
1702 res->flags,
1703 j) == 0) {
1704 j++;
1705
1706 /* If the resource PCI address is 0 then we have our
1707 * ISA memory hole
1708 */
1709 if (res->start == offset)
1710 found_isa_hole = 1;
1711 }
1712 }
1713
1714 /* Handle ISA memory hole if not already covered */
1715 if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
1716 if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1717 hose->isa_mem_phys, 0,
1718 hose->isa_mem_size, 0, j) == 0)
1719 printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
1720 hose->dn);
1721
1722 /* Configure IO, always 64K starting at 0. We hard wire it to 64K !
1723 * Note also that it -has- to be region index 2 on this HW
1724 */
1725 if (hose->io_resource.flags & IORESOURCE_IO)
1726 ppc4xx_setup_one_pciex_POM(port, hose, mbase,
1727 hose->io_base_phys, 0,
1728 0x10000, IORESOURCE_IO, 2);
1729 }
1730
ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port * port,struct pci_controller * hose,void __iomem * mbase,struct resource * res)1731 static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
1732 struct pci_controller *hose,
1733 void __iomem *mbase,
1734 struct resource *res)
1735 {
1736 resource_size_t size = resource_size(res);
1737 u64 sa;
1738
1739 if (port->endpoint) {
1740 resource_size_t ep_addr = 0;
1741 resource_size_t ep_size = 32 << 20;
1742
1743 /* Currently we map a fixed 64MByte window to PLB address
1744 * 0 (SDRAM). This should probably be configurable via a dts
1745 * property.
1746 */
1747
1748 /* Calculate window size */
1749 sa = (0xffffffffffffffffull << ilog2(ep_size));
1750
1751 /* Setup BAR0 */
1752 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1753 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
1754 PCI_BASE_ADDRESS_MEM_TYPE_64);
1755
1756 /* Disable BAR1 & BAR2 */
1757 out_le32(mbase + PECFG_BAR1MPA, 0);
1758 out_le32(mbase + PECFG_BAR2HMPA, 0);
1759 out_le32(mbase + PECFG_BAR2LMPA, 0);
1760
1761 out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
1762 out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
1763
1764 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
1765 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
1766 } else {
1767 /* Calculate window size */
1768 sa = (0xffffffffffffffffull << ilog2(size));
1769 if (res->flags & IORESOURCE_PREFETCH)
1770 sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
1771
1772 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
1773 of_device_is_compatible(
1774 port->node, "ibm,plb-pciex-476fpe") ||
1775 of_device_is_compatible(
1776 port->node, "ibm,plb-pciex-476gtr"))
1777 sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
1778
1779 out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
1780 out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
1781
1782 /* The setup of the split looks weird to me ... let's see
1783 * if it works
1784 */
1785 out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
1786 out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
1787 out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
1788 out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
1789 out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
1790 out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
1791
1792 out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
1793 out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
1794 }
1795
1796 /* Enable inbound mapping */
1797 out_le32(mbase + PECFG_PIMEN, 0x1);
1798
1799 /* Enable I/O, Mem, and Busmaster cycles */
1800 out_le16(mbase + PCI_COMMAND,
1801 in_le16(mbase + PCI_COMMAND) |
1802 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1803 }
1804
ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port * port)1805 static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
1806 {
1807 struct resource dma_window;
1808 struct pci_controller *hose = NULL;
1809 const int *bus_range;
1810 int primary, busses;
1811 void __iomem *mbase = NULL, *cfg_data = NULL;
1812 const u32 *pval;
1813 u32 val;
1814
1815 /* Check if primary bridge */
1816 primary = of_property_read_bool(port->node, "primary");
1817
1818 /* Get bus range if any */
1819 bus_range = of_get_property(port->node, "bus-range", NULL);
1820
1821 /* Allocate the host controller data structure */
1822 hose = pcibios_alloc_controller(port->node);
1823 if (!hose)
1824 goto fail;
1825
1826 /* We stick the port number in "indirect_type" so the config space
1827 * ops can retrieve the port data structure easily
1828 */
1829 hose->indirect_type = port->index;
1830
1831 /* Get bus range */
1832 hose->first_busno = bus_range ? bus_range[0] : 0x0;
1833 hose->last_busno = bus_range ? bus_range[1] : 0xff;
1834
1835 /* Because of how big mapping the config space is (1M per bus), we
1836 * limit how many busses we support. In the long run, we could replace
1837 * that with something akin to kmap_atomic instead. We set aside 1 bus
1838 * for the host itself too.
1839 */
1840 busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
1841 if (busses > MAX_PCIE_BUS_MAPPED) {
1842 busses = MAX_PCIE_BUS_MAPPED;
1843 hose->last_busno = hose->first_busno + busses;
1844 }
1845
1846 if (!port->endpoint) {
1847 /* Only map the external config space in cfg_data for
1848 * PCIe root-complexes. External space is 1M per bus
1849 */
1850 cfg_data = ioremap(port->cfg_space.start +
1851 (hose->first_busno + 1) * 0x100000,
1852 busses * 0x100000);
1853 if (cfg_data == NULL) {
1854 printk(KERN_ERR "%pOF: Can't map external config space !",
1855 port->node);
1856 goto fail;
1857 }
1858 hose->cfg_data = cfg_data;
1859 }
1860
1861 /* Always map the host config space in cfg_addr.
1862 * Internal space is 4K
1863 */
1864 mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
1865 if (mbase == NULL) {
1866 printk(KERN_ERR "%pOF: Can't map internal config space !",
1867 port->node);
1868 goto fail;
1869 }
1870 hose->cfg_addr = mbase;
1871
1872 pr_debug("PCIE %pOF, bus %d..%d\n", port->node,
1873 hose->first_busno, hose->last_busno);
1874 pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
1875 hose->cfg_addr, hose->cfg_data);
1876
1877 /* Setup config space */
1878 hose->ops = &ppc4xx_pciex_pci_ops;
1879 port->hose = hose;
1880 mbase = (void __iomem *)hose->cfg_addr;
1881
1882 if (!port->endpoint) {
1883 /*
1884 * Set bus numbers on our root port
1885 */
1886 out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
1887 out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
1888 out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
1889 }
1890
1891 /*
1892 * OMRs are already reset, also disable PIMs
1893 */
1894 out_le32(mbase + PECFG_PIMEN, 0);
1895
1896 /* Parse outbound mapping resources */
1897 pci_process_bridge_OF_ranges(hose, port->node, primary);
1898
1899 /* Parse inbound mapping resources */
1900 if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
1901 goto fail;
1902
1903 /* Configure outbound ranges POMs */
1904 ppc4xx_configure_pciex_POMs(port, hose, mbase);
1905
1906 /* Configure inbound ranges PIMs */
1907 ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
1908
1909 /* The root complex doesn't show up if we don't set some vendor
1910 * and device IDs into it. The defaults below are the same bogus
1911 * one that the initial code in arch/ppc had. This can be
1912 * overwritten by setting the "vendor-id/device-id" properties
1913 * in the pciex node.
1914 */
1915
1916 /* Get the (optional) vendor-/device-id from the device-tree */
1917 pval = of_get_property(port->node, "vendor-id", NULL);
1918 if (pval) {
1919 val = *pval;
1920 } else {
1921 if (!port->endpoint)
1922 val = 0xaaa0 + port->index;
1923 else
1924 val = 0xeee0 + port->index;
1925 }
1926 out_le16(mbase + 0x200, val);
1927
1928 pval = of_get_property(port->node, "device-id", NULL);
1929 if (pval) {
1930 val = *pval;
1931 } else {
1932 if (!port->endpoint)
1933 val = 0xbed0 + port->index;
1934 else
1935 val = 0xfed0 + port->index;
1936 }
1937 out_le16(mbase + 0x202, val);
1938
1939 /* Enable Bus master, memory, and io space */
1940 if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
1941 out_le16(mbase + 0x204, 0x7);
1942
1943 if (!port->endpoint) {
1944 /* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
1945 out_le32(mbase + 0x208, 0x06040001);
1946
1947 printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
1948 port->index);
1949 } else {
1950 /* Set Class Code to Processor/PPC */
1951 out_le32(mbase + 0x208, 0x0b200001);
1952
1953 printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
1954 port->index);
1955 }
1956
1957 return;
1958 fail:
1959 if (hose)
1960 pcibios_free_controller(hose);
1961 if (cfg_data)
1962 iounmap(cfg_data);
1963 if (mbase)
1964 iounmap(mbase);
1965 }
1966
ppc4xx_probe_pciex_bridge(struct device_node * np)1967 static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
1968 {
1969 struct ppc4xx_pciex_port *port;
1970 const u32 *pval;
1971 int portno;
1972 unsigned int dcrs;
1973
1974 /* First, proceed to core initialization as we assume there's
1975 * only one PCIe core in the system
1976 */
1977 if (ppc4xx_pciex_check_core_init(np))
1978 return;
1979
1980 /* Get the port number from the device-tree */
1981 pval = of_get_property(np, "port", NULL);
1982 if (pval == NULL) {
1983 printk(KERN_ERR "PCIE: Can't find port number for %pOF\n", np);
1984 return;
1985 }
1986 portno = *pval;
1987 if (portno >= ppc4xx_pciex_port_count) {
1988 printk(KERN_ERR "PCIE: port number out of range for %pOF\n",
1989 np);
1990 return;
1991 }
1992 port = &ppc4xx_pciex_ports[portno];
1993 port->index = portno;
1994
1995 /*
1996 * Check if device is enabled
1997 */
1998 if (!of_device_is_available(np)) {
1999 printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
2000 return;
2001 }
2002
2003 port->node = of_node_get(np);
2004 if (ppc4xx_pciex_hwops->want_sdr) {
2005 pval = of_get_property(np, "sdr-base", NULL);
2006 if (pval == NULL) {
2007 printk(KERN_ERR "PCIE: missing sdr-base for %pOF\n",
2008 np);
2009 return;
2010 }
2011 port->sdr_base = *pval;
2012 }
2013
2014 /* Check if device_type property is set to "pci" or "pci-endpoint".
2015 * Resulting from this setup this PCIe port will be configured
2016 * as root-complex or as endpoint.
2017 */
2018 if (of_node_is_type(port->node, "pci-endpoint")) {
2019 port->endpoint = 1;
2020 } else if (of_node_is_type(port->node, "pci")) {
2021 port->endpoint = 0;
2022 } else {
2023 printk(KERN_ERR "PCIE: missing or incorrect device_type for %pOF\n",
2024 np);
2025 return;
2026 }
2027
2028 /* Fetch config space registers address */
2029 if (of_address_to_resource(np, 0, &port->cfg_space)) {
2030 printk(KERN_ERR "%pOF: Can't get PCI-E config space !", np);
2031 return;
2032 }
2033 /* Fetch host bridge internal registers address */
2034 if (of_address_to_resource(np, 1, &port->utl_regs)) {
2035 printk(KERN_ERR "%pOF: Can't get UTL register base !", np);
2036 return;
2037 }
2038
2039 /* Map DCRs */
2040 dcrs = dcr_resource_start(np, 0);
2041 if (dcrs == 0) {
2042 printk(KERN_ERR "%pOF: Can't get DCR register base !", np);
2043 return;
2044 }
2045 port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
2046
2047 /* Initialize the port specific registers */
2048 if (ppc4xx_pciex_port_init(port)) {
2049 printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
2050 return;
2051 }
2052
2053 /* Setup the linux hose data structure */
2054 ppc4xx_pciex_port_setup_hose(port);
2055 }
2056
2057 #endif /* CONFIG_PPC4xx_PCI_EXPRESS */
2058
ppc4xx_pci_find_bridges(void)2059 static int __init ppc4xx_pci_find_bridges(void)
2060 {
2061 struct device_node *np;
2062
2063 pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
2064
2065 #ifdef CONFIG_PPC4xx_PCI_EXPRESS
2066 for_each_compatible_node(np, NULL, "ibm,plb-pciex")
2067 ppc4xx_probe_pciex_bridge(np);
2068 #endif
2069 for_each_compatible_node(np, NULL, "ibm,plb-pcix")
2070 ppc4xx_probe_pcix_bridge(np);
2071 for_each_compatible_node(np, NULL, "ibm,plb-pci")
2072 ppc4xx_probe_pci_bridge(np);
2073
2074 return 0;
2075 }
2076 arch_initcall(ppc4xx_pci_find_bridges);
2077
2078