1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Support for the Tundra TSI148 VME-PCI Bridge Chip
4 *
5 * Author: Martyn Welch <[email protected]>
6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
7 *
8 * Based on work by Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
10 */
11
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/mm.h>
15 #include <linux/types.h>
16 #include <linux/errno.h>
17 #include <linux/proc_fs.h>
18 #include <linux/pci.h>
19 #include <linux/poll.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/spinlock.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/time.h>
26 #include <linux/io.h>
27 #include <linux/uaccess.h>
28 #include <linux/byteorder/generic.h>
29
30 #include "vme.h"
31 #include "vme_bridge.h"
32 #include "vme_tsi148.h"
33
34 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
35 static void tsi148_remove(struct pci_dev *);
36
/* Module parameters */
38 static bool err_chk;
39 static u32 geoid;
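
/*
 * Both can be set at module load time, e.g. (a sketch assuming the usual
 * module_param() handling further down this file):
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=4
 *
 * err_chk enables VME bus error checking on master reads and writes;
 * geoid overrides the slot-derived geographical address used when the
 * CR/CSR space is mapped.
 */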
40
41 static const char driver_name[] = "vme_tsi148";
42
43 static const struct pci_device_id tsi148_ids[] = {
44 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
45 { },
46 };
47
48 MODULE_DEVICE_TABLE(pci, tsi148_ids);
49
50 static struct pci_driver tsi148_driver = {
51 .name = driver_name,
52 .id_table = tsi148_ids,
53 .probe = tsi148_probe,
54 .remove = tsi148_remove,
55 };
56
static void reg_join(unsigned int high, unsigned int low,
58 unsigned long long *variable)
59 {
60 *variable = (unsigned long long)high << 32;
61 *variable |= (unsigned long long)low;
62 }
63
static void reg_split(unsigned long long variable, unsigned int *high,
65 unsigned int *low)
66 {
67 *low = (unsigned int)variable & 0xFFFFFFFF;
68 *high = (unsigned int)(variable >> 32);
69 }
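
/*
 * The TSI148 exposes every 64-bit address or offset as an upper/lower
 * register pair, so these helpers are used whenever window, error or DMA
 * registers are programmed or read back. A quick sketch of the round trip:
 *
 *	reg_split(0x0000000180000000ULL, &hi, &lo);
 *		=> hi = 0x00000001, lo = 0x80000000
 *	reg_join(hi, lo, &addr);
 *		=> addr = 0x0000000180000000
 */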
70
71 /*
72 * Wakes up DMA queue.
73 */
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
75 int channel_mask)
76 {
77 u32 serviced = 0;
78
79 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
80 wake_up(&bridge->dma_queue[0]);
81 serviced |= TSI148_LCSR_INTC_DMA0C;
82 }
83 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
84 wake_up(&bridge->dma_queue[1]);
85 serviced |= TSI148_LCSR_INTC_DMA1C;
86 }
87
88 return serviced;
89 }
90
91 /*
 * Call the location monitor callbacks for any monitors that have triggered.
93 */
static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
95 {
96 int i;
97 u32 serviced = 0;
98
99 for (i = 0; i < 4; i++) {
100 if (stat & TSI148_LCSR_INTS_LMS[i]) {
101 /* We only enable interrupts if the callback is set */
102 bridge->lm_callback[i](bridge->lm_data[i]);
103 serviced |= TSI148_LCSR_INTC_LMC[i];
104 }
105 }
106
107 return serviced;
108 }
109
110 /*
 * Report any mailbox messages that have been received.
 *
 * XXX This functionality is not exposed up through the API.
114 */
static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
116 {
117 int i;
118 u32 val;
119 u32 serviced = 0;
120 struct tsi148_driver *bridge;
121
122 bridge = tsi148_bridge->driver_priv;
123
124 for (i = 0; i < 4; i++) {
125 if (stat & TSI148_LCSR_INTS_MBS[i]) {
126 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
127 dev_err(tsi148_bridge->parent, "VME Mailbox %d received: 0x%x\n",
128 i, val);
129 serviced |= TSI148_LCSR_INTC_MBC[i];
130 }
131 }
132
133 return serviced;
134 }
135
136 /*
137 * Display error & status message when PERR (PCI) exception interrupt occurs.
138 */
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
140 {
141 struct tsi148_driver *bridge;
142
143 bridge = tsi148_bridge->driver_priv;
144
145 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
146 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
147 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
148 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
149
150 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
151 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
152 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
153
154 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
155
156 return TSI148_LCSR_INTC_PERRC;
157 }
158
159 /*
160 * Save address and status when VME error interrupt occurs.
161 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
163 {
164 unsigned int error_addr_high, error_addr_low;
165 unsigned long long error_addr;
166 u32 error_attrib;
167 int error_am;
168 struct tsi148_driver *bridge;
169
170 bridge = tsi148_bridge->driver_priv;
171
172 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
173 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
174 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
175 error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
176
177 reg_join(error_addr_high, error_addr_low, &error_addr);
178
179 /* Check for exception register overflow (we have lost error data) */
180 if (error_attrib & TSI148_LCSR_VEAT_VEOF)
181 dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow Occurred\n");
182
183 if (err_chk)
184 vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
185 else
186 dev_err(tsi148_bridge->parent,
187 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
188 error_addr, error_attrib);
189
190 /* Clear Status */
191 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
192
193 return TSI148_LCSR_INTC_VERRC;
194 }
195
196 /*
197 * Wake up IACK queue.
198 */
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
200 {
201 wake_up(&bridge->iack_queue);
202
203 return TSI148_LCSR_INTC_IACKC;
204 }
205
206 /*
 * Call the VME bus interrupt callback if provided.
208 */
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
210 u32 stat)
211 {
212 int vec, i, serviced = 0;
213 struct tsi148_driver *bridge;
214
215 bridge = tsi148_bridge->driver_priv;
216
217 for (i = 7; i > 0; i--) {
218 if (stat & (1 << i)) {
219 /*
220 * Note: Even though the registers are defined as
221 * 32-bits in the spec, we only want to issue 8-bit
222 * IACK cycles on the bus, read from offset 3.
223 */
224 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
225
226 vme_irq_handler(tsi148_bridge, i, vec);
227
228 serviced |= (1 << i);
229 }
230 }
231
232 return serviced;
233 }
234
235 /*
236 * Top level interrupt handler. Clears appropriate interrupt status bits and
237 * then calls appropriate sub handler(s).
238 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
240 {
241 u32 stat, enable, serviced = 0;
242 struct vme_bridge *tsi148_bridge;
243 struct tsi148_driver *bridge;
244
245 tsi148_bridge = ptr;
246
247 bridge = tsi148_bridge->driver_priv;
248
249 /* Determine which interrupts are unmasked and set */
250 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
251 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
252
253 /* Only look at unmasked interrupts */
254 stat &= enable;
255
256 if (unlikely(!stat))
257 return IRQ_NONE;
258
259 /* Call subhandlers as appropriate */
260 /* DMA irqs */
261 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
262 serviced |= tsi148_DMA_irqhandler(bridge, stat);
263
264 /* Location monitor irqs */
265 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
266 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
267 serviced |= tsi148_LM_irqhandler(bridge, stat);
268
269 /* Mail box irqs */
270 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
271 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
272 serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
273
274 /* PCI bus error */
275 if (stat & TSI148_LCSR_INTS_PERRS)
276 serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
277
278 /* VME bus error */
279 if (stat & TSI148_LCSR_INTS_VERRS)
280 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
281
282 /* IACK irq */
283 if (stat & TSI148_LCSR_INTS_IACKS)
284 serviced |= tsi148_IACK_irqhandler(bridge);
285
286 /* VME bus irqs */
287 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
288 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
289 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
290 TSI148_LCSR_INTS_IRQ1S))
291 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
292
293 /* Clear serviced interrupts */
294 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
295
296 return IRQ_HANDLED;
297 }
298
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
300 {
301 int result;
302 unsigned int tmp;
303 struct pci_dev *pdev;
304 struct tsi148_driver *bridge;
305
306 pdev = to_pci_dev(tsi148_bridge->parent);
307
308 bridge = tsi148_bridge->driver_priv;
309
310 result = request_irq(pdev->irq,
311 tsi148_irqhandler,
312 IRQF_SHARED,
313 driver_name, tsi148_bridge);
314 if (result) {
315 dev_err(tsi148_bridge->parent, "Can't get assigned pci irq vector %02X\n",
316 pdev->irq);
317 return result;
318 }
319
320 /* Enable and unmask interrupts */
321 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
322 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
323 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
324 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
325 TSI148_LCSR_INTEO_IACKEO;
326
327 /* This leaves the following interrupts masked.
328 * TSI148_LCSR_INTEO_VIEEO
329 * TSI148_LCSR_INTEO_SYSFLEO
330 * TSI148_LCSR_INTEO_ACFLEO
331 */
332
333 /* Don't enable Location Monitor interrupts here - they will be
334 * enabled when the location monitors are properly configured and
335 * a callback has been attached.
336 * TSI148_LCSR_INTEO_LM0EO
337 * TSI148_LCSR_INTEO_LM1EO
338 * TSI148_LCSR_INTEO_LM2EO
339 * TSI148_LCSR_INTEO_LM3EO
340 */
341
342 /* Don't enable VME interrupts until we add a handler, else the board
343 * will respond to it and we don't want that unless it knows how to
344 * properly deal with it.
345 * TSI148_LCSR_INTEO_IRQ7EO
346 * TSI148_LCSR_INTEO_IRQ6EO
347 * TSI148_LCSR_INTEO_IRQ5EO
348 * TSI148_LCSR_INTEO_IRQ4EO
349 * TSI148_LCSR_INTEO_IRQ3EO
350 * TSI148_LCSR_INTEO_IRQ2EO
351 * TSI148_LCSR_INTEO_IRQ1EO
352 */
353
354 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
355 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
356
357 return 0;
358 }
359
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
361 struct pci_dev *pdev)
362 {
363 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
364
365 /* Turn off interrupts */
366 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
367 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
368
369 /* Clear all interrupts */
370 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
371
372 /* Detach interrupt handler */
373 free_irq(pdev->irq, tsi148_bridge);
374 }
375
376 /*
 * Check to see if an IACK has been received, return true (1) or false (0).
378 */
static int tsi148_iack_received(struct tsi148_driver *bridge)
380 {
381 u32 tmp;
382
383 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
384
385 if (tmp & TSI148_LCSR_VICR_IRQS)
386 return 0;
387 else
388 return 1;
389 }
390
391 /*
392 * Configure VME interrupt
393 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
395 int state, int sync)
396 {
397 struct pci_dev *pdev;
398 u32 tmp;
399 struct tsi148_driver *bridge;
400
401 bridge = tsi148_bridge->driver_priv;
402
403 /* We need to do the ordering differently for enabling and disabling */
404 if (state == 0) {
405 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
406 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
407 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
408
409 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
410 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
411 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
412
413 if (sync != 0) {
414 pdev = to_pci_dev(tsi148_bridge->parent);
415 synchronize_irq(pdev->irq);
416 }
417 } else {
418 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
419 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
420 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
421
422 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
423 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
424 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
425 }
426 }
427
428 /*
429 * Generate a VME bus interrupt at the requested level & vector. Wait for
430 * interrupt to be acked.
431 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
433 int statid)
434 {
435 u32 tmp;
436 struct tsi148_driver *bridge;
437
438 bridge = tsi148_bridge->driver_priv;
439
440 mutex_lock(&bridge->vme_int);
441
442 /* Read VICR register */
443 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
444
445 /* Set Status/ID */
446 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
447 (statid & TSI148_LCSR_VICR_STID_M);
448 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
449
450 /* Assert VMEbus IRQ */
451 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
452 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
453
454 /* XXX Consider implementing a timeout? */
455 wait_event_interruptible(bridge->iack_queue,
456 tsi148_iack_received(bridge));
457
458 mutex_unlock(&bridge->vme_int);
459
460 return 0;
461 }
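
/*
 * Board drivers do not call this directly; it is reached through the VME
 * core's irq_generate hook. A minimal sketch, assuming the standard vme.h
 * helper:
 *
 *	retval = vme_irq_generate(vdev, 5, 0xAA);
 *
 * which asserts IRQ5 with status/ID 0xAA and returns once the interrupt
 * has been acknowledged.
 */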
462
463 /*
464 * Initialize a slave window with the requested attributes.
465 */
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
467 unsigned long long vme_base, unsigned long long size,
468 dma_addr_t pci_base, u32 aspace, u32 cycle)
469 {
470 unsigned int i, addr = 0, granularity = 0;
471 unsigned int temp_ctl = 0;
472 unsigned int vme_base_low, vme_base_high;
473 unsigned int vme_bound_low, vme_bound_high;
474 unsigned int pci_offset_low, pci_offset_high;
475 unsigned long long vme_bound, pci_offset;
476 struct vme_bridge *tsi148_bridge;
477 struct tsi148_driver *bridge;
478
479 tsi148_bridge = image->parent;
480 bridge = tsi148_bridge->driver_priv;
481
482 i = image->number;
483
484 switch (aspace) {
485 case VME_A16:
486 granularity = 0x10;
487 addr |= TSI148_LCSR_ITAT_AS_A16;
488 break;
489 case VME_A24:
490 granularity = 0x1000;
491 addr |= TSI148_LCSR_ITAT_AS_A24;
492 break;
493 case VME_A32:
494 granularity = 0x10000;
495 addr |= TSI148_LCSR_ITAT_AS_A32;
496 break;
497 case VME_A64:
498 granularity = 0x10000;
499 addr |= TSI148_LCSR_ITAT_AS_A64;
500 break;
501 default:
502 dev_err(tsi148_bridge->parent, "Invalid address space\n");
503 return -EINVAL;
504 }
505
506 /* Convert 64-bit variables to 2x 32-bit variables */
507 reg_split(vme_base, &vme_base_high, &vme_base_low);
508
509 /*
510 * Bound address is a valid address for the window, adjust
511 * accordingly
512 */
513 vme_bound = vme_base + size - granularity;
514 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
515 pci_offset = (unsigned long long)pci_base - vme_base;
516 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
517
518 if (vme_base_low & (granularity - 1)) {
519 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
520 return -EINVAL;
521 }
522 if (vme_bound_low & (granularity - 1)) {
523 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
524 return -EINVAL;
525 }
526 if (pci_offset_low & (granularity - 1)) {
527 dev_err(tsi148_bridge->parent, "Invalid PCI Offset alignment\n");
528 return -EINVAL;
529 }
530
531 /* Disable while we are mucking around */
532 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
533 TSI148_LCSR_OFFSET_ITAT);
534 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
535 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
536 TSI148_LCSR_OFFSET_ITAT);
537
538 /* Setup mapping */
539 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
540 TSI148_LCSR_OFFSET_ITSAU);
541 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
542 TSI148_LCSR_OFFSET_ITSAL);
543 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
544 TSI148_LCSR_OFFSET_ITEAU);
545 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
546 TSI148_LCSR_OFFSET_ITEAL);
547 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
548 TSI148_LCSR_OFFSET_ITOFU);
549 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
550 TSI148_LCSR_OFFSET_ITOFL);
551
552 /* Setup 2eSST speeds */
553 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
554 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
555 case VME_2eSST160:
556 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
557 break;
558 case VME_2eSST267:
559 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
560 break;
561 case VME_2eSST320:
562 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
563 break;
564 }
565
566 /* Setup cycle types */
567 temp_ctl &= ~(0x1F << 7);
568 if (cycle & VME_BLT)
569 temp_ctl |= TSI148_LCSR_ITAT_BLT;
570 if (cycle & VME_MBLT)
571 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
572 if (cycle & VME_2eVME)
573 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
574 if (cycle & VME_2eSST)
575 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
576 if (cycle & VME_2eSSTB)
577 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
578
579 /* Setup address space */
580 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
581 temp_ctl |= addr;
582
583 temp_ctl &= ~0xF;
584 if (cycle & VME_SUPER)
585 temp_ctl |= TSI148_LCSR_ITAT_SUPR;
586 if (cycle & VME_USER)
587 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
588 if (cycle & VME_PROG)
589 temp_ctl |= TSI148_LCSR_ITAT_PGM;
590 if (cycle & VME_DATA)
591 temp_ctl |= TSI148_LCSR_ITAT_DATA;
592
593 /* Write ctl reg without enable */
594 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
595 TSI148_LCSR_OFFSET_ITAT);
596
597 if (enabled)
598 temp_ctl |= TSI148_LCSR_ITAT_EN;
599
600 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
601 TSI148_LCSR_OFFSET_ITAT);
602
603 return 0;
604 }
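
/*
 * Consumers reach this through the VME core rather than calling it
 * directly. A minimal sketch, assuming the standard vme.h API and a
 * buffer previously obtained with vme_alloc_consistent():
 *
 *	image = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	retval = vme_slave_set(image, 1, 0x100000, 0x10000, buf_dma,
 *			       VME_A24, VME_SCT | VME_USER | VME_DATA);
 */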
605
606 /*
607 * Get slave window configuration.
608 */
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
610 unsigned long long *vme_base, unsigned long long *size,
611 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
612 {
613 unsigned int i, granularity = 0, ctl = 0;
614 unsigned int vme_base_low, vme_base_high;
615 unsigned int vme_bound_low, vme_bound_high;
616 unsigned int pci_offset_low, pci_offset_high;
617 unsigned long long vme_bound, pci_offset;
618 struct tsi148_driver *bridge;
619
620 bridge = image->parent->driver_priv;
621
622 i = image->number;
623
624 /* Read registers */
625 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
626 TSI148_LCSR_OFFSET_ITAT);
627
628 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
629 TSI148_LCSR_OFFSET_ITSAU);
630 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
631 TSI148_LCSR_OFFSET_ITSAL);
632 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
633 TSI148_LCSR_OFFSET_ITEAU);
634 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
635 TSI148_LCSR_OFFSET_ITEAL);
636 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
637 TSI148_LCSR_OFFSET_ITOFU);
638 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
639 TSI148_LCSR_OFFSET_ITOFL);
640
	/* Combine the 2x 32-bit register values into 64-bit variables */
642 reg_join(vme_base_high, vme_base_low, vme_base);
643 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
644 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
645
646 *pci_base = (dma_addr_t)(*vme_base + pci_offset);
647
648 *enabled = 0;
649 *aspace = 0;
650 *cycle = 0;
651
652 if (ctl & TSI148_LCSR_ITAT_EN)
653 *enabled = 1;
654
655 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
656 granularity = 0x10;
657 *aspace |= VME_A16;
658 }
659 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
660 granularity = 0x1000;
661 *aspace |= VME_A24;
662 }
663 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
664 granularity = 0x10000;
665 *aspace |= VME_A32;
666 }
667 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
668 granularity = 0x10000;
669 *aspace |= VME_A64;
670 }
671
672 /* Need granularity before we set the size */
673 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
674
675 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
676 *cycle |= VME_2eSST160;
677 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
678 *cycle |= VME_2eSST267;
679 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
680 *cycle |= VME_2eSST320;
681
682 if (ctl & TSI148_LCSR_ITAT_BLT)
683 *cycle |= VME_BLT;
684 if (ctl & TSI148_LCSR_ITAT_MBLT)
685 *cycle |= VME_MBLT;
686 if (ctl & TSI148_LCSR_ITAT_2eVME)
687 *cycle |= VME_2eVME;
688 if (ctl & TSI148_LCSR_ITAT_2eSST)
689 *cycle |= VME_2eSST;
690 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
691 *cycle |= VME_2eSSTB;
692
693 if (ctl & TSI148_LCSR_ITAT_SUPR)
694 *cycle |= VME_SUPER;
695 if (ctl & TSI148_LCSR_ITAT_NPRIV)
696 *cycle |= VME_USER;
697 if (ctl & TSI148_LCSR_ITAT_PGM)
698 *cycle |= VME_PROG;
699 if (ctl & TSI148_LCSR_ITAT_DATA)
700 *cycle |= VME_DATA;
701
702 return 0;
703 }
704
705 /*
706 * Allocate and map PCI Resource
707 */
static int tsi148_alloc_resource(struct vme_master_resource *image,
709 unsigned long long size)
710 {
711 unsigned long long existing_size;
712 int retval = 0;
713 struct pci_dev *pdev;
714 struct vme_bridge *tsi148_bridge;
715
716 tsi148_bridge = image->parent;
717
718 pdev = to_pci_dev(tsi148_bridge->parent);
719
720 existing_size = (unsigned long long)(image->bus_resource.end -
721 image->bus_resource.start);
722
723 /* If the existing size is OK, return */
724 if ((size != 0) && (existing_size == (size - 1)))
725 return 0;
726
727 if (existing_size != 0) {
728 iounmap(image->kern_base);
729 image->kern_base = NULL;
730 kfree(image->bus_resource.name);
731 release_resource(&image->bus_resource);
732 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
733 }
734
735 /* Exit here if size is zero */
736 if (size == 0)
737 return 0;
738
739 if (!image->bus_resource.name) {
740 image->bus_resource.name = kmalloc(VMENAMSIZ + 3, GFP_ATOMIC);
741 if (!image->bus_resource.name) {
742 retval = -ENOMEM;
743 goto err_name;
744 }
745 }
746
747 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
748 image->number);
749
750 image->bus_resource.start = 0;
751 image->bus_resource.end = (unsigned long)size;
752 image->bus_resource.flags = IORESOURCE_MEM;
753
754 retval = pci_bus_alloc_resource(pdev->bus, &image->bus_resource,
755 size, 0x10000, PCIBIOS_MIN_MEM,
756 0, NULL, NULL);
757 if (retval) {
758 dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
759 image->number, (unsigned long)size,
760 (unsigned long)image->bus_resource.start);
761 goto err_resource;
762 }
763
764 image->kern_base = ioremap(image->bus_resource.start, size);
765 if (!image->kern_base) {
766 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
767 retval = -ENOMEM;
768 goto err_remap;
769 }
770
771 return 0;
772
773 err_remap:
774 release_resource(&image->bus_resource);
775 err_resource:
776 kfree(image->bus_resource.name);
777 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
778 err_name:
779 return retval;
780 }
781
782 /*
783 * Free and unmap PCI Resource
784 */
static void tsi148_free_resource(struct vme_master_resource *image)
786 {
787 iounmap(image->kern_base);
788 image->kern_base = NULL;
789 release_resource(&image->bus_resource);
790 kfree(image->bus_resource.name);
791 memset(&image->bus_resource, 0, sizeof(image->bus_resource));
792 }
793
794 /*
795 * Set the attributes of an outbound window.
796 */
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
798 unsigned long long vme_base, unsigned long long size,
799 u32 aspace, u32 cycle, u32 dwidth)
800 {
801 int retval = 0;
802 unsigned int i;
803 unsigned int temp_ctl = 0;
804 unsigned int pci_base_low, pci_base_high;
805 unsigned int pci_bound_low, pci_bound_high;
806 unsigned int vme_offset_low, vme_offset_high;
807 unsigned long long pci_bound, vme_offset, pci_base;
808 struct vme_bridge *tsi148_bridge;
809 struct tsi148_driver *bridge;
810 struct pci_bus_region region;
811 struct pci_dev *pdev;
812
813 tsi148_bridge = image->parent;
814
815 bridge = tsi148_bridge->driver_priv;
816
817 pdev = to_pci_dev(tsi148_bridge->parent);
818
819 /* Verify input data */
820 if (vme_base & 0xFFFF) {
821 dev_err(tsi148_bridge->parent, "Invalid VME Window alignment\n");
822 retval = -EINVAL;
823 goto err_window;
824 }
825
826 if ((size == 0) && (enabled != 0)) {
827 dev_err(tsi148_bridge->parent, "Size must be non-zero for enabled windows\n");
828 retval = -EINVAL;
829 goto err_window;
830 }
831
832 spin_lock(&image->lock);
833
834 /* Let's allocate the resource here rather than further up the stack as
835 * it avoids pushing loads of bus dependent stuff up the stack. If size
836 * is zero, any existing resource will be freed.
837 */
838 retval = tsi148_alloc_resource(image, size);
839 if (retval) {
840 spin_unlock(&image->lock);
841 dev_err(tsi148_bridge->parent, "Unable to allocate memory for resource\n");
842 goto err_res;
843 }
844
845 if (size == 0) {
846 pci_base = 0;
847 pci_bound = 0;
848 vme_offset = 0;
849 } else {
		pcibios_resource_to_bus(pdev->bus, &region,
851 &image->bus_resource);
852 pci_base = region.start;
853
854 /*
855 * Bound address is a valid address for the window, adjust
856 * according to window granularity.
857 */
858 pci_bound = pci_base + (size - 0x10000);
859 vme_offset = vme_base - pci_base;
860 }
861
862 /* Convert 64-bit variables to 2x 32-bit variables */
863 reg_split(pci_base, &pci_base_high, &pci_base_low);
864 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
865 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
866
867 if (pci_base_low & 0xFFFF) {
868 spin_unlock(&image->lock);
869 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
870 retval = -EINVAL;
871 goto err_gran;
872 }
873 if (pci_bound_low & 0xFFFF) {
874 spin_unlock(&image->lock);
875 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
876 retval = -EINVAL;
877 goto err_gran;
878 }
879 if (vme_offset_low & 0xFFFF) {
880 spin_unlock(&image->lock);
881 dev_err(tsi148_bridge->parent, "Invalid VME Offset alignment\n");
882 retval = -EINVAL;
883 goto err_gran;
884 }
885
886 i = image->number;
887
888 /* Disable while we are mucking around */
889 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
890 TSI148_LCSR_OFFSET_OTAT);
891 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
892 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
893 TSI148_LCSR_OFFSET_OTAT);
894
895 /* Setup 2eSST speeds */
896 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
897 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
898 case VME_2eSST160:
899 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
900 break;
901 case VME_2eSST267:
902 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
903 break;
904 case VME_2eSST320:
905 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
906 break;
907 }
908
909 /* Setup cycle types */
910 if (cycle & VME_BLT) {
911 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
912 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
913 }
914 if (cycle & VME_MBLT) {
915 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
916 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
917 }
918 if (cycle & VME_2eVME) {
919 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
920 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
921 }
922 if (cycle & VME_2eSST) {
923 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
924 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
925 }
926 if (cycle & VME_2eSSTB) {
927 dev_warn(tsi148_bridge->parent, "Currently not setting Broadcast Select Registers\n");
928 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
929 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
930 }
931
932 /* Setup data width */
933 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
934 switch (dwidth) {
935 case VME_D16:
936 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
937 break;
938 case VME_D32:
939 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
940 break;
941 default:
942 spin_unlock(&image->lock);
943 dev_err(tsi148_bridge->parent, "Invalid data width\n");
944 retval = -EINVAL;
945 goto err_dwidth;
946 }
947
948 /* Setup address space */
949 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
950 switch (aspace) {
951 case VME_A16:
952 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
953 break;
954 case VME_A24:
955 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
956 break;
957 case VME_A32:
958 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
959 break;
960 case VME_A64:
961 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
962 break;
963 case VME_CRCSR:
964 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
965 break;
966 case VME_USER1:
967 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
968 break;
969 case VME_USER2:
970 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
971 break;
972 case VME_USER3:
973 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
974 break;
975 case VME_USER4:
976 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
977 break;
978 default:
979 spin_unlock(&image->lock);
980 dev_err(tsi148_bridge->parent, "Invalid address space\n");
981 retval = -EINVAL;
982 goto err_aspace;
983 }
984
985 temp_ctl &= ~(3 << 4);
986 if (cycle & VME_SUPER)
987 temp_ctl |= TSI148_LCSR_OTAT_SUP;
988 if (cycle & VME_PROG)
989 temp_ctl |= TSI148_LCSR_OTAT_PGM;
990
991 /* Setup mapping */
992 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
993 TSI148_LCSR_OFFSET_OTSAU);
994 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
995 TSI148_LCSR_OFFSET_OTSAL);
996 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
997 TSI148_LCSR_OFFSET_OTEAU);
998 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
999 TSI148_LCSR_OFFSET_OTEAL);
1000 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1001 TSI148_LCSR_OFFSET_OTOFU);
1002 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1003 TSI148_LCSR_OFFSET_OTOFL);
1004
1005 /* Write ctl reg without enable */
1006 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1007 TSI148_LCSR_OFFSET_OTAT);
1008
1009 if (enabled)
1010 temp_ctl |= TSI148_LCSR_OTAT_EN;
1011
1012 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1013 TSI148_LCSR_OFFSET_OTAT);
1014
1015 spin_unlock(&image->lock);
1016 return 0;
1017
1018 err_aspace:
1019 err_dwidth:
1020 err_gran:
1021 tsi148_free_resource(image);
1022 err_res:
1023 err_window:
1024 return retval;
1025 }
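
/*
 * As with the slave windows, this is normally driven via the VME core. A
 * minimal sketch, assuming the standard vme.h helpers:
 *
 *	image = vme_master_request(vdev, VME_A32, VME_SCT | VME_USER | VME_DATA,
 *				   VME_D32);
 *	retval = vme_master_set(image, 1, 0x20000000, 0x10000,
 *				VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);
 */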
1026
1027 /*
 * Get the attributes of an outbound window.
1029 *
1030 * XXX Not parsing prefetch information.
1031 */
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1033 unsigned long long *vme_base, unsigned long long *size,
1034 u32 *aspace, u32 *cycle, u32 *dwidth)
1035 {
1036 unsigned int i, ctl;
1037 unsigned int pci_base_low, pci_base_high;
1038 unsigned int pci_bound_low, pci_bound_high;
1039 unsigned int vme_offset_low, vme_offset_high;
1040
1041 unsigned long long pci_base, pci_bound, vme_offset;
1042 struct tsi148_driver *bridge;
1043
1044 bridge = image->parent->driver_priv;
1045
1046 i = image->number;
1047
1048 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1049 TSI148_LCSR_OFFSET_OTAT);
1050
1051 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1052 TSI148_LCSR_OFFSET_OTSAU);
1053 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1054 TSI148_LCSR_OFFSET_OTSAL);
1055 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1056 TSI148_LCSR_OFFSET_OTEAU);
1057 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1058 TSI148_LCSR_OFFSET_OTEAL);
1059 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1060 TSI148_LCSR_OFFSET_OTOFU);
1061 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1062 TSI148_LCSR_OFFSET_OTOFL);
1063
	/* Combine the 2x 32-bit register values into 64-bit variables */
1065 reg_join(pci_base_high, pci_base_low, &pci_base);
1066 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1067 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1068
1069 *vme_base = pci_base + vme_offset;
1070 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1071
1072 *enabled = 0;
1073 *aspace = 0;
1074 *cycle = 0;
1075 *dwidth = 0;
1076
1077 if (ctl & TSI148_LCSR_OTAT_EN)
1078 *enabled = 1;
1079
1080 /* Setup address space */
1081 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1082 *aspace |= VME_A16;
1083 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1084 *aspace |= VME_A24;
1085 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1086 *aspace |= VME_A32;
1087 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1088 *aspace |= VME_A64;
1089 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1090 *aspace |= VME_CRCSR;
1091 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1092 *aspace |= VME_USER1;
1093 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1094 *aspace |= VME_USER2;
1095 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1096 *aspace |= VME_USER3;
1097 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1098 *aspace |= VME_USER4;
1099
1100 /* Setup 2eSST speeds */
1101 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1102 *cycle |= VME_2eSST160;
1103 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1104 *cycle |= VME_2eSST267;
1105 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1106 *cycle |= VME_2eSST320;
1107
1108 /* Setup cycle types */
1109 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1110 *cycle |= VME_SCT;
1111 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1112 *cycle |= VME_BLT;
1113 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1114 *cycle |= VME_MBLT;
1115 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1116 *cycle |= VME_2eVME;
1117 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1118 *cycle |= VME_2eSST;
1119 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1120 *cycle |= VME_2eSSTB;
1121
1122 if (ctl & TSI148_LCSR_OTAT_SUP)
1123 *cycle |= VME_SUPER;
1124 else
1125 *cycle |= VME_USER;
1126
1127 if (ctl & TSI148_LCSR_OTAT_PGM)
1128 *cycle |= VME_PROG;
1129 else
1130 *cycle |= VME_DATA;
1131
1132 /* Setup data width */
1133 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1134 *dwidth = VME_D16;
1135 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1136 *dwidth = VME_D32;
1137
1138 return 0;
1139 }
1140
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1142 unsigned long long *vme_base, unsigned long long *size,
1143 u32 *aspace, u32 *cycle, u32 *dwidth)
1144 {
1145 int retval;
1146
1147 spin_lock(&image->lock);
1148
1149 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1150 cycle, dwidth);
1151
1152 spin_unlock(&image->lock);
1153
1154 return retval;
1155 }
1156
static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1158 size_t count, loff_t offset)
1159 {
1160 int retval, enabled;
1161 unsigned long long vme_base, size;
1162 u32 aspace, cycle, dwidth;
1163 struct vme_error_handler *handler = NULL;
1164 struct vme_bridge *tsi148_bridge;
1165 void __iomem *addr = image->kern_base + offset;
1166 unsigned int done = 0;
1167 unsigned int count32;
1168
1169 tsi148_bridge = image->parent;
1170
1171 spin_lock(&image->lock);
1172
1173 if (err_chk) {
1174 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1175 &cycle, &dwidth);
1176 handler = vme_register_error_handler(tsi148_bridge, aspace,
1177 vme_base + offset, count);
1178 if (!handler) {
1179 spin_unlock(&image->lock);
1180 return -ENOMEM;
1181 }
1182 }
1183
1184 /* The following code handles VME address alignment. We cannot use
1185 * memcpy_xxx here because it may cut data transfers in to 8-bit
1186 * cycles when D16 or D32 cycles are required on the VME bus.
1187 * On the other hand, the bridge itself assures that the maximum data
1188 * cycle configured for the transfer is used and splits it
1189 * automatically for non-aligned addresses, so we don't want the
1190 * overhead of needlessly forcing small transfers for the entire cycle.
1191 */
1192 if ((uintptr_t)addr & 0x1) {
1193 *(u8 *)buf = ioread8(addr);
1194 done += 1;
1195 if (done == count)
1196 goto out;
1197 }
1198 if ((uintptr_t)(addr + done) & 0x2) {
1199 if ((count - done) < 2) {
1200 *(u8 *)(buf + done) = ioread8(addr + done);
1201 done += 1;
1202 goto out;
1203 } else {
1204 *(u16 *)(buf + done) = ioread16(addr + done);
1205 done += 2;
1206 }
1207 }
1208
1209 count32 = (count - done) & ~0x3;
1210 while (done < count32) {
1211 *(u32 *)(buf + done) = ioread32(addr + done);
1212 done += 4;
1213 }
1214
1215 if ((count - done) & 0x2) {
1216 *(u16 *)(buf + done) = ioread16(addr + done);
1217 done += 2;
1218 }
1219 if ((count - done) & 0x1) {
1220 *(u8 *)(buf + done) = ioread8(addr + done);
1221 done += 1;
1222 }
1223
1224 out:
1225 retval = count;
1226
1227 if (err_chk) {
1228 if (handler->num_errors) {
1229 dev_err(image->parent->parent,
1230 "First VME read error detected an at address 0x%llx\n",
1231 handler->first_error);
1232 retval = handler->first_error - (vme_base + offset);
1233 }
1234 vme_unregister_error_handler(handler);
1235 }
1236
1237 spin_unlock(&image->lock);
1238
1239 return retval;
1240 }
1241
static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1243 size_t count, loff_t offset)
1244 {
1245 int retval = 0, enabled;
1246 unsigned long long vme_base, size;
1247 u32 aspace, cycle, dwidth;
1248 void __iomem *addr = image->kern_base + offset;
1249 unsigned int done = 0;
1250 unsigned int count32;
1251
1252 struct vme_error_handler *handler = NULL;
1253 struct vme_bridge *tsi148_bridge;
1254 struct tsi148_driver *bridge;
1255
1256 tsi148_bridge = image->parent;
1257
1258 bridge = tsi148_bridge->driver_priv;
1259
1260 spin_lock(&image->lock);
1261
1262 if (err_chk) {
1263 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1264 &cycle, &dwidth);
1265 handler = vme_register_error_handler(tsi148_bridge, aspace,
1266 vme_base + offset, count);
1267 if (!handler) {
1268 spin_unlock(&image->lock);
1269 return -ENOMEM;
1270 }
1271 }
1272
	/* Here we apply the same strategy as in master_read in order to
	 * ensure the correct cycles are used on the VME bus.
1275 */
1276 if ((uintptr_t)addr & 0x1) {
1277 iowrite8(*(u8 *)buf, addr);
1278 done += 1;
1279 if (done == count)
1280 goto out;
1281 }
1282 if ((uintptr_t)(addr + done) & 0x2) {
1283 if ((count - done) < 2) {
1284 iowrite8(*(u8 *)(buf + done), addr + done);
1285 done += 1;
1286 goto out;
1287 } else {
1288 iowrite16(*(u16 *)(buf + done), addr + done);
1289 done += 2;
1290 }
1291 }
1292
1293 count32 = (count - done) & ~0x3;
1294 while (done < count32) {
1295 iowrite32(*(u32 *)(buf + done), addr + done);
1296 done += 4;
1297 }
1298
1299 if ((count - done) & 0x2) {
1300 iowrite16(*(u16 *)(buf + done), addr + done);
1301 done += 2;
1302 }
1303 if ((count - done) & 0x1) {
1304 iowrite8(*(u8 *)(buf + done), addr + done);
1305 done += 1;
1306 }
1307
1308 out:
1309 retval = count;
1310
1311 /*
1312 * Writes are posted. We need to do a read on the VME bus to flush out
1313 * all of the writes before we check for errors. We can't guarantee
1314 * that reading the data we have just written is safe. It is believed
	 * that there is no read/write re-ordering, so we can read from any
	 * location in VME space; let's read the Device ID from the tsi148's
1317 * own registers as mapped into CR/CSR space.
1318 *
1319 * We check for saved errors in the written address range/space.
1320 */
1321
1322 if (err_chk) {
1323 ioread16(bridge->flush_image->kern_base + 0x7F000);
1324
1325 if (handler->num_errors) {
1326 dev_warn(tsi148_bridge->parent,
1327 "First VME write error detected an at address 0x%llx\n",
1328 handler->first_error);
1329 retval = handler->first_error - (vme_base + offset);
1330 }
1331 vme_unregister_error_handler(handler);
1332 }
1333
1334 spin_unlock(&image->lock);
1335
1336 return retval;
1337 }
1338
1339 /*
1340 * Perform an RMW cycle on the VME bus.
1341 *
1342 * Requires a previously configured master window, returns final value.
1343 */
static unsigned int tsi148_master_rmw(struct vme_master_resource *image, unsigned int mask,
1345 unsigned int compare, unsigned int swap, loff_t offset)
1346 {
1347 unsigned long long pci_addr;
1348 unsigned int pci_addr_high, pci_addr_low;
1349 u32 tmp, result;
1350 int i;
1351 struct tsi148_driver *bridge;
1352
1353 bridge = image->parent->driver_priv;
1354
1355 /* Find the PCI address that maps to the desired VME address */
1356 i = image->number;
1357
1358 /* Locking as we can only do one of these at a time */
1359 mutex_lock(&bridge->vme_rmw);
1360
1361 /* Lock image */
1362 spin_lock(&image->lock);
1363
1364 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1365 TSI148_LCSR_OFFSET_OTSAU);
1366 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1367 TSI148_LCSR_OFFSET_OTSAL);
1368
1369 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1370 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1371
1372 /* Configure registers */
1373 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1374 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1375 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1376 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1377 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1378
1379 /* Enable RMW */
1380 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1381 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1382 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1383
1384 /* Kick process off with a read to the required address. */
1385 result = ioread32be(image->kern_base + offset);
1386
1387 /* Disable RMW */
1388 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1389 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1390 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1391
1392 spin_unlock(&image->lock);
1393
1394 mutex_unlock(&bridge->vme_rmw);
1395
1396 return result;
1397 }
1398
static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1400 u32 aspace, u32 cycle, u32 dwidth)
1401 {
1402 u32 val;
1403
1404 val = be32_to_cpu(*attr);
1405
1406 /* Setup 2eSST speeds */
1407 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1408 case VME_2eSST160:
1409 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1410 break;
1411 case VME_2eSST267:
1412 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1413 break;
1414 case VME_2eSST320:
1415 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1416 break;
1417 }
1418
1419 /* Setup cycle types */
1420 if (cycle & VME_SCT)
1421 val |= TSI148_LCSR_DSAT_TM_SCT;
1422
1423 if (cycle & VME_BLT)
1424 val |= TSI148_LCSR_DSAT_TM_BLT;
1425
1426 if (cycle & VME_MBLT)
1427 val |= TSI148_LCSR_DSAT_TM_MBLT;
1428
1429 if (cycle & VME_2eVME)
1430 val |= TSI148_LCSR_DSAT_TM_2eVME;
1431
1432 if (cycle & VME_2eSST)
1433 val |= TSI148_LCSR_DSAT_TM_2eSST;
1434
1435 if (cycle & VME_2eSSTB) {
1436 dev_err(dev, "Currently not setting Broadcast Select Registers\n");
1437 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1438 }
1439
1440 /* Setup data width */
1441 switch (dwidth) {
1442 case VME_D16:
1443 val |= TSI148_LCSR_DSAT_DBW_16;
1444 break;
1445 case VME_D32:
1446 val |= TSI148_LCSR_DSAT_DBW_32;
1447 break;
1448 default:
1449 dev_err(dev, "Invalid data width\n");
1450 return -EINVAL;
1451 }
1452
1453 /* Setup address space */
1454 switch (aspace) {
1455 case VME_A16:
1456 val |= TSI148_LCSR_DSAT_AMODE_A16;
1457 break;
1458 case VME_A24:
1459 val |= TSI148_LCSR_DSAT_AMODE_A24;
1460 break;
1461 case VME_A32:
1462 val |= TSI148_LCSR_DSAT_AMODE_A32;
1463 break;
1464 case VME_A64:
1465 val |= TSI148_LCSR_DSAT_AMODE_A64;
1466 break;
1467 case VME_CRCSR:
1468 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1469 break;
1470 case VME_USER1:
1471 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1472 break;
1473 case VME_USER2:
1474 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1475 break;
1476 case VME_USER3:
1477 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1478 break;
1479 case VME_USER4:
1480 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1481 break;
1482 default:
1483 dev_err(dev, "Invalid address space\n");
1484 return -EINVAL;
1485 }
1486
1487 if (cycle & VME_SUPER)
1488 val |= TSI148_LCSR_DSAT_SUP;
1489 if (cycle & VME_PROG)
1490 val |= TSI148_LCSR_DSAT_PGM;
1491
1492 *attr = cpu_to_be32(val);
1493
1494 return 0;
1495 }
1496
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1498 u32 aspace, u32 cycle, u32 dwidth)
1499 {
1500 u32 val;
1501
1502 val = be32_to_cpu(*attr);
1503
1504 /* Setup 2eSST speeds */
1505 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1506 case VME_2eSST160:
1507 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1508 break;
1509 case VME_2eSST267:
1510 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1511 break;
1512 case VME_2eSST320:
1513 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1514 break;
1515 }
1516
1517 /* Setup cycle types */
1518 if (cycle & VME_SCT)
1519 val |= TSI148_LCSR_DDAT_TM_SCT;
1520
1521 if (cycle & VME_BLT)
1522 val |= TSI148_LCSR_DDAT_TM_BLT;
1523
1524 if (cycle & VME_MBLT)
1525 val |= TSI148_LCSR_DDAT_TM_MBLT;
1526
1527 if (cycle & VME_2eVME)
1528 val |= TSI148_LCSR_DDAT_TM_2eVME;
1529
1530 if (cycle & VME_2eSST)
1531 val |= TSI148_LCSR_DDAT_TM_2eSST;
1532
1533 if (cycle & VME_2eSSTB) {
1534 dev_err(dev, "Currently not setting Broadcast Select Registers\n");
1535 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1536 }
1537
1538 /* Setup data width */
1539 switch (dwidth) {
1540 case VME_D16:
1541 val |= TSI148_LCSR_DDAT_DBW_16;
1542 break;
1543 case VME_D32:
1544 val |= TSI148_LCSR_DDAT_DBW_32;
1545 break;
1546 default:
1547 dev_err(dev, "Invalid data width\n");
1548 return -EINVAL;
1549 }
1550
1551 /* Setup address space */
1552 switch (aspace) {
1553 case VME_A16:
1554 val |= TSI148_LCSR_DDAT_AMODE_A16;
1555 break;
1556 case VME_A24:
1557 val |= TSI148_LCSR_DDAT_AMODE_A24;
1558 break;
1559 case VME_A32:
1560 val |= TSI148_LCSR_DDAT_AMODE_A32;
1561 break;
1562 case VME_A64:
1563 val |= TSI148_LCSR_DDAT_AMODE_A64;
1564 break;
1565 case VME_CRCSR:
1566 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1567 break;
1568 case VME_USER1:
1569 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1570 break;
1571 case VME_USER2:
1572 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1573 break;
1574 case VME_USER3:
1575 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1576 break;
1577 case VME_USER4:
1578 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1579 break;
1580 default:
1581 dev_err(dev, "Invalid address space\n");
1582 return -EINVAL;
1583 }
1584
1585 if (cycle & VME_SUPER)
1586 val |= TSI148_LCSR_DDAT_SUP;
1587 if (cycle & VME_PROG)
1588 val |= TSI148_LCSR_DDAT_PGM;
1589
1590 *attr = cpu_to_be32(val);
1591
1592 return 0;
1593 }
1594
1595 /*
1596 * Add a link list descriptor to the list
1597 *
1598 * Note: DMA engine expects the DMA descriptor to be big endian.
1599 */
static int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
1601 struct vme_dma_attr *dest, size_t count)
1602 {
1603 struct tsi148_dma_entry *entry, *prev;
1604 u32 address_high, address_low, val;
1605 struct vme_dma_pattern *pattern_attr;
1606 struct vme_dma_pci *pci_attr;
1607 struct vme_dma_vme *vme_attr;
1608 int retval = 0;
1609 struct vme_bridge *tsi148_bridge;
1610
1611 tsi148_bridge = list->parent->parent;
1612
1613 /* Descriptor must be aligned on 64-bit boundaries */
1614 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1615 if (!entry) {
1616 retval = -ENOMEM;
1617 goto err_mem;
1618 }
1619
1620 /* Test descriptor alignment */
1621 if ((unsigned long)&entry->descriptor & 0x7) {
1622 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 byte boundary as required: %p\n",
1623 &entry->descriptor);
1624 retval = -EINVAL;
1625 goto err_align;
1626 }
1627
1628 /* Given we are going to fill out the structure, we probably don't
1629 * need to zero it, but better safe than sorry for now.
1630 */
1631 memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1632
1633 /* Fill out source part */
1634 switch (src->type) {
1635 case VME_DMA_PATTERN:
1636 pattern_attr = src->private;
1637
1638 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1639
1640 val = TSI148_LCSR_DSAT_TYP_PAT;
1641
1642 /* Default behaviour is 32 bit pattern */
1643 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1644 val |= TSI148_LCSR_DSAT_PSZ;
1645
1646 /* It seems that the default behaviour is to increment */
1647 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1648 val |= TSI148_LCSR_DSAT_NIN;
1649 entry->descriptor.dsat = cpu_to_be32(val);
1650 break;
1651 case VME_DMA_PCI:
1652 pci_attr = src->private;
1653
1654 reg_split((unsigned long long)pci_attr->address, &address_high, &address_low);
1655 entry->descriptor.dsau = cpu_to_be32(address_high);
1656 entry->descriptor.dsal = cpu_to_be32(address_low);
1657 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1658 break;
1659 case VME_DMA_VME:
1660 vme_attr = src->private;
1661
1662 reg_split((unsigned long long)vme_attr->address, &address_high, &address_low);
1663 entry->descriptor.dsau = cpu_to_be32(address_high);
1664 entry->descriptor.dsal = cpu_to_be32(address_low);
1665 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1666
1667 retval = tsi148_dma_set_vme_src_attributes(tsi148_bridge->parent,
1668 &entry->descriptor.dsat,
1669 vme_attr->aspace,
1670 vme_attr->cycle,
1671 vme_attr->dwidth);
1672 if (retval < 0)
1673 goto err_source;
1674 break;
1675 default:
1676 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1677 retval = -EINVAL;
1678 goto err_source;
1679 }
1680
1681 /* Assume last link - this will be over-written by adding another */
1682 entry->descriptor.dnlau = cpu_to_be32(0);
1683 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1684
1685 /* Fill out destination part */
1686 switch (dest->type) {
1687 case VME_DMA_PCI:
1688 pci_attr = dest->private;
1689
1690 reg_split((unsigned long long)pci_attr->address, &address_high,
1691 &address_low);
1692 entry->descriptor.ddau = cpu_to_be32(address_high);
1693 entry->descriptor.ddal = cpu_to_be32(address_low);
1694 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1695 break;
1696 case VME_DMA_VME:
1697 vme_attr = dest->private;
1698
1699 reg_split((unsigned long long)vme_attr->address, &address_high,
1700 &address_low);
1701 entry->descriptor.ddau = cpu_to_be32(address_high);
1702 entry->descriptor.ddal = cpu_to_be32(address_low);
1703 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1704
1705 retval = tsi148_dma_set_vme_dest_attributes(tsi148_bridge->parent,
1706 &entry->descriptor.ddat,
1707 vme_attr->aspace,
1708 vme_attr->cycle,
1709 vme_attr->dwidth);
1710 if (retval < 0)
1711 goto err_dest;
1712 break;
1713 default:
1714 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1715 retval = -EINVAL;
1716 goto err_dest;
1717 }
1718
1719 /* Fill out count */
1720 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1721
1722 /* Add to list */
1723 list_add_tail(&entry->list, &list->entries);
1724
1725 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1726 &entry->descriptor,
1727 sizeof(entry->descriptor),
1728 DMA_TO_DEVICE);
1729 if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1730 dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1731 retval = -EINVAL;
1732 goto err_dma;
1733 }
1734
1735 /* Fill out previous descriptors "Next Address" */
1736 if (entry->list.prev != &list->entries) {
1737 reg_split((unsigned long long)entry->dma_handle, &address_high,
1738 &address_low);
1739 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1740 list);
1741 prev->descriptor.dnlau = cpu_to_be32(address_high);
1742 prev->descriptor.dnlal = cpu_to_be32(address_low);
1743 }
1744
1745 return 0;
1746
1747 err_dma:
1748 list_del(&entry->list);
1749 err_dest:
1750 err_source:
1751 err_align:
1752 kfree(entry);
1753 err_mem:
1754 return retval;
1755 }
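
/*
 * A typical consumer builds and runs a list through the VME core, roughly
 * as below (a sketch assuming the standard vme.h DMA helpers; attribute
 * freeing and error handling omitted):
 *
 *	list = vme_new_dma_list(dma_resource);
 *	src = vme_dma_pci_attribute(buf_dma);
 *	dest = vme_dma_vme_attribute(0x20000000, VME_A32,
 *				     VME_SCT | VME_USER | VME_DATA, VME_D32);
 *	vme_dma_list_add(list, src, dest, 4096);
 *	vme_dma_list_exec(list);
 *	vme_dma_list_free(list);
 */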
1756
1757 /*
1758 * Check to see if the provided DMA channel is busy.
1759 */
static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1761 {
1762 u32 tmp;
1763 struct tsi148_driver *bridge;
1764
1765 bridge = tsi148_bridge->driver_priv;
1766
1767 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1768 TSI148_LCSR_OFFSET_DSTA);
1769
1770 if (tmp & TSI148_LCSR_DSTA_BSY)
1771 return 0;
1772 else
1773 return 1;
1774 }
1775
1776 /*
1777 * Execute a previously generated link list
1778 *
1779 * XXX Need to provide control register configuration.
1780 */
static int tsi148_dma_list_exec(struct vme_dma_list *list)
1782 {
1783 struct vme_dma_resource *ctrlr;
1784 int channel, retval;
1785 struct tsi148_dma_entry *entry;
1786 u32 bus_addr_high, bus_addr_low;
1787 u32 val, dctlreg = 0;
1788 struct vme_bridge *tsi148_bridge;
1789 struct tsi148_driver *bridge;
1790
1791 ctrlr = list->parent;
1792
1793 tsi148_bridge = ctrlr->parent;
1794
1795 bridge = tsi148_bridge->driver_priv;
1796
1797 mutex_lock(&ctrlr->mtx);
1798
1799 channel = ctrlr->number;
1800
1801 if (!list_empty(&ctrlr->running)) {
1802 /*
1803 * XXX We have an active DMA transfer and currently haven't
1804 * sorted out the mechanism for "pending" DMA transfers.
1805 * Return busy.
1806 */
1807 /* Need to add to pending here */
1808 mutex_unlock(&ctrlr->mtx);
1809 return -EBUSY;
1810 }
1811
1812 list_add(&list->list, &ctrlr->running);
1813
1814 /* Get first bus address and write into registers */
1815 entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1816 list);
1817
1818 mutex_unlock(&ctrlr->mtx);
1819
1820 reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1821
1822 iowrite32be(bus_addr_high, bridge->base +
1823 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1824 iowrite32be(bus_addr_low, bridge->base +
1825 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1826
1827 dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1828 TSI148_LCSR_OFFSET_DCTL);
1829
1830 /* Start the operation */
1831 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1832 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1833
1834 retval = wait_event_interruptible(bridge->dma_queue[channel],
1835 tsi148_dma_busy(ctrlr->parent, channel));
1836
1837 if (retval) {
1838 iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1839 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1840 /* Wait for the operation to abort */
1841 wait_event(bridge->dma_queue[channel],
1842 tsi148_dma_busy(ctrlr->parent, channel));
1843 retval = -EINTR;
1844 goto exit;
1845 }
1846
1847 /*
1848  * Read the status register; it remains valid until we kick off a
1849 * new transfer.
1850 */
1851 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1852 TSI148_LCSR_OFFSET_DSTA);
1853
1854 if (val & TSI148_LCSR_DSTA_VBE) {
1855 dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1856 retval = -EIO;
1857 }
1858
1859 exit:
1860 /* Remove list from running list */
1861 mutex_lock(&ctrlr->mtx);
1862 list_del(&list->list);
1863 mutex_unlock(&ctrlr->mtx);
1864
1865 return retval;
1866 }
1867
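/*
 * Illustrative sketch (not part of this driver): VME device drivers normally
 * reach the dma_list_add/dma_list_exec methods above through the helpers
 * exported by the VME core rather than calling the bridge directly. Assuming
 * the vme_dma_* helper names from vme.h, a single PCI-memory to VME transfer
 * might look roughly like this (vdev, bus_addr, vme_addr and count are
 * placeholders, and error handling is omitted):
 *
 *	struct vme_resource *res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
 *	struct vme_dma_list *dl = vme_new_dma_list(res);
 *	struct vme_dma_attr *src = vme_dma_pci_attribute(bus_addr);
 *	struct vme_dma_attr *dst = vme_dma_vme_attribute(vme_addr, VME_A32,
 *							 VME_SCT, VME_D32);
 *
 *	vme_dma_list_add(dl, src, dst, count);
 *	vme_dma_list_exec(dl);		(blocks until the transfer completes)
 *	vme_dma_list_free(dl);
 *	vme_dma_free_attribute(dst);
 *	vme_dma_free_attribute(src);
 *	vme_dma_free(res);
 *
 * The exact helper names and signatures above are assumptions - check vme.h
 * for the authoritative consumer API.
 */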
1868 /*
1869  * Clean up a previously generated linked list
1870  *
1871  * This is kept as a separate function - do not assume the chain cannot be reused.
1872 */
1873 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1874 {
1875 struct list_head *pos, *temp;
1876 struct tsi148_dma_entry *entry;
1877
1878 struct vme_bridge *tsi148_bridge = list->parent->parent;
1879
1880 /* detach and free each entry */
1881 list_for_each_safe(pos, temp, &list->entries) {
1882 list_del(pos);
1883 entry = list_entry(pos, struct tsi148_dma_entry, list);
1884
1885 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1886 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1887 kfree(entry);
1888 }
1889
1890 return 0;
1891 }
1892
1893 /*
1894  * All 4 location monitors reside at the same base - this is therefore a
1895  * system-wide configuration.
1896  *
1897  * This does not enable the location monitor - that is done when the first
1898  * callback is attached and undone when the last callback is removed.
1899 */
1900 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1901 u32 aspace, u32 cycle)
1902 {
1903 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1904 int i;
1905 struct vme_bridge *tsi148_bridge;
1906 struct tsi148_driver *bridge;
1907
1908 tsi148_bridge = lm->parent;
1909
1910 bridge = tsi148_bridge->driver_priv;
1911
1912 mutex_lock(&lm->mtx);
1913
1914 /* If we already have a callback attached, we can't move it! */
1915 for (i = 0; i < lm->monitors; i++) {
1916 if (bridge->lm_callback[i]) {
1917 mutex_unlock(&lm->mtx);
1918 dev_err(tsi148_bridge->parent, "Location monitor callback attached, can't reset\n");
1919 return -EBUSY;
1920 }
1921 }
1922
1923 switch (aspace) {
1924 case VME_A16:
1925 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1926 break;
1927 case VME_A24:
1928 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1929 break;
1930 case VME_A32:
1931 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1932 break;
1933 case VME_A64:
1934 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1935 break;
1936 default:
1937 mutex_unlock(&lm->mtx);
1938 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1939 return -EINVAL;
1940 }
1941
1942 if (cycle & VME_SUPER)
1943 lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1944 if (cycle & VME_USER)
1945 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1946 if (cycle & VME_PROG)
1947 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1948 if (cycle & VME_DATA)
1949 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1950
1951 reg_split(lm_base, &lm_base_high, &lm_base_low);
1952
1953 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1954 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1955 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1956
1957 mutex_unlock(&lm->mtx);
1958
1959 return 0;
1960 }
1961
1962 /* Get the configuration of the location monitor and return whether it is
1963  * enabled or disabled.
1964 */
1965 static int tsi148_lm_get(struct vme_lm_resource *lm,
1966 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1967 {
1968 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1969 struct tsi148_driver *bridge;
1970
1971 bridge = lm->parent->driver_priv;
1972
1973 mutex_lock(&lm->mtx);
1974
1975 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1976 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1977 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1978
1979 reg_join(lm_base_high, lm_base_low, lm_base);
1980
1981 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1982 enabled = 1;
1983
1984 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
1985 *aspace |= VME_A16;
1986
1987 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
1988 *aspace |= VME_A24;
1989
1990 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
1991 *aspace |= VME_A32;
1992
1993 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
1994 *aspace |= VME_A64;
1995
1996 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
1997 *cycle |= VME_SUPER;
1998 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
1999 *cycle |= VME_USER;
2000 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2001 *cycle |= VME_PROG;
2002 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2003 *cycle |= VME_DATA;
2004
2005 mutex_unlock(&lm->mtx);
2006
2007 return enabled;
2008 }
2009
2010 /*
2011 * Attach a callback to a specific location monitor.
2012 *
2013  * The callback will be passed the data pointer supplied when it was attached.
2014 */
2015 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2016 void (*callback)(void *), void *data)
2017 {
2018 u32 lm_ctl, tmp;
2019 struct vme_bridge *tsi148_bridge;
2020 struct tsi148_driver *bridge;
2021
2022 tsi148_bridge = lm->parent;
2023
2024 bridge = tsi148_bridge->driver_priv;
2025
2026 mutex_lock(&lm->mtx);
2027
2028 /* Ensure that the location monitor is configured - need PGM or DATA */
2029 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2030 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2031 mutex_unlock(&lm->mtx);
2032 dev_err(tsi148_bridge->parent, "Location monitor not properly configured\n");
2033 return -EINVAL;
2034 }
2035
2036 /* Check that a callback isn't already attached */
2037 if (bridge->lm_callback[monitor]) {
2038 mutex_unlock(&lm->mtx);
2039 dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2040 return -EBUSY;
2041 }
2042
2043 /* Attach callback */
2044 bridge->lm_callback[monitor] = callback;
2045 bridge->lm_data[monitor] = data;
2046
2047 /* Enable Location Monitor interrupt */
2048 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2049 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2050 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2051
2052 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2053 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2054 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2055
2056 	/* Ensure that the global Location Monitor Enable bit is set */
2057 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2058 lm_ctl |= TSI148_LCSR_LMAT_EN;
2059 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2060 }
2061
2062 mutex_unlock(&lm->mtx);
2063
2064 return 0;
2065 }
2066
2067 /*
2068  * Detach a callback function from a specific location monitor.
2069 */
2070 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2071 {
2072 u32 lm_en, tmp;
2073 struct tsi148_driver *bridge;
2074
2075 bridge = lm->parent->driver_priv;
2076
2077 mutex_lock(&lm->mtx);
2078
2079 /* Disable Location Monitor and ensure previous interrupts are clear */
2080 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2081 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2082 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2083
2084 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2085 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2086 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2087
2088 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2089 bridge->base + TSI148_LCSR_INTC);
2090
2091 /* Detach callback */
2092 bridge->lm_callback[monitor] = NULL;
2093 bridge->lm_data[monitor] = NULL;
2094
2095 	/* If all location monitors are disabled, disable the global Location Monitor */
2096 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2097 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2098 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2099 tmp &= ~TSI148_LCSR_LMAT_EN;
2100 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2101 }
2102
2103 mutex_unlock(&lm->mtx);
2104
2105 return 0;
2106 }
2107
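/*
 * Illustrative sketch (not part of this driver): the location monitor methods
 * above are normally driven through the VME core. Assuming the vme_lm_*
 * helper names from vme.h (vdev and my_data are placeholders), a consumer
 * watching A16 data accesses at address 0xf000 might do roughly this:
 *
 *	static void my_lm_handler(void *data)
 *	{
 *		pr_info("location monitor hit\n");
 *	}
 *
 *	struct vme_resource *lm = vme_lm_request(vdev);
 *
 *	vme_lm_set(lm, 0xf000, VME_A16, VME_USER | VME_DATA);
 *	vme_lm_attach(lm, 0, my_lm_handler, my_data);
 *	...
 *	vme_lm_detach(lm, 0);
 *	vme_lm_free(lm);
 *
 * Note the handler runs from the TSI148 interrupt handler, so it should be
 * short and must not sleep. The helper names are assumptions - check vme.h.
 */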
2108 /*
2109 * Determine Geographical Addressing
2110 */
2111 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2112 {
2113 u32 slot = 0;
2114 struct tsi148_driver *bridge;
2115
2116 bridge = tsi148_bridge->driver_priv;
2117
2118 if (!geoid) {
2119 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2120 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2121 } else {
2122 slot = geoid;
2123 }
2124
2125 return (int)slot;
2126 }
2127
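/*
 * Illustrative sketch (not part of this driver): consumers would typically
 * read the slot number via the VME core, e.g. "slot = vme_slot_get(vdev);",
 * where vme_slot_get() and vdev are assumed from the vme.h consumer API.
 */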
2128 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2129 dma_addr_t *dma)
2130 {
2131 struct pci_dev *pdev;
2132
2133 /* Find pci_dev container of dev */
2134 pdev = to_pci_dev(parent);
2135
2136 return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
2137 }
2138
2139 static void tsi148_free_consistent(struct device *parent, size_t size,
2140 void *vaddr, dma_addr_t dma)
2141 {
2142 struct pci_dev *pdev;
2143
2144 /* Find pci_dev container of dev */
2145 pdev = to_pci_dev(parent);
2146
2147 dma_free_coherent(&pdev->dev, size, vaddr, dma);
2148 }
2149
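/*
 * Illustrative sketch (not part of this driver): the two methods above back
 * the VME core's consistent-buffer helpers. Assuming the vme_* helper names
 * from vme.h (slave_res and vme_base are placeholders), a slave window user
 * might pair them with a slave image roughly as follows:
 *
 *	dma_addr_t bus;
 *	void *buf = vme_alloc_consistent(slave_res, 0x10000, &bus);
 *
 *	if (buf)
 *		vme_slave_set(slave_res, 1, vme_base, 0x10000, bus,
 *			      VME_A24, VME_SCT | VME_USER | VME_DATA);
 *	...
 *	vme_free_consistent(slave_res, 0x10000, buf, bus);
 *
 * Helper names and argument order are assumptions - check vme.h.
 */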
2150 /*
2151 * Configure CR/CSR space
2152 *
2153 * Access to the CR/CSR can be configured at power-up. The location of the
2154  * CR/CSR registers in the CR/CSR address space is determined by the board's
2155  * Auto-ID or Geographic address. This function ensures that the window is
2156  * enabled at an offset consistent with the board's geographic address.
2157  *
2158  * Each board has a 512kB window, with the highest 4kB used for the board's
2159  * registers. This means there is a fixed-length 508kB window which must be
2160  * mapped onto PCI memory.
2161 */
2162 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2163 struct pci_dev *pdev)
2164 {
2165 u32 cbar, crat, vstat;
2166 u32 crcsr_bus_high, crcsr_bus_low;
2167 int retval;
2168 struct tsi148_driver *bridge;
2169
2170 bridge = tsi148_bridge->driver_priv;
2171
2172 /* Allocate mem for CR/CSR image */
2173 bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev,
2174 VME_CRCSR_BUF_SIZE,
2175 &bridge->crcsr_bus, GFP_KERNEL);
2176 if (!bridge->crcsr_kernel) {
2177 dev_err(tsi148_bridge->parent, "Failed to allocate memory for CR/CSR image\n");
2178 return -ENOMEM;
2179 }
2180
2181 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2182
2183 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2184 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2185
2186 /* Ensure that the CR/CSR is configured at the correct offset */
2187 cbar = ioread32be(bridge->base + TSI148_CBAR);
2188 cbar = (cbar & TSI148_CRCSR_CBAR_M) >> 3;
2189
2190 vstat = tsi148_slot_get(tsi148_bridge);
2191
2192 if (cbar != vstat) {
2193 cbar = vstat;
2194 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2195 iowrite32be(cbar << 3, bridge->base + TSI148_CBAR);
2196 }
2197 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2198
2199 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2200 if (crat & TSI148_LCSR_CRAT_EN) {
2201 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2202 } else {
2203 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2204 iowrite32be(crat | TSI148_LCSR_CRAT_EN, bridge->base + TSI148_LCSR_CRAT);
2205 }
2206
2207 /* If we want flushed, error-checked writes, set up a window
2208 * over the CR/CSR registers. We read from here to safely flush
2209 * through VME writes.
2210 */
2211 if (err_chk) {
2212 retval = tsi148_master_set(bridge->flush_image, 1, (vstat * 0x80000),
2213 0x80000, VME_CRCSR, VME_SCT, VME_D16);
2214 if (retval)
2215 dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
2216 }
2217
2218 return 0;
2219 }
2220
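/*
 * Worked example for tsi148_crcsr_init(): each slot owns a 512kB (0x80000
 * byte) block of CR/CSR space, so a board whose geographic address is 5 has
 * its image at VME CR/CSR address 5 * 0x80000 = 0x280000. When err_chk is
 * set, the flush window configured above is placed at exactly that offset
 * via vstat * 0x80000.
 */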
2221 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2222 struct pci_dev *pdev)
2223 {
2224 u32 crat;
2225 struct tsi148_driver *bridge;
2226
2227 bridge = tsi148_bridge->driver_priv;
2228
2229 /* Turn off CR/CSR space */
2230 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2231 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2232 bridge->base + TSI148_LCSR_CRAT);
2233
2234 /* Free image */
2235 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2236 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2237
2238 dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE,
2239 bridge->crcsr_kernel, bridge->crcsr_bus);
2240 }
2241
2242 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2243 {
2244 int retval, i, master_num;
2245 u32 data;
2246 struct list_head *pos = NULL, *n;
2247 struct vme_bridge *tsi148_bridge;
2248 struct tsi148_driver *tsi148_device;
2249 struct vme_master_resource *master_image;
2250 struct vme_slave_resource *slave_image;
2251 struct vme_dma_resource *dma_ctrlr;
2252 struct vme_lm_resource *lm;
2253
2254 if (geoid >= VME_MAX_SLOTS) {
2255 dev_err(&pdev->dev, "VME geographical address must be between 0 and %d (exclusive), but got %d\n",
2256 VME_MAX_SLOTS, geoid);
2257 return -EINVAL;
2258 }
2259
2260 /* If we want to support more than one of each bridge, we need to
2261 * dynamically generate this so we get one per device
2262 */
2263 tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
2264 if (!tsi148_bridge) {
2265 retval = -ENOMEM;
2266 goto err_struct;
2267 }
2268 vme_init_bridge(tsi148_bridge);
2269
2270 tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
2271 if (!tsi148_device) {
2272 retval = -ENOMEM;
2273 goto err_driver;
2274 }
2275
2276 tsi148_bridge->driver_priv = tsi148_device;
2277
2278 /* Enable the device */
2279 retval = pci_enable_device(pdev);
2280 if (retval) {
2281 dev_err(&pdev->dev, "Unable to enable device\n");
2282 goto err_enable;
2283 }
2284
2285 /* Map Registers */
2286 retval = pci_request_regions(pdev, driver_name);
2287 if (retval) {
2288 dev_err(&pdev->dev, "Unable to reserve resources\n");
2289 goto err_resource;
2290 }
2291
2292 /* map registers in BAR 0 */
2293 tsi148_device->base = ioremap(pci_resource_start(pdev, 0),
2294 4096);
2295 if (!tsi148_device->base) {
2296 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2297 retval = -EIO;
2298 goto err_remap;
2299 }
2300
2301 /* Check to see if the mapping worked out */
2302 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2303 if (data != PCI_VENDOR_ID_TUNDRA) {
2304 dev_err(&pdev->dev, "CRG region check failed\n");
2305 retval = -EIO;
2306 goto err_test;
2307 }
2308
2309 /* Initialize wait queues & mutual exclusion flags */
2310 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2311 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2312 init_waitqueue_head(&tsi148_device->iack_queue);
2313 mutex_init(&tsi148_device->vme_int);
2314 mutex_init(&tsi148_device->vme_rmw);
2315
2316 tsi148_bridge->parent = &pdev->dev;
2317 strscpy(tsi148_bridge->name, driver_name, VMENAMSIZ);
2318
2319 /* Setup IRQ */
2320 retval = tsi148_irq_init(tsi148_bridge);
2321 if (retval != 0) {
2322 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2323 goto err_irq;
2324 }
2325
2326 /* If we are going to flush writes, we need to read from the VME bus.
2327 	 * We need to do this safely, thus we read the device's own CR/CSR
2328 * register. To do this we must set up a window in CR/CSR space and
2329 * hence have one less master window resource available.
2330 */
2331 master_num = TSI148_MAX_MASTER;
2332 if (err_chk) {
2333 master_num--;
2334
2335 tsi148_device->flush_image =
2336 kmalloc(sizeof(*tsi148_device->flush_image),
2337 GFP_KERNEL);
2338 if (!tsi148_device->flush_image) {
2339 retval = -ENOMEM;
2340 goto err_master;
2341 }
2342 tsi148_device->flush_image->parent = tsi148_bridge;
2343 spin_lock_init(&tsi148_device->flush_image->lock);
2344 tsi148_device->flush_image->locked = 1;
2345 tsi148_device->flush_image->number = master_num;
2346 memset(&tsi148_device->flush_image->bus_resource, 0,
2347 sizeof(tsi148_device->flush_image->bus_resource));
2348 tsi148_device->flush_image->kern_base = NULL;
2349 }
2350
2351 /* Add master windows to list */
2352 for (i = 0; i < master_num; i++) {
2353 master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
2354 if (!master_image) {
2355 retval = -ENOMEM;
2356 goto err_master;
2357 }
2358 master_image->parent = tsi148_bridge;
2359 spin_lock_init(&master_image->lock);
2360 master_image->locked = 0;
2361 master_image->number = i;
2362 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2363 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2364 VME_USER3 | VME_USER4;
2365 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2366 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2367 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2368 VME_PROG | VME_DATA;
2369 master_image->width_attr = VME_D16 | VME_D32;
2370 memset(&master_image->bus_resource, 0,
2371 sizeof(master_image->bus_resource));
2372 master_image->kern_base = NULL;
2373 list_add_tail(&master_image->list,
2374 &tsi148_bridge->master_resources);
2375 }
2376
2377 /* Add slave windows to list */
2378 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2379 slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
2380 if (!slave_image) {
2381 retval = -ENOMEM;
2382 goto err_slave;
2383 }
2384 slave_image->parent = tsi148_bridge;
2385 mutex_init(&slave_image->mtx);
2386 slave_image->locked = 0;
2387 slave_image->number = i;
2388 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2389 VME_A64;
2390 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2391 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2392 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2393 VME_PROG | VME_DATA;
2394 list_add_tail(&slave_image->list,
2395 &tsi148_bridge->slave_resources);
2396 }
2397
2398 /* Add dma engines to list */
2399 for (i = 0; i < TSI148_MAX_DMA; i++) {
2400 dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
2401 if (!dma_ctrlr) {
2402 retval = -ENOMEM;
2403 goto err_dma;
2404 }
2405 dma_ctrlr->parent = tsi148_bridge;
2406 mutex_init(&dma_ctrlr->mtx);
2407 dma_ctrlr->locked = 0;
2408 dma_ctrlr->number = i;
2409 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2410 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2411 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2412 VME_DMA_PATTERN_TO_MEM;
2413 INIT_LIST_HEAD(&dma_ctrlr->pending);
2414 INIT_LIST_HEAD(&dma_ctrlr->running);
2415 list_add_tail(&dma_ctrlr->list,
2416 &tsi148_bridge->dma_resources);
2417 }
2418
2419 /* Add location monitor to list */
2420 lm = kmalloc(sizeof(*lm), GFP_KERNEL);
2421 if (!lm) {
2422 retval = -ENOMEM;
2423 goto err_lm;
2424 }
2425 lm->parent = tsi148_bridge;
2426 mutex_init(&lm->mtx);
2427 lm->locked = 0;
2428 lm->number = 1;
2429 lm->monitors = 4;
2430 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2431
2432 tsi148_bridge->slave_get = tsi148_slave_get;
2433 tsi148_bridge->slave_set = tsi148_slave_set;
2434 tsi148_bridge->master_get = tsi148_master_get;
2435 tsi148_bridge->master_set = tsi148_master_set;
2436 tsi148_bridge->master_read = tsi148_master_read;
2437 tsi148_bridge->master_write = tsi148_master_write;
2438 tsi148_bridge->master_rmw = tsi148_master_rmw;
2439 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2440 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2441 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2442 tsi148_bridge->irq_set = tsi148_irq_set;
2443 tsi148_bridge->irq_generate = tsi148_irq_generate;
2444 tsi148_bridge->lm_set = tsi148_lm_set;
2445 tsi148_bridge->lm_get = tsi148_lm_get;
2446 tsi148_bridge->lm_attach = tsi148_lm_attach;
2447 tsi148_bridge->lm_detach = tsi148_lm_detach;
2448 tsi148_bridge->slot_get = tsi148_slot_get;
2449 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2450 tsi148_bridge->free_consistent = tsi148_free_consistent;
2451
2452 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2453 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2454 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2455 if (!geoid)
2456 dev_info(&pdev->dev, "VME geographical address is %d\n",
2457 data & TSI148_LCSR_VSTAT_GA_M);
2458 else
2459 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2460 geoid);
2461
2462 	dev_info(&pdev->dev, "VME write flushing and error checking is %s\n",
2463 err_chk ? "enabled" : "disabled");
2464
2465 retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2466 if (retval) {
2467 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2468 goto err_crcsr;
2469 }
2470
2471 retval = vme_register_bridge(tsi148_bridge);
2472 if (retval != 0) {
2473 dev_err(&pdev->dev, "Chip Registration failed.\n");
2474 goto err_reg;
2475 }
2476
2477 pci_set_drvdata(pdev, tsi148_bridge);
2478
2479 /* Clear VME bus "board fail", and "power-up reset" lines */
2480 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2481 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2482 data |= TSI148_LCSR_VSTAT_CPURST;
2483 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2484
2485 return 0;
2486
2487 err_reg:
2488 tsi148_crcsr_exit(tsi148_bridge, pdev);
2489 err_crcsr:
2490 err_lm:
2491 	/* resources are stored in a linked list */
2492 list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2493 lm = list_entry(pos, struct vme_lm_resource, list);
2494 list_del(pos);
2495 kfree(lm);
2496 }
2497 err_dma:
2498 	/* resources are stored in a linked list */
2499 list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2500 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2501 list_del(pos);
2502 kfree(dma_ctrlr);
2503 }
2504 err_slave:
2505 	/* resources are stored in a linked list */
2506 list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2507 slave_image = list_entry(pos, struct vme_slave_resource, list);
2508 list_del(pos);
2509 kfree(slave_image);
2510 }
2511 err_master:
2512 	/* resources are stored in a linked list */
2513 list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2514 master_image = list_entry(pos, struct vme_master_resource, list);
2515 list_del(pos);
2516 kfree(master_image);
2517 }
2518
2519 tsi148_irq_exit(tsi148_bridge, pdev);
2520 err_irq:
2521 err_test:
2522 iounmap(tsi148_device->base);
2523 err_remap:
2524 pci_release_regions(pdev);
2525 err_resource:
2526 pci_disable_device(pdev);
2527 err_enable:
2528 kfree(tsi148_device);
2529 err_driver:
2530 kfree(tsi148_bridge);
2531 err_struct:
2532 return retval;
2533 }
2534
2535 static void tsi148_remove(struct pci_dev *pdev)
2536 {
2537 struct list_head *pos = NULL;
2538 struct list_head *tmplist;
2539 struct vme_master_resource *master_image;
2540 struct vme_slave_resource *slave_image;
2541 struct vme_dma_resource *dma_ctrlr;
2542 int i;
2543 struct tsi148_driver *bridge;
2544 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2545
2546 bridge = tsi148_bridge->driver_priv;
2547
2548 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2549
2550 /*
2551 	 * Shut down all inbound and outbound windows.
2552 */
2553 for (i = 0; i < 8; i++) {
2554 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2555 TSI148_LCSR_OFFSET_ITAT);
2556 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2557 TSI148_LCSR_OFFSET_OTAT);
2558 }
2559
2560 /*
2561 	 * Shut down the location monitor.
2562 */
2563 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2564
2565 /*
2566 	 * Shut down the CRG map.
2567 */
2568 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2569
2570 /*
2571 * Clear error status.
2572 */
2573 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2574 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2575 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2576
2577 /*
2578 * Remove VIRQ interrupt (if any)
2579 */
2580 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2581 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2582
2583 /*
2584 * Map all Interrupts to PCI INTA
2585 */
2586 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2587 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2588
2589 tsi148_irq_exit(tsi148_bridge, pdev);
2590
2591 vme_unregister_bridge(tsi148_bridge);
2592
2593 tsi148_crcsr_exit(tsi148_bridge, pdev);
2594
2595 	/* resources are stored in a linked list */
2596 list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2597 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2598 list_del(pos);
2599 kfree(dma_ctrlr);
2600 }
2601
2602 	/* resources are stored in a linked list */
2603 list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2604 slave_image = list_entry(pos, struct vme_slave_resource, list);
2605 list_del(pos);
2606 kfree(slave_image);
2607 }
2608
2609 	/* resources are stored in a linked list */
2610 list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2611 master_image = list_entry(pos, struct vme_master_resource, list);
2612 list_del(pos);
2613 kfree(master_image);
2614 }
2615
2616 iounmap(bridge->base);
2617
2618 pci_release_regions(pdev);
2619
2620 pci_disable_device(pdev);
2621
2622 kfree(tsi148_bridge->driver_priv);
2623
2624 kfree(tsi148_bridge);
2625 }
2626
2627 module_pci_driver(tsi148_driver);
2628
2629 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2630 module_param(err_chk, bool, 0);
2631
2632 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2633 module_param(geoid, uint, 0);
2634
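/*
 * Example (assumed typical usage): both parameters can be given at load time,
 * e.g.
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=3
 *
 * which enables flushed, error-checked VME accesses and overrides the
 * geographical address read from the VSTAT register with slot 3.
 */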
2635 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2636 MODULE_LICENSE("GPL");
2637