/*
**
**  PCI Lower Bus Adapter (LBA) manager
**
**  (c) Copyright 1999,2000 Grant Grundler
**  (c) Copyright 1999,2000 Hewlett-Packard Company
**
**  This program is free software; you can redistribute it and/or modify
**  it under the terms of the GNU General Public License as published by
**  the Free Software Foundation; either version 2 of the License, or
**  (at your option) any later version.
**
**
**  This module primarily provides access to PCI bus (config/IOport
**  spaces) on platforms with an SBA/LBA chipset. A/B/C/J/L/N-class
**  with 4 digit model numbers - eg C3000 (and A400...sigh).
**
**  LBA driver isn't as simple as the Dino driver because:
**  (a) this chip has substantial bug fixes between revisions
**      (Only one Dino bug has a software workaround :^( )
**  (b) has more options which we don't (yet) support (DMA hints, OLARD)
**  (c) IRQ support lives in the I/O SAPIC driver (not with PCI driver)
**  (d) it plays nicely with both PAT and "Legacy" PA-RISC firmware (PDC)
**      (Dino only deals with "Legacy" PDC)
**
**  LBA driver passes the I/O SAPIC HPA to the I/O SAPIC driver.
**  (I/O SAPIC is integrated in the LBA chip).
**
**  FIXME: Add support to SBA and LBA drivers for DMA hint sets
**  FIXME: Add support for PCI card hot-plug (OLARD).
*/

#include <linux/delay.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/init.h>		/* for __init and __devinit */
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/page.h>

#include <asm/ropes.h>
#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
#include <asm/parisc-device.h>
#include <asm/io.h>		/* read/write stuff */

#undef DEBUG_LBA	/* general stuff */
#undef DEBUG_LBA_PORT	/* debug I/O Port access */
#undef DEBUG_LBA_CFG	/* debug Config Space Access (ie PCI Bus walk) */
#undef DEBUG_LBA_PAT	/* debug PCI Resource Mgt code - PDC PAT only */

#undef FBB_SUPPORT	/* Fast Back-Back xfers - NOT READY YET */


#ifdef DEBUG_LBA
#define DBG(x...)	printk(x)
#else
#define DBG(x...)
#endif

#ifdef DEBUG_LBA_PORT
#define DBG_PORT(x...)	printk(x)
#else
#define DBG_PORT(x...)
#endif

#ifdef DEBUG_LBA_CFG
#define DBG_CFG(x...)	printk(x)
#else
#define DBG_CFG(x...)
#endif

#ifdef DEBUG_LBA_PAT
#define DBG_PAT(x...)	printk(x)
#else
#define DBG_PAT(x...)
#endif


/*
** Config accessor functions only pass in the 8-bit bus number and not
** the 8-bit "PCI Segment" number. Each LBA will be assigned a PCI bus
** number based on what firmware wrote into the scratch register.
**
** The "secondary" bus number is set to this before calling
** pci_register_ops(). If any PPB's are present, the scan will
** discover them and update the "secondary" and "subordinate"
** fields in the pci_bus structure.
**
** Changes in the configuration *may* result in a different
** bus number for each LBA depending on what firmware does.
*/

#define MODULE_NAME "LBA"

/* non-postable I/O port space, densely packed */
#define LBA_PORT_BASE	(PCI_F_EXTEND | 0xfee00000UL)
static void __iomem *astro_iop_base __read_mostly;

static u32 lba_t32;

/* lba flags */
#define LBA_FLAG_SKIP_PROBE	0x10

#define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)


/* Looks nice and keeps the compiler happy */
#define LBA_DEV(d) ((struct lba_device *) (d))


/*
** Only allow 8 subsidiary busses per LBA
** Problem is the PCI bus numbering is globally shared.
*/
#define LBA_MAX_NUM_BUSES 8

/************************************
 * LBA register read and write support
 *
 * BE WARNED: register writes are posted.
 *  (ie follow writes which must reach HW with a read)
 */
#define READ_U8(addr)  __raw_readb(addr)
#define READ_U16(addr) __raw_readw(addr)
#define READ_U32(addr) __raw_readl(addr)
#define WRITE_U8(value, addr)  __raw_writeb(value, addr)
#define WRITE_U16(value, addr) __raw_writew(value, addr)
#define WRITE_U32(value, addr) __raw_writel(value, addr)

#define READ_REG8(addr)  readb(addr)
#define READ_REG16(addr) readw(addr)
#define READ_REG32(addr) readl(addr)
#define READ_REG64(addr) readq(addr)
#define WRITE_REG8(value, addr)  writeb(value, addr)
#define WRITE_REG16(value, addr) writew(value, addr)
#define WRITE_REG32(value, addr) writel(value, addr)
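
/*
** Example of the posted-write rule above (illustrative, not driver logic):
** a write that must reach the chip before code proceeds is followed by a
** read from the same chip, as done throughout this file:
**
**	WRITE_REG32(val, d->hba.base_addr + LBA_PCI_CFG_ADDR);
**	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);  <- flush
*/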


#define LBA_CFG_TOK(bus,dfn)	((u32) ((bus)<<16 | (dfn)<<8))
#define LBA_CFG_BUS(tok)	((u8) ((tok)>>16))
#define LBA_CFG_DEV(tok)	((u8) ((tok)>>11) & 0x1f)
#define LBA_CFG_FUNC(tok)	((u8) ((tok)>>8 ) & 0x7)
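
/*
** Worked example of the token encoding above (values are arbitrary).
** devfn packs the 5-bit device in bits 7:3 and the 3-bit function in
** bits 2:0, so bus 0x01, dev 0x02, func 0 gives devfn 0x10 and:
**
**	tok = LBA_CFG_TOK(0x01, 0x10)	-> 0x00011000
**	LBA_CFG_BUS(tok)		-> 0x01
**	LBA_CFG_DEV(tok)		-> 0x02
**	LBA_CFG_FUNC(tok)		-> 0x00
*/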


/*
** Extract LBA (Rope) number from HPA
** REVISIT: 16 ropes for Stretch/Ike?
*/
#define ROPES_PER_IOC	8
#define LBA_NUM(x)    ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
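
/*
** Example (HPA value is illustrative): ropes are spaced 0x2000 apart in
** HPA space, so an LBA at HPA 0xfed32000 would yield
** (0xfed32000 >> 13) & 7 == 1, i.e. rope 1.
*/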


static void
lba_dump_res(struct resource *r, int d)
{
	int i;

	if (NULL == r)
		return;

	printk(KERN_DEBUG "(%p)", r->parent);
	for (i = d; i ; --i) printk(" ");
	printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r,
		(long)r->start, (long)r->end, r->flags);
	lba_dump_res(r->child, d+2);
	lba_dump_res(r->sibling, d);
}


/*
** LBA rev 2.0, 2.1, 2.2, and 3.0 bus walks require a complex
** workaround for cfg cycles:
**	-- preserve LBA state
**	-- prevent any DMA from occurring
**	-- turn on smart mode
**	-- probe with config writes before doing config reads
**	-- check ERROR_STATUS
**	-- clear ERROR_STATUS
**	-- restore LBA state
**
** The workaround is only used for device discovery.
*/

static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
{
	u8 first_bus = d->hba.hba_bus->busn_res.start;
	u8 last_sub_bus = d->hba.hba_bus->busn_res.end;

	if ((bus < first_bus) ||
	    (bus > last_sub_bus) ||
	    ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
		return 0;
	}

	return 1;
}



#define LBA_CFG_SETUP(d, tok) {				\
	/* Save contents of error config register. */	\
	error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);	\
\
	/* Save contents of status control register. */	\
	status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);	\
\
	/* For LBA rev 2.0, 2.1, 2.2, and 3.0, we must disable DMA	\
	** arbitration for full bus walks.	\
	*/	\
	/* Save contents of arb mask register. */	\
	arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK);	\
\
	/*	\
	 * Turn off all device arbitration bits (i.e. everything	\
	 * except arbitration enable bit).	\
	 */	\
	WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);	\
\
	/*	\
	 * Set the smart mode bit so that master aborts don't cause	\
	 * LBA to go into PCI fatal mode (required).	\
	 */	\
	WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG);	\
}


#define LBA_CFG_PROBE(d, tok) {				\
	/*	\
	 * Setup Vendor ID write and read back the address register	\
	 * to make sure that LBA is the bus master.	\
	 */	\
	WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);\
	/*	\
	 * Read address register to ensure that LBA is the bus master,	\
	 * which implies that DMA traffic has stopped when DMA arb is off.	\
	 */	\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
	/*	\
	 * Generate a cfg write cycle (will have no effect on the	\
	 * Vendor ID register since it is read-only).	\
	 */	\
	WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA);	\
	/*	\
	 * Make sure write has completed before proceeding further,	\
	 * i.e. before setting clear enable.	\
	 */	\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}


/*
 * HPREVISIT:
 *	-- Can't tell if config cycle got the error.
 *
 *	   OV bit is broken until rev 4.0, so can't use OV bit and
 *	   LBA_ERROR_LOG_ADDR to tell if error belongs to config cycle.
 *
 *	   As of rev 4.0, no longer need the error check.
 *
 *	-- Even if we could tell, we still want to return -1
 *	   for **ANY** error (not just master abort).
 *
 *	-- Only clear non-fatal errors (we don't want to bring
 *	   LBA out of pci-fatal mode).
 *
 *	   Actually, there is still a race in which
 *	   we could be clearing a fatal error. We will
 *	   live with this during our initial bus walk
 *	   until rev 4.0 (no driver activity during
 *	   initial bus walk). The initial bus walk
 *	   has race conditions concerning the use of
 *	   smart mode as well.
 */

#define LBA_MASTER_ABORT_ERROR 0xc
#define LBA_FATAL_ERROR 0x10

#define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) {	\
	u32 error_status = 0;	\
	/*	\
	 * Set clear enable (CE) bit. Unset by HW when new	\
	 * errors are logged (LBA HW ERS section 14.3.3).	\
	 */	\
	WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL);	\
	error_status = READ_REG32(base + LBA_ERROR_STATUS);	\
	if ((error_status & 0x1f) != 0) {	\
		/*	\
		 * Fail the config read request.	\
		 */	\
		error = 1;	\
		if ((error_status & LBA_FATAL_ERROR) == 0) {	\
			/*	\
			 * Clear error status (if fatal bit not set) by setting	\
			 * clear error log bit (CL).	\
			 */	\
			WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL);	\
		}	\
	}	\
}

#define LBA_CFG_TR4_ADDR_SETUP(d, addr)	\
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);

#define LBA_CFG_ADDR_SETUP(d, addr) {	\
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
	/*	\
	 * Read address register to ensure that LBA is the bus master,	\
	 * which implies that DMA traffic has stopped when DMA arb is off.	\
	 */	\
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);	\
}


#define LBA_CFG_RESTORE(d, base) {	\
	/*	\
	 * Restore status control register (turn off clear enable).	\
	 */	\
	WRITE_REG32(status_control, base + LBA_STAT_CTL);	\
	/*	\
	 * Restore error config register (turn off smart mode).	\
	 */	\
	WRITE_REG32(error_config, base + LBA_ERROR_CONFIG);	\
	/*	\
	 * Restore arb mask register (reenables DMA arbitration).	\
	 */	\
	WRITE_REG32(arb_mask, base + LBA_ARB_MASK);	\
}



static unsigned int
lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
{
	u32 data = ~0U;
	int error = 0;
	u32 arb_mask = 0;	/* used by LBA_CFG_SETUP/RESTORE */
	u32 error_config = 0;	/* used by LBA_CFG_SETUP/RESTORE */
	u32 status_control = 0;	/* used by LBA_CFG_SETUP/RESTORE */

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_PROBE(d, tok);
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	if (!error) {
		void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

		LBA_CFG_ADDR_SETUP(d, tok | reg);
		switch (size) {
		case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
		case 2: data = (u32) READ_REG16(data_reg + (reg & 2)); break;
		case 4: data = READ_REG32(data_reg); break;
		}
	}
	LBA_CFG_RESTORE(d, d->hba.base_addr);
	return(data);
}


static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	/* FIXME: B2K/C3600 workaround is always use old method... */
	/* if (!LBA_SKIP_PROBE(d)) */ {
		/* original - Generate config cycle on broken elroy
		   with risk we will miss PCI bus errors. */
		*data = lba_rd_cfg(d, tok, pos, size);
		DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->busn_res.start, devfn, d)) {
		DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
		/* either don't want to look or know device isn't present. */
		*data = ~0U;
		return(0);
	}

	/* Basic Algorithm
	** Should only get here on fully working LBA rev.
	** This is how simple the code should have been.
	*/
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1: *data = READ_REG8(data_reg + (pos & 3)); break;
	case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
	case 4: *data = READ_REG32(data_reg); break;
	}
	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
	return 0;
}


static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
	int error = 0;
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_ADDR_SETUP(d, tok | reg);
	switch (size) {
	case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
	case 4: WRITE_REG32(data, data_reg); break;
	}
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	LBA_CFG_RESTORE(d, d->hba.base_addr);
}


/*
 * LBA 4.0 config write code implements non-postable semantics
 * by doing a read of CONFIG ADDR after the write.
 */

static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	if (!LBA_SKIP_PROBE(d)) {
		/* Original Workaround */
		lba_wr_cfg(d, tok, pos, (u32) data, size);
		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos, data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->busn_res.start, devfn, d))) {
		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos, data);
		return 1; /* New Workaround */
	}

	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);

	/* Basic Algorithm */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
		break;
	case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
		break;
	case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
		break;
	}
	/* flush posted write */
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}


static struct pci_ops elroy_cfg_ops = {
	.read =		elroy_cfg_read,
	.write =	elroy_cfg_write,
};

/*
 * The mercury_cfg_ops are slightly misnamed; they're also used for Elroy
 * TR4.0 as no additional bugs were found in this area between Elroy and
 * Mercury.
 */

static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1:
		*data = READ_REG8(data_reg + (pos & 3));
		break;
	case 2:
		*data = READ_REG16(data_reg + (pos & 2));
		break;
	case 4:
		*data = READ_REG32(data_reg);
		break;
	}

	DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
	return 0;
}

/*
 * LBA 4.0 config write code implements non-postable semantics
 * by doing a read of CONFIG ADDR after the write.
 */

static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);

	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	switch (size) {
	case 1:
		WRITE_REG8 (data, data_reg + (pos & 3));
		break;
	case 2:
		WRITE_REG16(data, data_reg + (pos & 2));
		break;
	case 4:
		WRITE_REG32(data, data_reg);
		break;
	}

	/* flush posted write */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}

static struct pci_ops mercury_cfg_ops = {
	.read =		mercury_cfg_read,
	.write =	mercury_cfg_write,
};


static void
lba_bios_init(void)
{
	DBG(MODULE_NAME ": lba_bios_init\n");
}


#ifdef CONFIG_64BIT

/*
 * truncate_pat_collision: Deal with overlaps or outright collisions
 *			between PAT PDC reported ranges.
 *
 *   Broken PA8800 firmware will report lmmio range that
 *   overlaps with CPU HPA. Just truncate the lmmio range.
 *
 *   BEWARE: conflicts with this lmmio range may be an
 *   elmmio range which is pointing down another rope.
 *
 *  FIXME: only deals with one collision per range...theoretically we
 *  could have several. Supporting more than one collision will get messy.
 */
static unsigned long
truncate_pat_collision(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp = root->child;

	if (end <= start || start < root->start || !tmp)
		return 0;

	/* find first overlap */
	while (tmp && tmp->end < start)
		tmp = tmp->sibling;

	/* no entries overlap */
	if (!tmp)  return 0;

	/* found one that starts behind the new one
	** Don't need to do anything.
	*/
	if (tmp->start >= end) return 0;

	if (tmp->start <= start) {
		/* "front" of new one overlaps */
		new->start = tmp->end + 1;

		if (tmp->end >= end) {
			/* AACCKK! totally overlaps! drop this range. */
			return 1;
		}
	}

	if (tmp->end < end) {
		/* "end" of new one overlaps */
		new->end = tmp->start - 1;
	}

	printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
					"to [%lx,%lx]\n",
			start, end,
			(long)new->start, (long)new->end );

	return 0;	/* truncation successful */
}

#else
#define truncate_pat_collision(r,n)  (0)
#endif

/*
** The algorithm is generic code.
** But it needs to access local data structures to get the IRQ base.
** Could make this a "pci_fixup_irq(bus, region)" but not sure
** it's worth it.
**
** Called by do_pci_scan_bus() immediately after each PCI bus is walked.
** Resources aren't allocated until recursive buswalk below HBA is completed.
*/
static void
lba_fixup_bus(struct pci_bus *bus)
{
	struct list_head *ln;
#ifdef FBB_SUPPORT
	u16 status;
#endif
	struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));

	DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
		bus, (int)bus->busn_res.start, bus->bridge->platform_data);

	/*
	** Properly Setup MMIO resources for this bus.
	** pci_alloc_primary_bus() mangles this.
	*/
	if (bus->parent) {
		int i;
		/* PCI-PCI Bridge */
		pci_read_bridge_bases(bus);
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			pci_claim_resource(bus->self, i);
		}
	} else {
		/* Host-PCI Bridge */
		int err;

		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.io_space.name,
			ldev->hba.io_space.start, ldev->hba.io_space.end,
			ldev->hba.io_space.flags);
		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.lmmio_space.name,
			ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
			ldev->hba.lmmio_space.flags);

		err = request_resource(&ioport_resource, &(ldev->hba.io_space));
		if (err < 0) {
			lba_dump_res(&ioport_resource, 2);
			BUG();
		}

		if (ldev->hba.elmmio_space.start) {
			err = request_resource(&iomem_resource,
					&(ldev->hba.elmmio_space));
			if (err < 0) {

				printk("FAILED: lba_fixup_bus() request for "
						"elmmio_space [%lx/%lx]\n",
						(long)ldev->hba.elmmio_space.start,
						(long)ldev->hba.elmmio_space.end);

				/* lba_dump_res(&iomem_resource, 2); */
				/* BUG(); */
			}
		}

		if (ldev->hba.lmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
			if (err < 0) {
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"lmmio_space [%lx/%lx]\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
			}
		}

#ifdef CONFIG_64BIT
		/* GMMIO is a distributed range. Every LBA/Rope gets part of it. */
		if (ldev->hba.gmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
			if (err < 0) {
				printk("FAILED: lba_fixup_bus() request for "
					"gmmio_space [%lx/%lx]\n",
					(long)ldev->hba.gmmio_space.start,
					(long)ldev->hba.gmmio_space.end);
				lba_dump_res(&iomem_resource, 2);
				BUG();
			}
		}
#endif

	}

	list_for_each(ln, &bus->devices) {
		int i;
		struct pci_dev *dev = pci_dev_b(ln);

		DBG("lba_fixup_bus() %s\n", pci_name(dev));

		/* Virtualize Device/Bridge Resources. */
		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
			struct resource *res = &dev->resource[i];

			/* If resource not allocated - skip it */
			if (!res->start)
				continue;

			/*
			** FIXME: this will result in whinging for devices
			** that share expansion ROMs (think quad tulip), but
			** isn't harmful.
			*/
			pci_claim_resource(dev, i);
		}

#ifdef FBB_SUPPORT
		/*
		** If one device does not support FBB transfers,
		** no one on the bus can be allowed to use them.
		*/
		(void) pci_read_config_word(dev, PCI_STATUS, &status);
		bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif

		/*
		** P2PB's have no IRQs. ignore them.
		*/
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
			continue;

		/* Adjust INTERRUPT_LINE for this dev */
		iosapic_fixup_irq(ldev->iosapic_obj, dev);
	}

#ifdef FBB_SUPPORT
/* FIXME/REVISIT - finish figuring out to set FBB on both
** pci_setup_bridge() clobbers PCI_BRIDGE_CONTROL.
** Can't fixup here anyway....garr...
*/
	if (fbb_enable) {
		if (bus->parent) {
			u8 control;
			/* enable on PPB */
			(void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
			(void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);

		} else {
			/* enable on LBA */
		}
		fbb_enable = PCI_COMMAND_FAST_BACK;
	}

	/* Lastly enable FBB/PERR/SERR on all devices too */
	list_for_each(ln, &bus->devices) {
		(void) pci_read_config_word(dev, PCI_COMMAND, &status);
		status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
		(void) pci_write_config_word(dev, PCI_COMMAND, status);
	}
#endif
}


static struct pci_bios_ops lba_bios_ops = {
	.init =		lba_bios_init,
	.fixup_bus =	lba_fixup_bus,
};




/*******************************************************
**
** LBA Sprockets "I/O Port" Space Accessor Functions
**
** This set of accessor functions is intended for use with
** "legacy firmware" (ie Sprockets on Allegro/Forte boxes).
**
** Many PCI devices don't require use of I/O port space (eg Tulip,
** NCR720) since they export the same registers to both MMIO and
** I/O port space. In general I/O port space is slower than
** MMIO since drivers are designed so PIO writes can be posted.
**
********************************************************/

#define LBA_PORT_IN(size, mask) \
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
	u##size t; \
	t = READ_REG##size(astro_iop_base + addr); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)



/*
** BUG X4107: Ordering broken - DMA RD return can bypass PIO WR
**
** Fixed in Elroy 2.2. The READ_U32(..., LBA_FUNC_ID) below is to
** guarantee non-postable completion semantics - not to avoid X4107.
** The READ_U32 only guarantees the write data gets to elroy but not
** out to the PCI bus. We can't read stuff from I/O port space
** since we don't know what has side-effects. Attempting to read
** from configuration space would be suicidal given the number of
** bugs in that elroy functionality.
**
**      Description:
**          DMA read results can improperly pass PIO writes (X4107). The
**          result of this bug is that if a processor modifies a location in
**          memory after having issued PIO writes, the PIO writes are not
**          guaranteed to be completed before a PCI device is allowed to see
**          the modified data in a DMA read.
**
**          Note that IKE bug X3719 in TR1 IKEs will result in the same
**          symptom.
**
**      Workaround:
**          The workaround for this bug is to always follow a PIO write with
**          a PIO read to the same bus before starting DMA on that PCI bus.
**
*/
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
	WRITE_REG##size(val, astro_iop_base + addr); \
	if (LBA_DEV(d)->hw_rev < 3) \
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


static struct pci_port_ops lba_astro_port_ops = {
	.inb =	lba_astro_in8,
	.inw =	lba_astro_in16,
	.inl =	lba_astro_in32,
	.outb =	lba_astro_out8,
	.outw =	lba_astro_out16,
	.outl =	lba_astro_out32
};


#ifdef CONFIG_64BIT
#define PIOP_TO_GMMIO(lba, addr) \
	((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))

/*******************************************************
**
** LBA PAT "I/O Port" Space Accessor Functions
**
** This set of accessor functions is intended for use with
** "PAT PDC" firmware (ie Prelude/Rhapsody/Piranha boxes).
**
** This uses the PIOP space located in the first 64MB of GMMIO.
** Each rope gets a full 64*KB* (ie 4 bytes per page) this way.
** bits 1:0 stay the same. bits 15:2 become 25:12.
** Then add the base and we can generate an I/O Port cycle.
********************************************************/
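
/*
** Worked example of the PIOP_TO_GMMIO() transform above (port number is
** arbitrary): I/O port 0x1004 maps to
**	((0x1004 & 0xFFFC) << 10) + (0x1004 & 3) = 0x401000
** i.e. iop_base + 0x401000, so each 4-byte chunk of I/O port space
** lands on its own 4KB GMMIO page.
*/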
#undef LBA_PORT_IN
#define LBA_PORT_IN(size, mask) \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)


#undef LBA_PORT_OUT
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
	WRITE_REG##size(val, where); \
	/* flush the I/O down to the elroy at least */ \
	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


static struct pci_port_ops lba_pat_port_ops = {
	.inb =	lba_pat_in8,
	.inw =	lba_pat_in16,
	.inl =	lba_pat_in32,
	.outb =	lba_pat_out8,
	.outw =	lba_pat_out16,
	.outl =	lba_pat_out32
};



/*
** make range information from PDC available to PCI subsystem.
** We make the PDC call here in order to get the PCI bus range
** numbers. The rest will get forwarded in pcibios_fixup_bus().
** We don't have a struct pci_bus assigned to us yet.
*/
static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	unsigned long bytecnt;
	long io_count;
	long status;	/* PDC return status */
	long pa_count;
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;	/* PA_VIEW */
	pdc_pat_cell_mod_maddr_block_t *io_pdc_cell;	/* IO_VIEW */
	int i;

	pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!pa_pdc_cell)
		return;

	io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!io_pdc_cell) {
		kfree(pa_pdc_cell);
		return;
	}

	/* return cell module (IO view) */
	status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				PA_VIEW, pa_pdc_cell);
	pa_count = pa_pdc_cell->mod[1];

	status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				IO_VIEW, io_pdc_cell);
	io_count = io_pdc_cell->mod[1];

	/* We've already done this once for device discovery...*/
	if (status != PDC_OK) {
		panic("pdc_pat_cell_module() call failed for LBA!\n");
	}

	if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
		panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
	}

	/*
	** Inspect the resources PAT tells us about
	*/
	for (i = 0; i < pa_count; i++) {
		struct {
			unsigned long type;
			unsigned long start;
			unsigned long end;	/* aka finish */
		} *p, *io;
		struct resource *r;

		p = (void *) &(pa_pdc_cell->mod[2+i*3]);
		io = (void *) &(io_pdc_cell->mod[2+i*3]);

		/* Convert the PAT range data to PCI "struct resource" */
		switch(p->type & 0xff) {
		case PAT_PBNUM:
			lba_dev->hba.bus_num.start = p->start;
			lba_dev->hba.bus_num.end = p->end;
			lba_dev->hba.bus_num.flags = IORESOURCE_BUS;
			break;

		case PAT_LMMIO:
			/* used to fix up pre-initialized MEM BARs */
			if (!lba_dev->hba.lmmio_space.start) {
				sprintf(lba_dev->hba.lmmio_name,
						"PCI%02x LMMIO",
						(int)lba_dev->hba.bus_num.start);
				lba_dev->hba.lmmio_space_offset = p->start -
					io->start;
				r = &lba_dev->hba.lmmio_space;
				r->name = lba_dev->hba.lmmio_name;
			} else if (!lba_dev->hba.elmmio_space.start) {
				sprintf(lba_dev->hba.elmmio_name,
						"PCI%02x ELMMIO",
						(int)lba_dev->hba.bus_num.start);
				r = &lba_dev->hba.elmmio_space;
				r->name = lba_dev->hba.elmmio_name;
			} else {
				printk(KERN_WARNING MODULE_NAME
					" only supports 2 LMMIO resources!\n");
				break;
			}

			r->start = p->start;
			r->end = p->end;
			r->flags = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_GMMIO:
			/* MMIO space > 4GB phys addr; for 64-bit BAR */
			sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.gmmio_space;
			r->name = lba_dev->hba.gmmio_name;
			r->start = p->start;
			r->end = p->end;
			r->flags = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_NPIOP:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : ignoring NPIOP (0x%lx)\n",
				i, p->start);
			break;

		case PAT_PIOP:
			/*
			** Postable I/O port space is per PCI host adapter.
			** base of 64MB PIOP region
			*/
			lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);

			sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.io_space;
			r->name = lba_dev->hba.io_name;
			r->start = HBA_PORT_BASE(lba_dev->hba.hba_num);
			r->end = r->start + HBA_PORT_SPACE_SIZE - 1;
			r->flags = IORESOURCE_IO;
			r->parent = r->sibling = r->child = NULL;
			break;

		default:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : unknown pat range type (0x%lx)\n",
				i, p->type & 0xff);
			break;
		}
	}

	kfree(pa_pdc_cell);
	kfree(io_pdc_cell);
}
#else
/* keep compiler from complaining about missing declarations */
#define lba_pat_port_ops lba_astro_port_ops
#define lba_pat_resources(pa_dev, lba_dev)
#endif	/* CONFIG_64BIT */


extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
extern void sba_directed_lmmio(struct parisc_device *, struct resource *);


static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	struct resource *r;
	int lba_num;

	lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;

	/*
	** With "legacy" firmware, the lowest byte of FW_SCRATCH
	** represents bus->secondary and the second byte represents
	** bus->subordinate (i.e. highest PPB programmed by firmware).
	** PCI bus walk *should* end up with the same result.
	** FIXME: But we don't have sanity checks in PCI or LBA.
	*/
	lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
	r = &(lba_dev->hba.bus_num);
	r->name = "LBA PCI Busses";
	r->start = lba_num & 0xff;
	r->end = (lba_num>>8) & 0xff;

	/* Set up local PCI Bus resources - we don't need them for
	** Legacy boxes but it's nice to see in /proc/iomem.
	*/
	r = &(lba_dev->hba.lmmio_space);
	sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.lmmio_name;

#if 1
	/* We want the CPU -> IO routing of addresses.
	 * The SBA BASE/MASK registers control CPU -> IO routing.
	 * Ask SBA what is routed to this rope/LBA.
	 */
	sba_distributed_lmmio(pa_dev, r);
#else
	/*
	 * The LBA BASE/MASK registers control IO -> System routing.
	 *
	 * The following code works but doesn't get us what we want.
	 * Well, only because firmware (v5.0) on C3000 doesn't program
	 * the LBA BASE/MASK registers to be the exact inverse of
	 * the corresponding SBA registers. Other Astro/Pluto
	 * based platform firmware may do it right.
	 *
	 * Should someone want to mess with MSI, they may need to
	 * reprogram LBA BASE/MASK registers. Thus preserve the code
	 * below until MSI is known to work on C3000/A500/N4000/RP3440.
	 *
	 * Using the code below, /proc/iomem shows:
	 * ...
	 * f0000000-f0ffffff : PCI00 LMMIO
	 *   f05d0000-f05d0000 : lcd_data
	 *   f05d0008-f05d0008 : lcd_cmd
	 * f1000000-f1ffffff : PCI01 LMMIO
	 * f4000000-f4ffffff : PCI02 LMMIO
	 *   f4000000-f4001fff : sym53c8xx
	 *   f4002000-f4003fff : sym53c8xx
	 *   f4004000-f40043ff : sym53c8xx
	 *   f4005000-f40053ff : sym53c8xx
	 *   f4007000-f4007fff : ohci_hcd
	 *   f4008000-f40083ff : tulip
	 * f6000000-f6ffffff : PCI03 LMMIO
	 * f8000000-fbffffff : PCI00 ELMMIO
	 *   fa100000-fa4fffff : stifb mmio
	 *   fb000000-fb1fffff : stifb fb
	 *
	 * But everything listed under PCI02 actually lives under PCI00.
	 * This is clearly wrong.
	 *
	 * Asking SBA how things are routed tells the correct story:
	 * LMMIO_BASE/MASK/ROUTE f4000001 fc000000 00000000
	 * DIR0_BASE/MASK/ROUTE  fa000001 fe000000 00000006
	 * DIR1_BASE/MASK/ROUTE  f9000001 ff000000 00000004
	 * DIR2_BASE/MASK/ROUTE  f0000000 fc000000 00000000
	 * DIR3_BASE/MASK/ROUTE  f0000000 fc000000 00000000
	 *
	 * Which looks like this in /proc/iomem:
	 * f4000000-f47fffff : PCI00 LMMIO
	 *   f4000000-f4001fff : sym53c8xx
	 *   ...[deleted core devices - same as above]...
	 *   f4008000-f40083ff : tulip
	 * f4800000-f4ffffff : PCI01 LMMIO
	 * f6000000-f67fffff : PCI02 LMMIO
	 * f7000000-f77fffff : PCI03 LMMIO
	 * f9000000-f9ffffff : PCI02 ELMMIO
	 * fa000000-fbffffff : PCI03 ELMMIO
	 *   fa100000-fa4fffff : stifb mmio
	 *   fb000000-fb1fffff : stifb fb
	 *
	 * i.e. all built-in core devices are now correctly under PCI00.
1171 | * The "PCI02 ELMMIO" directed range is for: |
1172 | * +-[02]---03.0 3Dfx Interactive, Inc. Voodoo 2 |
1173 | * |
1174 | * All is well now. |
1175 | */ |
1176 | r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE); |
1177 | if (r->start & 1) { |
1178 | unsigned long rsize; |
1179 | |
1180 | r->flags = IORESOURCE_MEM; |
1181 | /* mmio_mask also clears Enable bit */ |
1182 | r->start &= mmio_mask; |
1183 | r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start); |
1184 | rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK); |
1185 | |
1186 | /* |
1187 | ** Each rope only gets part of the distributed range. |
1188 | ** Adjust "window" for this rope. |
1189 | */ |
1190 | rsize /= ROPES_PER_IOC; |
1191 | r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start); |
1192 | r->end = r->start + rsize; |
1193 | } else { |
1194 | r->end = r->start = 0; /* Not enabled. */ |
1195 | } |
1196 | #endif |
1197 | |
1198 | /* |
1199 | ** "Directed" ranges are used when the "distributed range" isn't |
1200 | ** sufficient for all devices below a given LBA. Typically devices |
1201 | ** like graphics cards or X25 may need a directed range when the |
1202 | ** bus has multiple slots (ie multiple devices) or the device |
1203 | ** needs more than the typical 4 or 8MB a distributed range offers. |
1204 | ** |
	** The main reason for ignoring it now is the frigging complications.
	** Directed ranges may overlap (and have precedence over)
	** distributed ranges. Or a distributed range assigned to an unused
	** rope may be used by a directed range on a different rope.
	** Support for graphics devices may require fixing this
	** since they may be assigned a directed range which overlaps
	** an existing (but unused portion of) distributed range.
	*/
	r = &(lba_dev->hba.elmmio_space);
	sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.elmmio_name;

#if 1
	/* See comment which precedes call to sba_directed_lmmio() */
	sba_directed_lmmio(pa_dev, r);
#else
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);

	if (r->start & 1) {
		unsigned long rsize;
		r->flags = IORESOURCE_MEM;
		/* mmio_mask also clears Enable bit */
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
		rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
		r->end = r->start + ~rsize;
	}
#endif

	r = &(lba_dev->hba.io_space);
	sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.io_name;
	r->flags = IORESOURCE_IO;
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
	r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));

	/* Virtualize the I/O Port space ranges */
	lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
	r->start |= lba_num;
	r->end |= lba_num;
}


/**************************************************************************
**
**   LBA initialization code (HW and SW)
**
**   o identify LBA chip itself
**   o initialize LBA chip modes (HardFail)
**   o FIXME: initialize DMA hints for reasonable defaults
**   o enable configuration functions
**   o call pci_register_ops() to discover devs (fixup/fixup_bus get invoked)
**
**************************************************************************/

static int __init
lba_hw_init(struct lba_device *d)
{
	u32 stat;
	u32 bus_reset;	/* PDC_PAT_BUG */

#if 0
	printk(KERN_DEBUG "LBA %lx STAT_CTL %Lx ERROR_CFG %Lx STATUS %Lx DMA_CTL %Lx\n",
		d->hba.base_addr,
		READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
		READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
		READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
		READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
	printk(KERN_DEBUG " ARB mask %Lx pri %Lx mode %Lx mtlt %Lx\n",
		READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
		READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
		READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
		READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
	printk(KERN_DEBUG " HINT cfg 0x%Lx\n",
		READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
	printk(KERN_DEBUG " HINT reg ");
	{ int i;
	for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
		printk(" %Lx", READ_REG64(d->hba.base_addr + i));
	}
	printk("\n");
#endif	/* DEBUG_LBA_PAT */

#ifdef CONFIG_64BIT
	/*
	 * FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support
	 * Only N-Class and up can really make use of Get slot status.
	 * maybe L-class too but I've never played with it there.
	 */
#endif

	/* PDC_PAT_BUG: exhibited in rev 40.48 on L2000 */
	bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
	if (bus_reset) {
		printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
	}

	stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
	if (stat & LBA_SMART_MODE) {
		printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
		stat &= ~LBA_SMART_MODE;
		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
	}

	/* Set HF mode as the default (vs. -1 mode). */
	stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);

	/*
	** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
	** if it's not already set. If we just cleared the PCI Bus Reset
	** signal, wait a bit for the PCI devices to recover and setup.
	*/
	if (bus_reset)
		mdelay(pci_post_reset_delay);

	if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
		/*
		** PDC_PAT_BUG: PDC rev 40.48 on L2000.
		** B2000/C3600/J6000 also have this problem?
		**
		** Elroys with hot pluggable slots don't get configured
		** correctly if the slot is empty. ARB_MASK is set to 0
		** and we can't master transactions on the bus if it's
		** not at least one. 0x3 enables elroy and first slot.
		*/
		printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
		WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
	}

	/*
	** FIXME: Hint registers are programmed with default hint
	** values by firmware. Hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/
	return 0;
}

/*
 * Unfortunately, when firmware numbers busses, it doesn't take into account
 * Cardbus bridges. So we have to renumber the busses to suit ourselves.
 * Elroy/Mercury don't actually know what bus number they're attached to;
 * we use bus 0 to indicate the directly attached bus and any other bus
 * number will be taken care of by the PCI-PCI bridge.
 */
static unsigned int lba_next_bus = 0;

/*
 * Determine if lba should claim this chip (return 0) or not (return 1).
 * If so, initialize the chip and tell other partners in crime they
 * have work to do.
 */
static int __init
lba_driver_probe(struct parisc_device *dev)
{
	struct lba_device *lba_dev;
	LIST_HEAD(resources);
	struct pci_bus *lba_bus;
	struct pci_ops *cfg_ops;
	u32 func_class;
	void *tmp_obj;
	char *version;
	void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
	int max;

	/* Read HW Rev First */
	func_class = READ_REG32(addr + LBA_FCLASS);

	if (IS_ELROY(dev)) {
		func_class &= 0xf;
		switch (func_class) {
		case 0:	version = "TR1.0"; break;
		case 1:	version = "TR2.0"; break;
		case 2:	version = "TR2.1"; break;
		case 3:	version = "TR2.2"; break;
		case 4:	version = "TR3.0"; break;
		case 5:	version = "TR4.0"; break;
		default: version = "TR4+";
		}

		printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
			version, func_class & 0xf, (long)dev->hpa.start);

		if (func_class < 2) {
			printk(KERN_WARNING "Can't support LBA older than "
				"TR2.1 - continuing under adversity.\n");
		}

#if 0
		/* Elroy TR4.0 should work with simple algorithm.
		   But it doesn't. Still missing something. *sigh*
		*/
		if (func_class > 4) {
			cfg_ops = &mercury_cfg_ops;
		} else
#endif
		{
			cfg_ops = &elroy_cfg_ops;
		}

	} else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
		int major, minor;

		func_class &= 0xff;
		major = func_class >> 4, minor = func_class & 0xf;

		/* We could use one printk for both Elroy and Mercury,
		 * were it not for the different mask applied to func_class.
		 */
		printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
			IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
			minor, func_class, (long)dev->hpa.start);

		cfg_ops = &mercury_cfg_ops;
	} else {
		printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
			(long)dev->hpa.start);
		return -ENODEV;
	}

	/* Tell I/O SAPIC driver we have an IRQ handler/region. */
	tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);

	/* NOTE: PCI devices (e.g. 103c:1005 graphics card) which don't
	** have an IRT entry will get NULL back from iosapic code.
	*/

	lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
	if (!lba_dev) {
		printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
		return(1);
	}


	/* ---------- First : initialize data we already have --------- */

	lba_dev->hw_rev = func_class;
	lba_dev->hba.base_addr = addr;
	lba_dev->hba.dev = dev;
	lba_dev->iosapic_obj = tmp_obj;		/* save interrupt handle */
	lba_dev->hba.iommu = sba_get_iommu(dev);	/* get iommu data */
	parisc_set_drvdata(dev, lba_dev);

	/* ------------ Second : initialize common stuff ---------- */
	pci_bios = &lba_bios_ops;
	pcibios_register_hba(HBA_DATA(lba_dev));
	spin_lock_init(&lba_dev->lba_lock);

	if (lba_hw_init(lba_dev))
		return(1);

	/* ---------- Third : setup I/O Port and MMIO resources --------- */

	if (is_pdc_pat()) {
		/* PDC PAT firmware uses PIOP region of GMMIO space. */
		pci_port = &lba_pat_port_ops;
		/* Go ask PDC PAT what resources this LBA has */
		lba_pat_resources(dev, lba_dev);
	} else {
		if (!astro_iop_base) {
			/* Sprockets PDC uses NPIOP region */
			astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
			pci_port = &lba_astro_port_ops;
		}

		/* Poke the chip a bit for /proc output */
		lba_legacy_resources(dev, lba_dev);
	}

	if (lba_dev->hba.bus_num.start < lba_next_bus)
		lba_dev->hba.bus_num.start = lba_next_bus;

	/* Overlaps with elmmio can (and should) fail here.
	 * We will prune (or ignore) the distributed range.
	 *
	 * FIXME: SBA code should register all elmmio ranges first.
	 *	that would take care of elmmio ranges routed
	 *	to a different rope (already discovered) from
	 *	getting registered *after* LBA code has already
	 *	registered its distributed lmmio range.
	 */
	if (truncate_pat_collision(&iomem_resource,
				&(lba_dev->hba.lmmio_space))) {
		printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
				(long)lba_dev->hba.lmmio_space.start,
				(long)lba_dev->hba.lmmio_space.end);
		lba_dev->hba.lmmio_space.flags = 0;
	}

	pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
				HBA_PORT_BASE(lba_dev->hba.hba_num));
	if (lba_dev->hba.elmmio_space.start)
		pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.lmmio_space.flags)
		pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
					lba_dev->hba.lmmio_space_offset);
	if (lba_dev->hba.gmmio_space.flags)
		pci_add_resource(&resources, &lba_dev->hba.gmmio_space);

	pci_add_resource(&resources, &lba_dev->hba.bus_num);

	dev->dev.platform_data = lba_dev;
	lba_bus = lba_dev->hba.hba_bus =
		pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
				cfg_ops, NULL, &resources);
	if (!lba_bus) {
		pci_free_resource_list(&resources);
		return 0;
	}

	max = pci_scan_child_bus(lba_bus);

	/* This is in lieu of calling pci_assign_unassigned_resources() */
	if (is_pdc_pat()) {
		/* assign resources to un-initialized devices */

		DBG_PAT("LBA pci_bus_size_bridges()\n");
		pci_bus_size_bridges(lba_bus);

		DBG_PAT("LBA pci_bus_assign_resources()\n");
		pci_bus_assign_resources(lba_bus);

#ifdef DEBUG_LBA_PAT
		DBG_PAT("\nLBA PIOP resource tree\n");
		lba_dump_res(&lba_dev->hba.io_space, 2);
		DBG_PAT("\nLBA LMMIO resource tree\n");
		lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
	}
	pci_enable_bridges(lba_bus);

	/*
	** Once PCI register ops has walked the bus, access to config
	** space is restricted. Avoids master aborts on config cycles.
	** Early LBA revs go fatal on *any* master abort.
	*/
	if (cfg_ops == &elroy_cfg_ops) {
		lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
	}

	lba_next_bus = max + 1;
	pci_bus_add_devices(lba_bus);

	/* Whew! Finally done! Tell services we got this one covered. */
	return 0;
}

static struct parisc_device_id lba_tbl[] = {
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
	{ 0, }
};

static struct parisc_driver lba_driver = {
	.name =		MODULE_NAME,
	.id_table =	lba_tbl,
	.probe =	lba_driver_probe,
};

/*
** One time initialization to let the world know the LBA was found.
** Must be called exactly once before pci_init().
*/
void __init lba_init(void)
{
	register_parisc_driver(&lba_driver);
}

/*
** Initialize the IBASE/IMASK registers for LBA (Elroy).
** Only called from sba_iommu.c in order to route ranges (MMIO vs DMA).
** sba_iommu is responsible for locking (none needed at init time).
*/
void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
{
	void __iomem *base_addr = ioremap_nocache(lba->hpa.start, 4096);

	imask <<= 2;	/* adjust for hints - 2 more bits */

	/* Make sure we aren't trying to set bits that aren't writeable. */
	WARN_ON((ibase & 0x001fffff) != 0);
	WARN_ON((imask & 0x001fffff) != 0);

	DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
	WRITE_REG32(imask, base_addr + LBA_IMASK);
	WRITE_REG32(ibase, base_addr + LBA_IBASE);
	iounmap(base_addr);
}