Root/
1 | /* |
2 | * Copyright (c) 2003-2006 Silicon Graphics, Inc. All Rights Reserved. |
3 | * Copyright (C) 2008-2009 MontaVista Software, Inc. |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of version 2 of the GNU General Public License |
7 | * as published by the Free Software Foundation. |
8 | * |
9 | * This program is distributed in the hope that it would be useful, but |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
12 | * |
13 | * You should have received a copy of the GNU General Public |
14 | * License along with this program; if not, write the Free Software |
15 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
16 | * |
17 | * For further information regarding this notice, see: |
18 | * |
19 | * http://oss.sgi.com/projects/GenInfo/NoticeExplan |
20 | */ |
21 | |
22 | #include <linux/module.h> |
23 | #include <linux/types.h> |
24 | #include <linux/pci.h> |
25 | #include <linux/delay.h> |
26 | #include <linux/init.h> |
27 | #include <linux/kernel.h> |
28 | #include <linux/ioport.h> |
29 | #include <linux/blkdev.h> |
30 | #include <linux/scatterlist.h> |
31 | #include <linux/ioc4.h> |
32 | #include <linux/io.h> |
33 | #include <linux/ide.h> |
34 | |
35 | #define DRV_NAME "SGIIOC4" |
36 | |
37 | /* IOC4 Specific Definitions */ |
38 | #define IOC4_CMD_OFFSET 0x100 |
39 | #define IOC4_CTRL_OFFSET 0x120 |
40 | #define IOC4_DMA_OFFSET 0x140 |
41 | #define IOC4_INTR_OFFSET 0x0 |
42 | |
43 | #define IOC4_TIMING 0x00 |
44 | #define IOC4_DMA_PTR_L 0x01 |
45 | #define IOC4_DMA_PTR_H 0x02 |
46 | #define IOC4_DMA_ADDR_L 0x03 |
47 | #define IOC4_DMA_ADDR_H 0x04 |
48 | #define IOC4_BC_DEV 0x05 |
49 | #define IOC4_BC_MEM 0x06 |
50 | #define IOC4_DMA_CTRL 0x07 |
51 | #define IOC4_DMA_END_ADDR 0x08 |
52 | |
53 | /* Bits in the IOC4 Control/Status Register */ |
54 | #define IOC4_S_DMA_START 0x01 |
55 | #define IOC4_S_DMA_STOP 0x02 |
56 | #define IOC4_S_DMA_DIR 0x04 |
57 | #define IOC4_S_DMA_ACTIVE 0x08 |
58 | #define IOC4_S_DMA_ERROR 0x10 |
59 | #define IOC4_ATA_MEMERR 0x02 |
60 | |
61 | /* Read/Write Directions */ |
62 | #define IOC4_DMA_WRITE 0x04 |
63 | #define IOC4_DMA_READ 0x00 |
64 | |
65 | /* Interrupt Register Offsets */ |
66 | #define IOC4_INTR_REG 0x03 |
67 | #define IOC4_INTR_SET 0x05 |
68 | #define IOC4_INTR_CLEAR 0x07 |
69 | |
70 | #define IOC4_IDE_CACHELINE_SIZE 128 |
71 | #define IOC4_CMD_CTL_BLK_SIZE 0x20 |
72 | #define IOC4_SUPPORTED_FIRMWARE_REV 46 |
73 | |
/*
 * Register layout of the IOC4 DMA engine.  This struct is never used to
 * access the hardware directly; it exists only to size the DMA register
 * window (see the sizeof() in ide_dma_sgiioc4()).
 */
struct ioc4_dma_regs {
	u32 timing_reg0;	/* timing register pair */
	u32 timing_reg1;
	u32 low_mem_ptr;	/* scatter/gather list pointer */
	u32 high_mem_ptr;
	u32 low_mem_addr;	/* current memory address */
	u32 high_mem_addr;
	u32 dev_byte_count;	/* residual byte counters */
	u32 mem_byte_count;
	u32 status;		/* DMA control/status */
};
85 | |
86 | /* Each Physical Region Descriptor Entry size is 16 bytes (2 * 64 bits) */ |
87 | /* IOC4 has only 1 IDE channel */ |
88 | #define IOC4_PRD_BYTES 16 |
89 | #define IOC4_PRD_ENTRIES (PAGE_SIZE / (4 * IOC4_PRD_BYTES)) |
90 | |
91 | |
92 | static void sgiioc4_init_hwif_ports(struct ide_hw *hw, |
93 | unsigned long data_port, |
94 | unsigned long ctrl_port, |
95 | unsigned long irq_port) |
96 | { |
97 | unsigned long reg = data_port; |
98 | int i; |
99 | |
100 | /* Registers are word (32 bit) aligned */ |
101 | for (i = 0; i <= 7; i++) |
102 | hw->io_ports_array[i] = reg + i * 4; |
103 | |
104 | hw->io_ports.ctl_addr = ctrl_port; |
105 | hw->io_ports.irq_addr = irq_port; |
106 | } |
107 | |
108 | static int sgiioc4_checkirq(ide_hwif_t *hwif) |
109 | { |
110 | unsigned long intr_addr = hwif->io_ports.irq_addr + IOC4_INTR_REG * 4; |
111 | |
112 | if (readl((void __iomem *)intr_addr) & 0x03) |
113 | return 1; |
114 | |
115 | return 0; |
116 | } |
117 | |
118 | static u8 sgiioc4_read_status(ide_hwif_t *); |
119 | |
/*
 * Acknowledge a pending IOC4 IDE interrupt for @drive, reporting any
 * PCI bus error that accompanied it.
 *
 * Returns the low two interrupt-register bits; when an interrupt was
 * pending these are re-read after the clear, so non-zero here means the
 * bits would not clear.
 */
static int sgiioc4_clearirq(ide_drive_t *drive)
{
	u32 intr_reg;
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	/* interrupt register sits IOC4_INTR_REG words past irq_addr */
	unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);

	/* Code to check for PCI error conditions */
	intr_reg = readl((void __iomem *)other_ir);
	if (intr_reg & 0x03) { /* Valid IOC4-IDE interrupt */
		/*
		 * Using sgiioc4_read_status to read the Status register has a
		 * side effect of clearing the interrupt. The first read should
		 * clear it if it is set. The second read should return
		 * a "clear" status if it got cleared. If not, then spin
		 * for a bit trying to clear it.
		 */
		u8 stat = sgiioc4_read_status(hwif);
		int count = 0;

		stat = sgiioc4_read_status(hwif);
		/* poll for up to ~100us for BUSY to drop */
		while ((stat & ATA_BUSY) && (count++ < 100)) {
			udelay(1);
			stat = sgiioc4_read_status(hwif);
		}

		if (intr_reg & 0x02) {
			struct pci_dev *dev = to_pci_dev(hwif->dev);
			/* Error when transferring DMA data on PCI bus */
			u32 pci_err_addr_low, pci_err_addr_high,
			    pci_stat_cmd_reg;

			/* error address is latched in the first two words
			 * of the interrupt block */
			pci_err_addr_low =
				readl((void __iomem *)io_ports->irq_addr);
			pci_err_addr_high =
				readl((void __iomem *)(io_ports->irq_addr + 4));
			pci_read_config_dword(dev, PCI_COMMAND,
					      &pci_stat_cmd_reg);
			printk(KERN_ERR "%s(%s): PCI Bus Error when doing DMA: "
			       "status-cmd reg is 0x%x\n",
			       __func__, drive->name, pci_stat_cmd_reg);
			printk(KERN_ERR "%s(%s): PCI Error Address is 0x%x%x\n",
			       __func__, drive->name,
			       pci_err_addr_high, pci_err_addr_low);
			/* Clear the PCI Error indicator */
			pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
		}

		/* Clear the Interrupt, Error bits on the IOC4 */
		writel(0x03, (void __iomem *)other_ir);

		intr_reg = readl((void __iomem *)other_ir);
	}

	return intr_reg & 3;
}
176 | |
177 | static void sgiioc4_dma_start(ide_drive_t *drive) |
178 | { |
179 | ide_hwif_t *hwif = drive->hwif; |
180 | unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4; |
181 | unsigned int reg = readl((void __iomem *)ioc4_dma_addr); |
182 | unsigned int temp_reg = reg | IOC4_S_DMA_START; |
183 | |
184 | writel(temp_reg, (void __iomem *)ioc4_dma_addr); |
185 | } |
186 | |
187 | static u32 sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base) |
188 | { |
189 | unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4; |
190 | u32 ioc4_dma; |
191 | int count; |
192 | |
193 | count = 0; |
194 | ioc4_dma = readl((void __iomem *)ioc4_dma_addr); |
195 | while ((ioc4_dma & IOC4_S_DMA_STOP) && (count++ < 200)) { |
196 | udelay(1); |
197 | ioc4_dma = readl((void __iomem *)ioc4_dma_addr); |
198 | } |
199 | return ioc4_dma; |
200 | } |
201 | |
202 | /* Stops the IOC4 DMA Engine */ |
203 | static int sgiioc4_dma_end(ide_drive_t *drive) |
204 | { |
205 | u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0; |
206 | ide_hwif_t *hwif = drive->hwif; |
207 | unsigned long dma_base = hwif->dma_base; |
208 | int dma_stat = 0; |
209 | unsigned long *ending_dma = ide_get_hwifdata(hwif); |
210 | |
211 | writel(IOC4_S_DMA_STOP, (void __iomem *)(dma_base + IOC4_DMA_CTRL * 4)); |
212 | |
213 | ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base); |
214 | |
215 | if (ioc4_dma & IOC4_S_DMA_STOP) { |
216 | printk(KERN_ERR |
217 | "%s(%s): IOC4 DMA STOP bit is still 1 :" |
218 | "ioc4_dma_reg 0x%x\n", |
219 | __func__, drive->name, ioc4_dma); |
220 | dma_stat = 1; |
221 | } |
222 | |
223 | /* |
224 | * The IOC4 will DMA 1's to the ending DMA area to indicate that |
225 | * previous data DMA is complete. This is necessary because of relaxed |
226 | * ordering between register reads and DMA writes on the Altix. |
227 | */ |
228 | while ((cnt++ < 200) && (!valid)) { |
229 | for (num = 0; num < 16; num++) { |
230 | if (ending_dma[num]) { |
231 | valid = 1; |
232 | break; |
233 | } |
234 | } |
235 | udelay(1); |
236 | } |
237 | if (!valid) { |
238 | printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__, |
239 | drive->name); |
240 | dma_stat = 1; |
241 | } |
242 | |
243 | bc_dev = readl((void __iomem *)(dma_base + IOC4_BC_DEV * 4)); |
244 | bc_mem = readl((void __iomem *)(dma_base + IOC4_BC_MEM * 4)); |
245 | |
246 | if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) { |
247 | if (bc_dev > bc_mem + 8) { |
248 | printk(KERN_ERR |
249 | "%s(%s): WARNING!! byte_count_dev %d " |
250 | "!= byte_count_mem %d\n", |
251 | __func__, drive->name, bc_dev, bc_mem); |
252 | } |
253 | } |
254 | |
255 | return dma_stat; |
256 | } |
257 | |
/*
 * Mandatory ->set_dma_mode hook.  Intentionally empty: the IOC4 port
 * advertises only a single DMA mode (ATA_MWDMA2_ONLY in
 * sgiioc4_port_info), so there is no per-mode timing to program.
 */
static void sgiioc4_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
261 | |
262 | /* Returns 1 if DMA IRQ issued, 0 otherwise */ |
263 | static int sgiioc4_dma_test_irq(ide_drive_t *drive) |
264 | { |
265 | return sgiioc4_checkirq(drive->hwif); |
266 | } |
267 | |
268 | static void sgiioc4_dma_host_set(ide_drive_t *drive, int on) |
269 | { |
270 | if (!on) |
271 | sgiioc4_clearirq(drive); |
272 | } |
273 | |
274 | static void sgiioc4_resetproc(ide_drive_t *drive) |
275 | { |
276 | struct ide_cmd *cmd = &drive->hwif->cmd; |
277 | |
278 | sgiioc4_dma_end(drive); |
279 | ide_dma_unmap_sg(drive, cmd); |
280 | sgiioc4_clearirq(drive); |
281 | } |
282 | |
283 | static void sgiioc4_dma_lost_irq(ide_drive_t *drive) |
284 | { |
285 | sgiioc4_resetproc(drive); |
286 | |
287 | ide_dma_lost_irq(drive); |
288 | } |
289 | |
/*
 * Read the ATA status register.  On IOC4 this read has the side effect
 * of clearing the interrupt condition, so when the drive is not busy
 * we also explicitly ack the interrupt/error bits in the IOC4
 * interrupt register (this is relied on by sgiioc4_clearirq()).
 */
static u8 sgiioc4_read_status(ide_hwif_t *hwif)
{
	unsigned long port = hwif->io_ports.status_addr;
	u8 reg = (u8) readb((void __iomem *) port);

	if (!(reg & ATA_BUSY)) {	/* Not busy... check for interrupt */
		/*
		 * status_addr is BAR0 + IOC4_CMD_OFFSET (0x100) + 7*4 = 0x11c,
		 * so "port - 0x110" lands on BAR0 + 0x0c, i.e. the same
		 * irq_addr + IOC4_INTR_REG*4 used by sgiioc4_checkirq().
		 */
		unsigned long other_ir = port - 0x110;
		unsigned int intr_reg = (u32) readl((void __iomem *) other_ir);

		/* Clear the Interrupt, Error bits on the IOC4 */
		if (intr_reg & 0x03) {
			writel(0x03, (void __iomem *) other_ir);
			intr_reg = (u32) readl((void __iomem *) other_ir);
		}
	}

	return reg;
}
308 | |
/*
 * Set up DMA for one IOC4 interface: claim the DMA register window,
 * allocate the PRD table and the "ending DMA" completion buffer.
 *
 * Returns 0 on success, -1 on failure (all claimed resources are
 * released on the failure paths).
 */
static int __devinit ide_dma_sgiioc4(ide_hwif_t *hwif,
				     const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	/* physical address of the DMA registers, for request_mem_region() */
	unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
	/* window size = size of the DMA register block */
	int num_ports = sizeof(struct ioc4_dma_regs);
	void *pad;

	printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);

	if (request_mem_region(dma_base, num_ports, hwif->name) == NULL) {
		printk(KERN_ERR "%s(%s) -- ERROR: addresses 0x%08lx to 0x%08lx "
		       "already in use\n", __func__, hwif->name,
		       dma_base, dma_base + num_ports - 1);
		return -1;
	}

	/*
	 * hwif->dma_base must be the *virtual* address: irq_addr is the
	 * ioremapped BAR 0 base (IOC4_INTR_OFFSET is 0, see
	 * sgiioc4_ide_setup_pci_device()).
	 */
	hwif->dma_base = (unsigned long)hwif->io_ports.irq_addr +
			 IOC4_DMA_OFFSET;

	hwif->sg_max_nents = IOC4_PRD_ENTRIES;

	hwif->prd_max_nents = IOC4_PRD_ENTRIES;
	hwif->prd_ent_size = IOC4_PRD_BYTES;

	if (ide_allocate_dma_engine(hwif))
		goto dma_pci_alloc_failure;

	/*
	 * "Ending DMA" buffer polled by sgiioc4_dma_end(); DMA handle is
	 * stashed (cast) into hwif->extra_base, CPU pointer in hwifdata.
	 */
	pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
				   (dma_addr_t *)&hwif->extra_base);
	if (pad) {
		ide_set_hwifdata(hwif, pad);
		return 0;
	}

	ide_release_dma_engine(hwif);

	printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
	       __func__, hwif->name);
	printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);

dma_pci_alloc_failure:
	release_mem_region(dma_base, num_ports);

	return -1;
}
356 | |
/*
 * Program the IOC4 DMA engine for one transfer: make sure any previous
 * transfer is stopped, load the scatter/gather list pointer and the
 * ending-DMA address, then write the direction bits.  The transfer
 * itself is started later by sgiioc4_dma_start().
 *
 * @dma_direction: IOC4_DMA_READ or IOC4_DMA_WRITE.
 */
static void sgiioc4_configure_for_dma(int dma_direction, ide_drive_t *drive)
{
	u32 ioc4_dma;
	ide_hwif_t *hwif = drive->hwif;
	unsigned long dma_base = hwif->dma_base;
	unsigned long ioc4_dma_addr = dma_base + IOC4_DMA_CTRL * 4;
	u32 dma_addr, ending_dma_addr;

	ioc4_dma = readl((void __iomem *)ioc4_dma_addr);

	/* A transfer from before should not still be running; stop it. */
	if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
		printk(KERN_WARNING "%s(%s): Warning!! DMA from previous "
		       "transfer was still active\n", __func__, drive->name);
		writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
		ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR "%s(%s): IOC4 DMA STOP bit is "
			       "still 1\n", __func__, drive->name);
	}

	/* Likewise clear a latched error from the previous transfer. */
	ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
	if (ioc4_dma & IOC4_S_DMA_ERROR) {
		printk(KERN_WARNING "%s(%s): Warning!! DMA Error during "
		       "previous transfer, status 0x%x\n",
		       __func__, drive->name, ioc4_dma);
		writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
		ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

		if (ioc4_dma & IOC4_S_DMA_STOP)
			printk(KERN_ERR "%s(%s): IOC4 DMA STOP bit is "
			       "still 1\n", __func__, drive->name);
	}

	/*
	 * Address of the Scatter Gather List.
	 * NOTE(review): cpu_to_le32 before writel looks like a double
	 * byte-swap on big-endian hosts, since writel itself converts to
	 * little-endian -- confirm against IOC4/Altix expectations.
	 */
	dma_addr = cpu_to_le32(hwif->dmatable_dma);
	writel(dma_addr, (void __iomem *)(dma_base + IOC4_DMA_PTR_L * 4));

	/* Address of the Ending DMA; zero the buffer so sgiioc4_dma_end()
	 * can detect the hardware's completion write. */
	memset(ide_get_hwifdata(hwif), 0, IOC4_IDE_CACHELINE_SIZE);
	ending_dma_addr = cpu_to_le32(hwif->extra_base);
	writel(ending_dma_addr, (void __iomem *)(dma_base +
						 IOC4_DMA_END_ADDR * 4));

	writel(dma_direction, (void __iomem *)ioc4_dma_addr);
}
404 | |
/* IOC4 Scatter Gather list Format */
/* 128 Bit entries to support 64 bit addresses in the future */
/* The Scatter Gather list Entry should be in the BIG-ENDIAN Format */
/* --------------------------------------------------------------------- */
/* | Upper 32 bits - Zero | Lower 32 bits- address | */
/* --------------------------------------------------------------------- */
/* | Upper 32 bits - Zero |EOL| 15 unused | 16 Bit Length| */
/* --------------------------------------------------------------------- */
/* Creates the scatter gather list, DMA Table */

/*
 * Build the IOC4 DMA table from the command's mapped scatter/gather
 * list.  Segments are split so that no chunk crosses a 64KB boundary;
 * the EOL bit (bit 31) is set in the length word of the final entry.
 *
 * Returns the number of table entries built, or 0 when the table would
 * overflow (caller then falls back to PIO for this request).
 */
static int sgiioc4_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int count = 0, i = cmd->sg_nents;
	struct scatterlist *sg = hwif->sg_table;

	while (i && sg_dma_len(sg)) {
		dma_addr_t cur_addr;
		int cur_len;
		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			if (count++ >= IOC4_PRD_ENTRIES) {
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				return 0;
			} else {
				/* bytes until the next 64KB boundary */
				u32 bcount =
				    0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;

				/*
				 * Put the address, length in
				 * the IOC4 dma-table format
				 */
				*table = 0x0;
				table++;
				*table = cpu_to_be32(cur_addr);
				table++;
				*table = 0x0;
				table++;

				*table = cpu_to_be32(bcount);
				table++;

				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		/* step back to the last length word and flag it as EOL */
		table--;
		*table |= cpu_to_be32(0x80000000);
		return count;
	}

	return 0; /* revert to PIO for this request */
}
472 | |
473 | static int sgiioc4_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) |
474 | { |
475 | int ddir; |
476 | u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE); |
477 | |
478 | if (sgiioc4_build_dmatable(drive, cmd) == 0) |
479 | /* try PIO instead of DMA */ |
480 | return 1; |
481 | |
482 | if (write) |
483 | /* Writes TO the IOC4 FROM Main Memory */ |
484 | ddir = IOC4_DMA_READ; |
485 | else |
486 | /* Writes FROM the IOC4 TO Main Memory */ |
487 | ddir = IOC4_DMA_WRITE; |
488 | |
489 | sgiioc4_configure_for_dma(ddir, drive); |
490 | |
491 | return 0; |
492 | } |
493 | |
/*
 * Task-file operations: all generic defaults except read_status, which
 * must go through sgiioc4_read_status() so IOC4 interrupt state gets
 * picked up and cleared.
 */
static const struct ide_tp_ops sgiioc4_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= sgiioc4_read_status,
	.read_altstatus		= ide_read_altstatus,
	.write_devctl		= ide_write_devctl,

	.dev_select		= ide_dev_select,
	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};
507 | |
/* Port operations: a no-op set_dma_mode (single supported mode) and a
 * resetproc that stops DMA and clears IRQs. */
static const struct ide_port_ops sgiioc4_port_ops = {
	.set_dma_mode		= sgiioc4_set_dma_mode,
	/* reset DMA engine, clear IRQs */
	.resetproc		= sgiioc4_resetproc,
};
513 | |
/* DMA operations implemented by this driver (see the functions above). */
static const struct ide_dma_ops sgiioc4_dma_ops = {
	.dma_host_set		= sgiioc4_dma_host_set,
	.dma_setup		= sgiioc4_dma_setup,
	.dma_start		= sgiioc4_dma_start,
	.dma_end		= sgiioc4_dma_end,
	.dma_test_irq		= sgiioc4_dma_test_irq,
	.dma_lost_irq		= sgiioc4_dma_lost_irq,
};
522 | |
/* Host description passed to ide_host_add() during probe. */
static const struct ide_port_info sgiioc4_port_info __devinitconst = {
	.name			= DRV_NAME,
	.chipset		= ide_pci,
	.init_dma		= ide_dma_sgiioc4,
	.tp_ops			= &sgiioc4_tp_ops,
	.port_ops		= &sgiioc4_port_ops,
	.dma_ops		= &sgiioc4_dma_ops,
	.host_flags		= IDE_HFLAG_MMIO,
	.irq_flags		= IRQF_SHARED,
	/* the only DMA timing the hardware is programmed for */
	.mwdma_mask		= ATA_MWDMA2_ONLY,
};
534 | |
535 | static int __devinit sgiioc4_ide_setup_pci_device(struct pci_dev *dev) |
536 | { |
537 | unsigned long cmd_base, irqport; |
538 | unsigned long bar0, cmd_phys_base, ctl; |
539 | void __iomem *virt_base; |
540 | struct ide_hw hw, *hws[] = { &hw }; |
541 | int rc; |
542 | |
543 | /* Get the CmdBlk and CtrlBlk base registers */ |
544 | bar0 = pci_resource_start(dev, 0); |
545 | virt_base = pci_ioremap_bar(dev, 0); |
546 | if (virt_base == NULL) { |
547 | printk(KERN_ERR "%s: Unable to remap BAR 0 address: 0x%lx\n", |
548 | DRV_NAME, bar0); |
549 | return -ENOMEM; |
550 | } |
551 | cmd_base = (unsigned long)virt_base + IOC4_CMD_OFFSET; |
552 | ctl = (unsigned long)virt_base + IOC4_CTRL_OFFSET; |
553 | irqport = (unsigned long)virt_base + IOC4_INTR_OFFSET; |
554 | |
555 | cmd_phys_base = bar0 + IOC4_CMD_OFFSET; |
556 | if (request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, |
557 | DRV_NAME) == NULL) { |
558 | printk(KERN_ERR "%s %s -- ERROR: addresses 0x%08lx to 0x%08lx " |
559 | "already in use\n", DRV_NAME, pci_name(dev), |
560 | cmd_phys_base, cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); |
561 | rc = -EBUSY; |
562 | goto req_mem_rgn_err; |
563 | } |
564 | |
565 | /* Initialize the IO registers */ |
566 | memset(&hw, 0, sizeof(hw)); |
567 | sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport); |
568 | hw.irq = dev->irq; |
569 | hw.dev = &dev->dev; |
570 | |
571 | /* Initialize chipset IRQ registers */ |
572 | writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); |
573 | |
574 | rc = ide_host_add(&sgiioc4_port_info, hws, 1, NULL); |
575 | if (!rc) |
576 | return 0; |
577 | |
578 | release_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE); |
579 | req_mem_rgn_err: |
580 | iounmap(virt_base); |
581 | return rc; |
582 | } |
583 | |
584 | static unsigned int __devinit pci_init_sgiioc4(struct pci_dev *dev) |
585 | { |
586 | int ret; |
587 | |
588 | printk(KERN_INFO "%s: IDE controller at PCI slot %s, revision %d\n", |
589 | DRV_NAME, pci_name(dev), dev->revision); |
590 | |
591 | if (dev->revision < IOC4_SUPPORTED_FIRMWARE_REV) { |
592 | printk(KERN_ERR "Skipping %s IDE controller in slot %s: " |
593 | "firmware is obsolete - please upgrade to " |
594 | "revision46 or higher\n", |
595 | DRV_NAME, pci_name(dev)); |
596 | ret = -EAGAIN; |
597 | goto out; |
598 | } |
599 | ret = sgiioc4_ide_setup_pci_device(dev); |
600 | out: |
601 | return ret; |
602 | } |
603 | |
604 | int __devinit ioc4_ide_attach_one(struct ioc4_driver_data *idd) |
605 | { |
606 | /* |
607 | * PCI-RT does not bring out IDE connection. |
608 | * Do not attach to this particular IOC4. |
609 | */ |
610 | if (idd->idd_variant == IOC4_VARIANT_PCI_RT) |
611 | return 0; |
612 | |
613 | return pci_init_sgiioc4(idd->idd_pdev); |
614 | } |
615 | |
/* Submodule descriptor registered with the IOC4 core driver;
 * is_probe is the per-device attach callback (see linux/ioc4.h). */
static struct ioc4_submodule __devinitdata ioc4_ide_submodule = {
	.is_name = "IOC4_ide",
	.is_owner = THIS_MODULE,
	.is_probe = ioc4_ide_attach_one,
};
621 | |
/* Module init: register this driver as an IOC4 submodule. */
static int __init ioc4_ide_init(void)
{
	return ioc4_register_submodule(&ioc4_ide_submodule);
}
626 | |
/* Registered late so the IDE core is initialized first. */
late_initcall(ioc4_ide_init); /* Call only after IDE init is done */

MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon");
MODULE_DESCRIPTION("IDE PCI driver module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");
632 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9