Root/
1 | /* |
2 | * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines. |
3 | * Due to massive hardware bugs, UltraDMA is only supported |
4 | * on the 646U2 and not on the 646U. |
5 | * |
6 | * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) |
7 | * Copyright (C) 1998 David S. Miller (davem@redhat.com) |
8 | * |
9 | * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org> |
10 | * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz |
11 | * Copyright (C) 2007,2009 MontaVista Software, Inc. <source@mvista.com> |
12 | */ |
13 | |
14 | #include <linux/module.h> |
15 | #include <linux/types.h> |
16 | #include <linux/pci.h> |
17 | #include <linux/ide.h> |
18 | #include <linux/init.h> |
19 | |
20 | #include <asm/io.h> |
21 | |
22 | #define DRV_NAME "cmd64x" |
23 | |
24 | /* |
25 | * CMD64x specific registers definition. |
26 | */ |
27 | #define CFR 0x50 |
28 | #define CFR_INTR_CH0 0x04 |
29 | |
30 | #define CMDTIM 0x52 |
31 | #define ARTTIM0 0x53 |
32 | #define DRWTIM0 0x54 |
33 | #define ARTTIM1 0x55 |
34 | #define DRWTIM1 0x56 |
35 | #define ARTTIM23 0x57 |
36 | #define ARTTIM23_DIS_RA2 0x04 |
37 | #define ARTTIM23_DIS_RA3 0x08 |
38 | #define ARTTIM23_INTR_CH1 0x10 |
39 | #define DRWTIM2 0x58 |
40 | #define BRST 0x59 |
41 | #define DRWTIM3 0x5b |
42 | |
43 | #define BMIDECR0 0x70 |
44 | #define MRDMODE 0x71 |
45 | #define MRDMODE_INTR_CH0 0x04 |
46 | #define MRDMODE_INTR_CH1 0x08 |
47 | #define UDIDETCR0 0x73 |
48 | #define DTPR0 0x74 |
49 | #define BMIDECR1 0x78 |
50 | #define BMIDECSR 0x79 |
51 | #define UDIDETCR1 0x7B |
52 | #define DTPR1 0x7C |
53 | |
/*
 * Program the active/recovery clock counts and the address setup time
 * for @drive into the chip's DRWTIM/ARTTIM registers.
 *
 * @drive: target drive (drive->dn selects the per-drive register set)
 * @mode:  an XFER_* transfer mode (PIO or MWDMA) to compute timings for
 */
static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
{
	ide_hwif_t *hwif = drive->hwif;
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	int bus_speed = ide_pci_clk ? ide_pci_clk : 33;	/* MHz; default 33 */
	const unsigned long T = 1000000 / bus_speed;	/* clock period, ns */
	/* maps a recovery clock count to its register encoding (16 -> 0) */
	static const u8 recovery_values[] =
		{15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
	/* address setup encodings for 0..5 setup clocks */
	static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
	/* per-drive register addresses, indexed by drive->dn */
	static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
	static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3};
	struct ide_timing t;
	u8 arttim = 0;

	ide_timing_compute(drive, mode, &t, T, 0);

	/*
	 * In case we've got too long recovery phase, try to lengthen
	 * the active phase
	 */
	if (t.recover > 16) {
		t.active += t.recover - 16;
		t.recover = 16;
	}
	if (t.active > 16)		/* shouldn't actually happen... */
		t.active = 16;

	/*
	 * Convert values to internal chipset representation
	 */
	t.recover = recovery_values[t.recover];
	t.active &= 0x0f;		/* active count of 16 encodes as 0 */

	/* Program the active/recovery counts into the DRWTIM register */
	pci_write_config_byte(dev, drwtim_regs[drive->dn],
			      (t.active << 4) | t.recover);

	/*
	 * The primary channel has individual address setup timing registers
	 * for each drive and the hardware selects the slowest timing itself.
	 * The secondary channel has one common register and we have to select
	 * the slowest address setup timing ourselves.
	 */
	if (hwif->channel) {
		ide_drive_t *pair = ide_get_pair_dev(drive);

		if (pair) {
			struct ide_timing tp;

			/* merge in the pair drive's PIO (and DMA) setup */
			ide_timing_compute(pair, pair->pio_mode, &tp, T, 0);
			ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP);
			if (pair->dma_mode) {
				ide_timing_compute(pair, pair->dma_mode,
						&tp, T, 0);
				ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP);
			}
		}
	}

	if (t.setup > 5)		/* shouldn't actually happen... */
		t.setup = 5;

	/*
	 * Program the address setup clocks into the ARTTIM registers.
	 * Avoid clearing the secondary channel's interrupt bit.
	 */
	(void) pci_read_config_byte (dev, arttim_regs[drive->dn], &arttim);
	if (hwif->channel)
		arttim &= ~ARTTIM23_INTR_CH1;
	arttim &= ~0xc0;		/* setup field lives in bits 7:6 */
	arttim |= setup_values[t.setup];
	(void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim);
}
127 | |
128 | /* |
129 | * Attempts to set drive's PIO mode. |
130 | * Special cases are 8: prefetch off, 9: prefetch on (both never worked) |
131 | */ |
132 | |
133 | static void cmd64x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) |
134 | { |
135 | const u8 pio = drive->pio_mode - XFER_PIO_0; |
136 | |
137 | /* |
138 | * Filter out the prefetch control values |
139 | * to prevent PIO5 from being programmed |
140 | */ |
141 | if (pio == 8 || pio == 9) |
142 | return; |
143 | |
144 | cmd64x_program_timings(drive, XFER_PIO_0 + pio); |
145 | } |
146 | |
147 | static void cmd64x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) |
148 | { |
149 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
150 | u8 unit = drive->dn & 0x01; |
151 | u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0; |
152 | const u8 speed = drive->dma_mode; |
153 | |
154 | pci_read_config_byte(dev, pciU, ®U); |
155 | regU &= ~(unit ? 0xCA : 0x35); |
156 | |
157 | switch(speed) { |
158 | case XFER_UDMA_5: |
159 | regU |= unit ? 0x0A : 0x05; |
160 | break; |
161 | case XFER_UDMA_4: |
162 | regU |= unit ? 0x4A : 0x15; |
163 | break; |
164 | case XFER_UDMA_3: |
165 | regU |= unit ? 0x8A : 0x25; |
166 | break; |
167 | case XFER_UDMA_2: |
168 | regU |= unit ? 0x42 : 0x11; |
169 | break; |
170 | case XFER_UDMA_1: |
171 | regU |= unit ? 0x82 : 0x21; |
172 | break; |
173 | case XFER_UDMA_0: |
174 | regU |= unit ? 0xC2 : 0x31; |
175 | break; |
176 | case XFER_MW_DMA_2: |
177 | case XFER_MW_DMA_1: |
178 | case XFER_MW_DMA_0: |
179 | cmd64x_program_timings(drive, speed); |
180 | break; |
181 | } |
182 | |
183 | pci_write_config_byte(dev, pciU, regU); |
184 | } |
185 | |
186 | static void cmd648_clear_irq(ide_drive_t *drive) |
187 | { |
188 | ide_hwif_t *hwif = drive->hwif; |
189 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
190 | unsigned long base = pci_resource_start(dev, 4); |
191 | u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 : |
192 | MRDMODE_INTR_CH0; |
193 | u8 mrdmode = inb(base + 1); |
194 | |
195 | /* clear the interrupt bit */ |
196 | outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask, |
197 | base + 1); |
198 | } |
199 | |
200 | static void cmd64x_clear_irq(ide_drive_t *drive) |
201 | { |
202 | ide_hwif_t *hwif = drive->hwif; |
203 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
204 | int irq_reg = hwif->channel ? ARTTIM23 : CFR; |
205 | u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 : |
206 | CFR_INTR_CH0; |
207 | u8 irq_stat = 0; |
208 | |
209 | (void) pci_read_config_byte(dev, irq_reg, &irq_stat); |
210 | /* clear the interrupt bit */ |
211 | (void) pci_write_config_byte(dev, irq_reg, irq_stat | irq_mask); |
212 | } |
213 | |
214 | static int cmd648_test_irq(ide_hwif_t *hwif) |
215 | { |
216 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
217 | unsigned long base = pci_resource_start(dev, 4); |
218 | u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 : |
219 | MRDMODE_INTR_CH0; |
220 | u8 mrdmode = inb(base + 1); |
221 | |
222 | pr_debug("%s: mrdmode: 0x%02x irq_mask: 0x%02x\n", |
223 | hwif->name, mrdmode, irq_mask); |
224 | |
225 | return (mrdmode & irq_mask) ? 1 : 0; |
226 | } |
227 | |
228 | static int cmd64x_test_irq(ide_hwif_t *hwif) |
229 | { |
230 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
231 | int irq_reg = hwif->channel ? ARTTIM23 : CFR; |
232 | u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 : |
233 | CFR_INTR_CH0; |
234 | u8 irq_stat = 0; |
235 | |
236 | (void) pci_read_config_byte(dev, irq_reg, &irq_stat); |
237 | |
238 | pr_debug("%s: irq_stat: 0x%02x irq_mask: 0x%02x\n", |
239 | hwif->name, irq_stat, irq_mask); |
240 | |
241 | return (irq_stat & irq_mask) ? 1 : 0; |
242 | } |
243 | |
244 | /* |
245 | * ASUS P55T2P4D with CMD646 chipset revision 0x01 requires the old |
246 | * event order for DMA transfers. |
247 | */ |
248 | |
249 | static int cmd646_1_dma_end(ide_drive_t *drive) |
250 | { |
251 | ide_hwif_t *hwif = drive->hwif; |
252 | u8 dma_stat = 0, dma_cmd = 0; |
253 | |
254 | /* get DMA status */ |
255 | dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); |
256 | /* read DMA command state */ |
257 | dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); |
258 | /* stop DMA */ |
259 | outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); |
260 | /* clear the INTR & ERROR bits */ |
261 | outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS); |
262 | /* verify good DMA status */ |
263 | return (dma_stat & 7) != 4; |
264 | } |
265 | |
266 | static int init_chipset_cmd64x(struct pci_dev *dev) |
267 | { |
268 | u8 mrdmode = 0; |
269 | |
270 | /* Set a good latency timer and cache line size value. */ |
271 | (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); |
272 | /* FIXME: pci_set_master() to ensure a good latency timer value */ |
273 | |
274 | /* |
275 | * Enable interrupts, select MEMORY READ LINE for reads. |
276 | * |
277 | * NOTE: although not mentioned in the PCI0646U specs, |
278 | * bits 0-1 are write only and won't be read back as |
279 | * set or not -- PCI0646U2 specs clarify this point. |
280 | */ |
281 | (void) pci_read_config_byte (dev, MRDMODE, &mrdmode); |
282 | mrdmode &= ~0x30; |
283 | (void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02)); |
284 | |
285 | return 0; |
286 | } |
287 | |
288 | static u8 cmd64x_cable_detect(ide_hwif_t *hwif) |
289 | { |
290 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
291 | u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01; |
292 | |
293 | switch (dev->device) { |
294 | case PCI_DEVICE_ID_CMD_648: |
295 | case PCI_DEVICE_ID_CMD_649: |
296 | pci_read_config_byte(dev, BMIDECSR, &bmidecsr); |
297 | return (bmidecsr & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40; |
298 | default: |
299 | return ATA_CBL_PATA40; |
300 | } |
301 | } |
302 | |
/* Port ops for CMD643 and pre-rev-3 CMD646: IRQ status lives in PCI config space */
static const struct ide_port_ops cmd64x_port_ops = {
	.set_pio_mode		= cmd64x_set_pio_mode,
	.set_dma_mode		= cmd64x_set_dma_mode,
	.clear_irq		= cmd64x_clear_irq,
	.test_irq		= cmd64x_test_irq,
	.cable_detect		= cmd64x_cable_detect,
};
310 | |
/* Port ops for CMD646U+/648/649: IRQ status read through the MRDMODE I/O mirror */
static const struct ide_port_ops cmd648_port_ops = {
	.set_pio_mode		= cmd64x_set_pio_mode,
	.set_dma_mode		= cmd64x_set_dma_mode,
	.clear_irq		= cmd648_clear_irq,
	.test_irq		= cmd648_test_irq,
	.cable_detect		= cmd64x_cable_detect,
};
318 | |
/*
 * DMA ops for CMD646 revision 0x01 only: standard SFF handlers except for
 * dma_end, which needs the old event order (see cmd646_1_dma_end).
 */
static const struct ide_dma_ops cmd646_rev1_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= ide_dma_start,
	.dma_end		= cmd646_1_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};
329 | |
/*
 * Per-chip port descriptions, indexed by the driver_data field of the
 * PCI device table below. The CMD646 entry describes the best-case
 * (rev >= 5) chip; cmd64x_init_one() downgrades it for older revisions.
 */
static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
	{	/* 0: CMD643 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_cmd64x,
		/* no primary-channel enable bit on this chip */
		.enablebits	= {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
		.port_ops	= &cmd64x_port_ops,
		.host_flags	= IDE_HFLAG_CLEAR_SIMPLEX |
				  IDE_HFLAG_ABUSE_PREFETCH |
				  IDE_HFLAG_SERIALIZE,
		.pio_mask	= ATA_PIO5,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= 0x00, /* no udma */
	},
	{	/* 1: CMD646 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_cmd64x,
		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
		.port_ops	= &cmd648_port_ops,
		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH |
				  IDE_HFLAG_SERIALIZE,
		.pio_mask	= ATA_PIO5,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA2,
	},
	{	/* 2: CMD648 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_cmd64x,
		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
		.port_ops	= &cmd648_port_ops,
		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
		.pio_mask	= ATA_PIO5,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA4,
	},
	{	/* 3: CMD649 */
		.name		= DRV_NAME,
		.init_chipset	= init_chipset_cmd64x,
		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
		.port_ops	= &cmd648_port_ops,
		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
		.pio_mask	= ATA_PIO5,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
	}
};
375 | |
376 | static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_device_id *id) |
377 | { |
378 | struct ide_port_info d; |
379 | u8 idx = id->driver_data; |
380 | |
381 | d = cmd64x_chipsets[idx]; |
382 | |
383 | if (idx == 1) { |
384 | /* |
385 | * UltraDMA only supported on PCI646U and PCI646U2, which |
386 | * correspond to revisions 0x03, 0x05 and 0x07 respectively. |
387 | * Actually, although the CMD tech support people won't |
388 | * tell me the details, the 0x03 revision cannot support |
389 | * UDMA correctly without hardware modifications, and even |
390 | * then it only works with Quantum disks due to some |
391 | * hold time assumptions in the 646U part which are fixed |
392 | * in the 646U2. |
393 | * |
394 | * So we only do UltraDMA on revision 0x05 and 0x07 chipsets. |
395 | */ |
396 | if (dev->revision < 5) { |
397 | d.udma_mask = 0x00; |
398 | /* |
399 | * The original PCI0646 didn't have the primary |
400 | * channel enable bit, it appeared starting with |
401 | * PCI0646U (i.e. revision ID 3). |
402 | */ |
403 | if (dev->revision < 3) { |
404 | d.enablebits[0].reg = 0; |
405 | d.port_ops = &cmd64x_port_ops; |
406 | if (dev->revision == 1) |
407 | d.dma_ops = &cmd646_rev1_dma_ops; |
408 | } |
409 | } |
410 | } |
411 | |
412 | return ide_pci_init_one(dev, &d, NULL); |
413 | } |
414 | |
/* PCI IDs we bind to; driver_data indexes cmd64x_chipsets[] */
static const struct pci_device_id cmd64x_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 2 },
	{ PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 3 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, cmd64x_pci_tbl);
423 | |
/* PCI driver glue; probe/remove/PM are the generic IDE PCI helpers */
static struct pci_driver cmd64x_pci_driver = {
	.name		= "CMD64x_IDE",
	.id_table	= cmd64x_pci_tbl,
	.probe		= cmd64x_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};
432 | |
/* Module entry point: register the PCI driver with the IDE core. */
static int __init cmd64x_ide_init(void)
{
	return ide_pci_register_driver(&cmd64x_pci_driver);
}
437 | |
/* Module exit point: unregister the PCI driver. */
static void __exit cmd64x_ide_exit(void)
{
	pci_unregister_driver(&cmd64x_pci_driver);
}
442 | |
module_init(cmd64x_ide_init);
module_exit(cmd64x_ide_exit);

/* Module metadata */
MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
MODULE_LICENSE("GPL");
449 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9