/*
 *  sata_qstor.c - Pacific Digital Corporation QStor SATA
 *
 *  Maintained by:  Mark Lord <mlord@pobox.com>
 *
 *  Copyright 2005 Pacific Digital Corporation.
 *  (OSL/GPL code release authorized by Jalil Fadavi).
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_qstor"
#define DRV_VERSION	"0.09"

enum {
	QS_MMIO_BAR		= 4,

	QS_PORTS		= 4,
	QS_MAX_PRD		= LIBATA_MAX_PRD,
	QS_CPB_ORDER		= 6,
	QS_CPB_BYTES		= (1 << QS_CPB_ORDER),
	QS_PRD_BYTES		= QS_MAX_PRD * 16,
	QS_PKT_BYTES		= QS_CPB_BYTES + QS_PRD_BYTES,

	/* global register offsets */
	QS_HCF_CNFG3		= 0x0003, /* host configuration offset */
	QS_HID_HPHY		= 0x0004, /* host physical interface info */
	QS_HCT_CTRL		= 0x00e4, /* global interrupt mask offset */
	QS_HST_SFF		= 0x0100, /* host status fifo offset */
	QS_HVS_SERD3		= 0x0393, /* PHY enable offset */

	/* global control bits */
	QS_HPHY_64BIT		= (1 << 1), /* 64-bit bus detected */
	QS_CNFG3_GSRST		= 0x01,     /* global chip reset */
	QS_SERD3_PHY_ENA	= 0xf0,     /* PHY detection ENAble */

	/* per-channel register offsets */
	QS_CCF_CPBA		= 0x0710, /* chan CPB base address */
	QS_CCF_CSEP		= 0x0718, /* chan CPB separation factor */
	QS_CFC_HUFT		= 0x0800, /* host upstream fifo threshold */
	QS_CFC_HDFT		= 0x0804, /* host downstream fifo threshold */
	QS_CFC_DUFT		= 0x0808, /* dev upstream fifo threshold */
	QS_CFC_DDFT		= 0x080c, /* dev downstream fifo threshold */
	QS_CCT_CTR0		= 0x0900, /* chan control-0 offset */
	QS_CCT_CTR1		= 0x0901, /* chan control-1 offset */
	QS_CCT_CFF		= 0x0a00, /* chan command fifo offset */

	/* channel control bits */
	QS_CTR0_REG		= (1 << 1), /* register mode (vs. pkt mode) */
	QS_CTR0_CLER		= (1 << 2), /* clear channel errors */
	QS_CTR1_RDEV		= (1 << 1), /* sata phy/comms reset */
	QS_CTR1_RCHN		= (1 << 4), /* reset channel logic */
	QS_CCF_RUN_PKT		= 0x107,    /* RUN a new dma PKT */

	/* pkt sub-field headers */
	QS_HCB_HDR		= 0x01, /* Host Control Block header */
	QS_DCB_HDR		= 0x02, /* Device Control Block header */

	/* pkt HCB flag bits */
	QS_HF_DIRO		= (1 << 0), /* data DIRection Out */
	QS_HF_DAT		= (1 << 3), /* DATa pkt */
	QS_HF_IEN		= (1 << 4), /* Interrupt ENable */
	QS_HF_VLD		= (1 << 5), /* VaLiD pkt */

	/* pkt DCB flag bits */
	QS_DF_PORD		= (1 << 2), /* Pio OR Dma */
	QS_DF_ELBA		= (1 << 3), /* Extended LBA (lba48) */

	/* PCI device IDs */
	board_2068_idx		= 0,	/* QStor 4-port SATA/RAID */
};

enum {
	QS_DMA_BOUNDARY		= ~0UL
};

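/*
 * Each port runs in one of two modes: "mmio" (classic SFF taskfile
 * access through the shadow registers) or "pkt" (the chip's packet/CPB
 * engine, used for DMA commands).  The interrupt handlers below use
 * this state to pick the matching completion path.
 */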
typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;

struct qs_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
	qs_state_t		state;
};

static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
static void qs_error_handler(struct ata_port *ap);

static struct scsi_host_template qs_ata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= QS_MAX_PRD,
	.dma_boundary		= QS_DMA_BOUNDARY,
};

static struct ata_port_operations qs_ata_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= qs_check_atapi_dma,
	.qc_prep		= qs_qc_prep,
	.qc_issue		= qs_qc_issue,

	.freeze			= qs_freeze,
	.thaw			= qs_thaw,
	.prereset		= qs_prereset,
	.softreset		= ATA_OP_NULL,
	.error_handler		= qs_error_handler,
	.lost_interrupt		= ATA_OP_NULL,

	.scr_read		= qs_scr_read,
	.scr_write		= qs_scr_write,

	.port_start		= qs_port_start,
	.host_stop		= qs_host_stop,
};

static const struct ata_port_info qs_port_info[] = {
	/* board_2068_idx */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4_ONLY,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &qs_ata_ops,
	},
};

static const struct pci_device_id qs_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PDC, 0x2068), board_2068_idx },

	{ }	/* terminate list */
};

static struct pci_driver qs_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= qs_ata_pci_tbl,
	.probe			= qs_ata_init_one,
	.remove			= ata_pci_remove_one,
};

static void __iomem *qs_mmio_base(struct ata_host *host)
{
	return host->iomap[QS_MMIO_BAR];
}

static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not supported */
}

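/*
 * Switch a channel back into register (taskfile) mode.  The readb()
 * after the writeb() forces the posted write out to the chip before
 * the caller proceeds.
 */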
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
	struct qs_port_priv *pp = ap->private_data;

	pp->state = qs_state_mmio;
	writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
	readb(chan + QS_CCT_CTR0);	/* flush */
}

static inline void qs_reset_channel_logic(struct ata_port *ap)
{
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

	writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
	readb(chan + QS_CCT_CTR0);	/* flush */
	qs_enter_reg_mode(ap);
}

static void qs_freeze(struct ata_port *ap)
{
	u8 __iomem *mmio_base = qs_mmio_base(ap->host);

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	qs_enter_reg_mode(ap);
}

static void qs_thaw(struct ata_port *ap)
{
	u8 __iomem *mmio_base = qs_mmio_base(ap->host);

	qs_enter_reg_mode(ap);
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}

static int qs_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;

	qs_reset_channel_logic(ap);
	return ata_sff_prereset(link, deadline);
}

static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
	return 0;
}

static void qs_error_handler(struct ata_port *ap)
{
	qs_enter_reg_mode(ap);
	ata_sff_error_handler(ap);
}

static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
	return 0;
}

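/*
 * Build the hardware PRD table, which lives immediately after the CPB
 * in the port's packet buffer.  Each entry is 16 bytes: a little-endian
 * 64-bit buffer address followed by a little-endian 32-bit length, with
 * the last four bytes unused (hence both increments are sizeof(u64)).
 * Returns the number of entries filled in.
 */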
static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct qs_port_priv *pp = ap->private_data;
	u8 *prd = pp->pkt + QS_CPB_BYTES;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u64 addr;
		u32 len;

		addr = sg_dma_address(sg);
		*(__le64 *)prd = cpu_to_le64(addr);
		prd += sizeof(u64);

		len = sg_dma_len(sg);
		*(__le32 *)prd = cpu_to_le32(len);
		prd += sizeof(u64);

		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
					(unsigned long long)addr, len);
	}

	return si;
}

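/*
 * Lay out a command packet: a Host Control Block (flags, byte count,
 * PRD count, and PRD table address) at offset 0, a Device Control
 * Block at offset 24, and the command FIS at offset 32.  Non-DMA
 * protocols take the normal SFF taskfile path and only need the
 * switch back to register mode.
 */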
static void qs_qc_prep(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;
	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
	u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
	u64 addr;
	unsigned int nelem;

	VPRINTK("ENTER\n");

	qs_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	nelem = qs_fill_sg(qc);

	if ((qc->tf.flags & ATA_TFLAG_WRITE))
		hflags |= QS_HF_DIRO;
	if ((qc->tf.flags & ATA_TFLAG_LBA48))
		dflags |= QS_DF_ELBA;

	/* host control block (HCB) */
	buf[ 0] = QS_HCB_HDR;
	buf[ 1] = hflags;
	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);

	/* device control block (DCB) */
	buf[24] = QS_DCB_HDR;
	buf[28] = dflags;

	/* frame information structure (FIS) */
	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
}

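/*
 * Start a prepared packet: clear any stale channel error state, make
 * sure the CPB and PRDs have reached memory (wmb), then post the
 * RUN_PKT token to the channel command FIFO.
 */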
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

	VPRINTK("ENTER, ap %p\n", ap);

	writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
	wmb();			/* flush PRDs and pkt to memory */
	writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
	readl(chan + QS_CCT_CFF);	/* flush */
}

static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pp->state = qs_state_pkt;
		qs_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	pp->state = qs_state_mmio;
	return ata_sff_qc_issue(qc);
}

static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
{
	qc->err_mask |= ac_err_mask(status);

	if (!qc->err_mask) {
		ata_qc_complete(qc);
	} else {
		struct ata_port *ap = qc->ap;
		struct ata_eh_info *ehi = &ap->link.eh_info;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "status 0x%02X", status);

		if (qc->err_mask == AC_ERR_DEV)
			ata_port_abort(ap);
		else
			ata_port_freeze(ap);
	}
}

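/*
 * Drain the host status FIFO.  Each entry is a pair of 32-bit words:
 * the second word carries the empty and valid flags, the channel
 * number, and the host status code, while the first word holds the
 * device status byte.  Entries are consumed until the FIFO reports
 * empty.
 */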
static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
	unsigned int handled = 0;
	u8 sFFE;
	u8 __iomem *mmio_base = qs_mmio_base(host);

	do {
		u32 sff0 = readl(mmio_base + QS_HST_SFF);
		u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
		u8 sEVLD = (sff1 >> 30) & 0x01;	/* valid flag */
		sFFE  = sff1 >> 31;		/* empty flag */

		if (sEVLD) {
			u8 sDST = sff0 >> 16;	/* dev status */
			u8 sHST = sff1 & 0x3f;	/* host status */
			unsigned int port_no = (sff1 >> 8) & 0x03;
			struct ata_port *ap = host->ports[port_no];
			struct qs_port_priv *pp = ap->private_data;
			struct ata_queued_cmd *qc;

			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
					sff1, sff0, port_no, sHST, sDST);
			handled = 1;
			if (!pp || pp->state != qs_state_pkt)
				continue;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
				switch (sHST) {
				case 0: /* successful CPB */
				case 3: /* device error */
					qs_enter_reg_mode(qc->ap);
					qs_do_or_die(qc, sDST);
					break;
				default:
					break;
				}
			}
		}
	} while (!sFFE);
	return handled;
}

static inline unsigned int qs_intr_mmio(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		struct qs_port_priv *pp = ap->private_data;
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (!qc) {
			/*
			 * The qstor hardware generates spurious
			 * interrupts from time to time when switching
			 * in and out of packet mode.  There's no
			 * obvious way to know if we're here now due
			 * to that, so just ack the irq and pretend we
			 * knew it was ours..  (ugh).  This does not
			 * affect packet mode.
			 */
			ata_sff_check_status(ap);
			handled = 1;
			continue;
		}

		if (!pp || pp->state != qs_state_mmio)
			continue;
		if (!(qc->tf.flags & ATA_TFLAG_POLLING))
			handled |= ata_sff_port_intr(ap, qc);
	}
	return handled;
}

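/*
 * Top-level interrupt handler: poll both completion paths under the
 * host lock, since packet-mode and mmio-mode events may be pending
 * at the same time.
 */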
static irqreturn_t qs_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int handled = 0;
	unsigned long flags;

	VPRINTK("ENTER\n");

	spin_lock_irqsave(&host->lock, flags);
	handled = qs_intr_pkt(host) | qs_intr_mmio(host);
	spin_unlock_irqrestore(&host->lock, flags);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

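/*
 * Map the SFF taskfile registers.  The QStor spaces them 8 bytes
 * apart, with each lba48 HOB register at the following odd address
 * (see the hob_* comments below).
 */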
static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x400;
	port->error_addr	=
	port->feature_addr	= base + 0x408; /* hob_feature = 0x409 */
	port->nsect_addr	= base + 0x410; /* hob_nsect   = 0x411 */
	port->lbal_addr		= base + 0x418; /* hob_lbal    = 0x419 */
	port->lbam_addr		= base + 0x420; /* hob_lbam    = 0x421 */
	port->lbah_addr		= base + 0x428; /* hob_lbah    = 0x429 */
	port->device_addr	= base + 0x430;
	port->status_addr	=
	port->command_addr	= base + 0x438;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x440;
	port->scr_addr		= base + 0xc00;
}

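/*
 * Allocate the per-port command packet (CPB plus PRD table) from
 * coherent DMA memory, and program its bus address into the channel
 * as two 32-bit halves.
 */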
static int qs_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct qs_port_priv *pp;
	void __iomem *mmio_base = qs_mmio_base(ap->host);
	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
	u64 addr;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
				      GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;
	memset(pp->pkt, 0, QS_PKT_BYTES);
	ap->private_data = pp;

	qs_enter_reg_mode(ap);
	addr = (u64)pp->pkt_dma;
	writel((u32) addr,        chan + QS_CCF_CPBA);
	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
	return 0;
}

static void qs_host_stop(struct ata_host *host)
{
	void __iomem *mmio_base = qs_mmio_base(host);

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
}

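/*
 * One-time controller setup: mask interrupts, issue a global reset,
 * reset each channel, enable the PHYs, program the FIFO thresholds
 * and CPB separation factor, then re-enable host interrupts.
 */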
static void qs_host_init(struct ata_host *host, unsigned int chip_id)
{
	void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
	unsigned int port_no;

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	/* reset each channel in turn */
	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
		readb(chan + QS_CCT_CTR0);	/* flush */
	}
	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		/* set FIFO depths to same settings as Windows driver */
		writew(32, chan + QS_CFC_HUFT);
		writew(32, chan + QS_CFC_HDFT);
		writew(10, chan + QS_CFC_DUFT);
		writew( 8, chan + QS_CFC_DDFT);
		/* set CPB size in bytes, as a power of two */
		writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
	}
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}

/*
 * The QStor understands 64-bit buses, and uses 64-bit fields
 * for DMA pointers regardless of bus width.  We just have to
 * make sure our DMA masks are set appropriately for whatever
 * bridge lies between us and the QStor, and then the DMA mapping
 * code will ensure we only ever "see" appropriate buffer addresses.
 * If we're 32-bit limited somewhere, then our 64-bit fields will
 * just end up with zeros in the upper 32-bits, without any special
 * logic required outside of this routine (below).
 */
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);

	if (have_64bit_bus &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}

static int qs_ata_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int) ent->driver_data;
	const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
	struct ata_host *host;
	int rc, port_no;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
		return -ENODEV;

	rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
	if (rc)
		return rc;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		unsigned int offset = port_no * 0x4000;
		void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;

		qs_ata_setup_port(&ap->ioaddr, chan);

		ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
	}

	/* initialize adapter */
	qs_host_init(host, board_idx);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
				 &qs_ata_sht);
}

module_pci_driver(qs_ata_pci_driver);

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);