drivers/ata/pata_pxa.c

/*
 * Generic PXA PATA driver
 *
 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <scsi/scsi_host.h>

#include <mach/pxa2xx-regs.h>
#include <mach/pata_pxa.h>
#include <mach/dma.h>

#define DRV_NAME "pata_pxa"
#define DRV_VERSION "0.1"

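/*
 * Per-port driver state: the PXA DMA channel, the in-memory descriptor
 * chain and a completion used to wait for the end-of-chain interrupt.
 */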
struct pata_pxa_data {
    /* PXA DMA channel number; int so a failed request (< 0) is detectable */
    int dma_channel;
    struct pxa_dma_desc *dma_desc;
    dma_addr_t dma_desc_addr;
    uint32_t dma_desc_id;

    /* DMA IO physical address */
    uint32_t dma_io_addr;
    /* PXA DREQ<0:2> pin selector */
    uint32_t dma_dreq;
    /* DMA DCSR register value */
    uint32_t dma_dcsr;

    struct completion dma_done;
};

/*
 * Set up the DMA descriptors. The transfer size is capped at 4 KiB per
 * descriptor; longer transfers are split into a chain of descriptors.
 */
static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
    struct pata_pxa_data *pd = qc->ap->private_data;

    uint32_t cpu_len, seg_len;
    dma_addr_t cpu_addr;

    cpu_addr = sg_dma_address(sg);
    cpu_len = sg_dma_len(sg);

    do {
        seg_len = (cpu_len > 0x1000) ? 0x1000 : cpu_len;

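        /*
         * Chain to the following descriptor; the final descriptor of
         * the chain is rewritten to DDADR_STOP in pxa_qc_prep().
         */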
        pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr +
            ((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc));

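        /* 32-byte bursts, 16-bit wide transfers; the segment length
         * occupies the low bits of DCMD. */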
        pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 |
                    DCMD_WIDTH2 | (DCMD_LENGTH & seg_len);

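        /*
         * Writes move memory to the IO port, with the target (the
         * device) pacing the flow; reads move the IO port to memory,
         * with the source pacing the flow.
         */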
        if (qc->tf.flags & ATA_TFLAG_WRITE) {
            pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr;
            pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr;
            pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR |
                        DCMD_FLOWTRG;
        } else {
            pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr;
            pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr;
            pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR |
                        DCMD_FLOWSRC;
        }

        cpu_len -= seg_len;
        cpu_addr += seg_len;
        pd->dma_desc_id++;

    } while (cpu_len);

    /*
     * The last segment was not a multiple of the 32-byte burst size;
     * enable byte-aligned DMA. Should not happen for ATA transfers.
     */
    if (seg_len & 0x1f)
        DALGN |= (1 << pd->dma_dreq);
}

/*
 * Prepare taskfile for submission.
 */
static void pxa_qc_prep(struct ata_queued_cmd *qc)
{
    struct pata_pxa_data *pd = qc->ap->private_data;
    int si = 0;
    struct scatterlist *sg;

    if (!(qc->flags & ATA_QCFLAG_DMAMAP))
        return;

    pd->dma_desc_id = 0;

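    /* Quiesce the channel and clear the byte-alignment bit before
     * loading a fresh descriptor chain. */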
    DCSR(pd->dma_channel) = 0;
    DALGN &= ~(1 << pd->dma_dreq);

    for_each_sg(qc->sg, sg, qc->n_elem, si)
        pxa_load_dmac(sg, qc);

    pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP;

    /* Fire IRQ only at the end of last block */
    pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN;

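    /* Point the channel at the first descriptor and route the DREQ
     * line to it; the channel is only started in pxa_bmdma_start(). */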
    DDADR(pd->dma_channel) = pd->dma_desc_addr;
    DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel;
}

/*
 * The DMA descriptors were already loaded by pxa_qc_prep(), so only issue
 * the ATA command here; the DMA engine is started in pxa_bmdma_start().
 */
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
    qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

/*
 * Execute the DMA transfer.
 */
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
    struct pata_pxa_data *pd = qc->ap->private_data;
    init_completion(&pd->dma_done);
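    /* Setting DCSR_RUN starts the descriptor chain loaded in
     * pxa_qc_prep(). */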
    DCSR(pd->dma_channel) = DCSR_RUN;
}

/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
    struct pata_pxa_data *pd = qc->ap->private_data;

    /* wait_for_completion_timeout() returns 0 only on timeout */
    if ((DCSR(pd->dma_channel) & DCSR_RUN) &&
        !wait_for_completion_timeout(&pd->dma_done, HZ))
        dev_err(qc->ap->dev, "Timeout waiting for DMA completion!\n");

    DCSR(pd->dma_channel) = 0;
}

/*
 * Read DMA status. bmdma_stop() takes care of properly finishing the DMA
 * transfer, so the DMA-complete interrupt has always fired by the time
 * this is called.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
    struct pata_pxa_data *pd = ap->private_data;
    unsigned char ret = ATA_DMA_INTR;

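    /* dma_dcsr was latched by the DMA interrupt handler; report a bus
     * error as a DMA error. */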
    if (pd->dma_dcsr & DCSR_BUSERR)
        ret |= ATA_DMA_ERR;

    return ret;
}

/*
 * No IRQ status register is present, so there is nothing to clear.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}

/*
 * Check for ATAPI DMA. ATAPI DMA is not supported by this driver; it is
 * still unclear why ATAPI DMA misbehaves on this hardware.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
    return -EOPNOTSUPP;
}

static struct scsi_host_template pxa_ata_sht = {
    ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations pxa_ata_port_ops = {
    .inherits = &ata_bmdma_port_ops,
    .cable_detect = ata_cable_40wire,

    .bmdma_setup = pxa_bmdma_setup,
    .bmdma_start = pxa_bmdma_start,
    .bmdma_stop = pxa_bmdma_stop,
    .bmdma_status = pxa_bmdma_status,

    .check_atapi_dma = pxa_check_atapi_dma,

    .sff_irq_clear = pxa_irq_clear,

    .qc_prep = pxa_qc_prep,
};

/*
 * DMA interrupt handler.
 */
static void pxa_ata_dma_irq(int dma, void *port)
{
    struct ata_port *ap = port;
    struct pata_pxa_data *pd = ap->private_data;

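    /* Latch the channel status, then write it back to acknowledge the
     * interrupt (the interrupt status bits are write-one-to-clear). */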
    pd->dma_dcsr = DCSR(dma);
    DCSR(dma) = pd->dma_dcsr;

    if (pd->dma_dcsr & DCSR_STOPSTATE)
        complete(&pd->dma_done);
}

static int __devinit pxa_ata_probe(struct platform_device *pdev)
{
    struct ata_host *host;
    struct ata_port *ap;
    struct pata_pxa_data *data;
    struct resource *cmd_res;
    struct resource *ctl_res;
    struct resource *dma_res;
    struct resource *irq_res;
    struct pata_pxa_pdata *pdata = pdev->dev.platform_data;
    int ret = 0;

    /*
     * Resource validation; four resources are needed:
     * - CMD port base address
     * - CTL port base address
     * - DMA port base address
     * - IRQ pin
     */
    if (pdev->num_resources != 4) {
        dev_err(&pdev->dev, "invalid number of resources\n");
        return -EINVAL;
    }

    /*
     * CMD port base address
     */
    cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (unlikely(cmd_res == NULL))
        return -EINVAL;

    /*
     * CTL port base address
     */
    ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    if (unlikely(ctl_res == NULL))
        return -EINVAL;

    /*
     * DMA port base address
     */
    dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
    if (unlikely(dma_res == NULL))
        return -EINVAL;

    /*
     * IRQ pin
     */
    irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (unlikely(irq_res == NULL))
        return -EINVAL;

    /*
     * Allocate the host
     */
    host = ata_host_alloc(&pdev->dev, 1);
    if (!host)
        return -ENOMEM;

    ap = host->ports[0];
    ap->ops = &pxa_ata_port_ops;
    ap->pio_mask = ATA_PIO4;
    ap->mwdma_mask = ATA_MWDMA2;

    ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
                        resource_size(cmd_res));
    ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
                        resource_size(ctl_res));
    ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
                        resource_size(dma_res));

    /* devm_ioremap() can fail; bail out instead of dereferencing NULL */
    if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr ||
        !ap->ioaddr.bmdma_addr)
        return -ENOMEM;

    /*
     * Adjust register offsets
     */
    ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
    ap->ioaddr.data_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_DATA << pdata->reg_shift);
    ap->ioaddr.error_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_ERR << pdata->reg_shift);
    ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_FEATURE << pdata->reg_shift);
    ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_NSECT << pdata->reg_shift);
    ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_LBAL << pdata->reg_shift);
    ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_LBAM << pdata->reg_shift);
    ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_LBAH << pdata->reg_shift);
    ap->ioaddr.device_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_DEVICE << pdata->reg_shift);
    ap->ioaddr.status_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_STATUS << pdata->reg_shift);
    ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
                    (ATA_REG_CMD << pdata->reg_shift);

    /*
     * Allocate and load driver's internal data structure
     */
    data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
                                GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    ap->private_data = data;
    data->dma_dreq = pdata->dma_dreq;
    data->dma_io_addr = dma_res->start;

    /*
     * Allocate space for the DMA descriptors
     */
    data->dma_desc = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
                    &data->dma_desc_addr, GFP_KERNEL);
    if (!data->dma_desc)
        return -ENOMEM;

    /*
     * Request the DMA channel
     */
    data->dma_channel = pxa_request_dma(DRV_NAME, DMA_PRIO_LOW,
                        pxa_ata_dma_irq, ap);
    if (data->dma_channel < 0)
        return -EBUSY;

    /*
     * Stop and clear the DMA channel
     */
    DCSR(data->dma_channel) = 0;

    /*
     * Activate the ATA host
     */
    ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
                pdata->irq_flags, &pxa_ata_sht);
    if (ret)
        pxa_free_dma(data->dma_channel);

    return ret;
}

static int __devexit pxa_ata_remove(struct platform_device *pdev)
{
    struct ata_host *host = dev_get_drvdata(&pdev->dev);
    struct pata_pxa_data *data = host->ports[0]->private_data;

    pxa_free_dma(data->dma_channel);

    ata_host_detach(host);

    return 0;
}

static struct platform_driver pxa_ata_driver = {
    .probe = pxa_ata_probe,
    .remove = __devexit_p(pxa_ata_remove),
    .driver = {
        .name = DRV_NAME,
        .owner = THIS_MODULE,
    },
};

module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

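Editor's note: the probe above consumes two MEM resources, one DMA resource, one IRQ resource and a struct pata_pxa_pdata supplied by a board file. Below is a minimal, hypothetical registration sketch of that board-file side; every name, base address, GPIO number and DREQ selector is a placeholder (not from any real board), and PXA_GPIO_TO_IRQ() is assumed to be available from <mach/irqs.h> on the targeted kernel.

/*
 * Hypothetical board-file sketch for registering a "pata_pxa" device.
 * All addresses, the GPIO number and the DREQ number are placeholders.
 */
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <mach/irqs.h>
#include <mach/pata_pxa.h>

static struct resource board_pata_resources[] = {
    [0] = { /* CMD register block */
        .start = 0x20000000,
        .end   = 0x2000001f,
        .flags = IORESOURCE_MEM,
    },
    [1] = { /* CTL register */
        .start = 0x2000002c,
        .end   = 0x2000002f,
        .flags = IORESOURCE_MEM,
    },
    [2] = { /* DMA I/O address (the data register) */
        .start = 0x20000000,
        .end   = 0x20000003,
        .flags = IORESOURCE_DMA,
    },
    [3] = { /* IRQ pin */
        .start = PXA_GPIO_TO_IRQ(36),
        .end   = PXA_GPIO_TO_IRQ(36),
        .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
    },
};

static struct pata_pxa_pdata board_pata_pdata = {
    .reg_shift = 1,                   /* registers on 16-bit boundaries */
    .dma_dreq  = 1,                   /* wired to DREQ<1> */
    .irq_flags = IRQF_TRIGGER_RISING,
};

static struct platform_device board_pata_device = {
    .name          = "pata_pxa",
    .id            = -1,
    .resource      = board_pata_resources,
    .num_resources = ARRAY_SIZE(board_pata_resources),
    .dev           = {
        .platform_data     = &board_pata_pdata,
        .coherent_dma_mask = 0xffffffff,
    },
};

/* Called from the board's init_machine hook: */
platform_device_register(&board_pata_device);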
