/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS 16
#define IMX_DMA_CHANNELS 16

#define IMX_DMA_2D_SLOTS 2
#define IMX_DMA_2D_SLOT_A 0
#define IMX_DMA_2D_SLOT_B 1

#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32 (0 << 4)
#define IMX_DMA_MEMSIZE_8 (1 << 4)
#define IMX_DMA_MEMSIZE_16 (2 << 4)
#define IMX_DMA_TYPE_LINEAR (0 << 10)
#define IMX_DMA_TYPE_2D (1 << 10)
#define IMX_DMA_TYPE_FIFO (2 << 10)

#define IMX_DMA_ERR_BURST (1 << 0)
#define IMX_DMA_ERR_REQUEST (1 << 1)
#define IMX_DMA_ERR_TRANSFER (1 << 2)
#define IMX_DMA_ERR_BUFFER (1 << 3)
#define IMX_DMA_ERR_TIMEOUT (1 << 4)

#define DMA_DCR 0x00 /* Control Register */
#define DMA_DISR 0x04 /* Interrupt status Register */
#define DMA_DIMR 0x08 /* Interrupt mask Register */
#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
#define DMA_DRTOSR 0x10 /* Request timeout Register */
#define DMA_DSESR 0x14 /* Transfer Error Status Register */
#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
#define DMA_WSRA 0x40 /* W-Size Register A */
#define DMA_XSRA 0x44 /* X-Size Register A */
#define DMA_YSRA 0x48 /* Y-Size Register A */
#define DMA_WSRB 0x4c /* W-Size Register B */
#define DMA_XSRB 0x50 /* X-Size Register B */
#define DMA_YSRB 0x54 /* Y-Size Register B */
#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
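
/*
 * Channel-specific registers are banked: each of the 16 channels owns a
 * 0x40-byte window starting at offset 0x80, which is why the accessors above
 * use "0x80 + ((x) << 6)".  As an illustrative calculation, channel 3's
 * control register lives at DMA_CCR(3) = 0x8c + (3 << 6) = 0x14c.
 */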

#define DCR_DRST (1<<1)
#define DCR_DEN (1<<0)
#define DBTOCR_EN (1<<15)
#define DBTOCR_CNT(x) ((x) & 0x7fff)
#define CNTR_CNT(x) ((x) & 0xffffff)
#define CCR_ACRPT (1<<14)
#define CCR_DMOD_LINEAR (0x0 << 12)
#define CCR_DMOD_2D (0x1 << 12)
#define CCR_DMOD_FIFO (0x2 << 12)
#define CCR_DMOD_EOBFIFO (0x3 << 12)
#define CCR_SMOD_LINEAR (0x0 << 10)
#define CCR_SMOD_2D (0x1 << 10)
#define CCR_SMOD_FIFO (0x2 << 10)
#define CCR_SMOD_EOBFIFO (0x3 << 10)
#define CCR_MDIR_DEC (1<<9)
#define CCR_MSEL_B (1<<8)
#define CCR_DSIZ_32 (0x0 << 6)
#define CCR_DSIZ_8 (0x1 << 6)
#define CCR_DSIZ_16 (0x2 << 6)
#define CCR_SSIZ_32 (0x0 << 4)
#define CCR_SSIZ_8 (0x1 << 4)
#define CCR_SSIZ_16 (0x2 << 4)
#define CCR_REN (1<<3)
#define CCR_RPT (1<<2)
#define CCR_FRC (1<<1)
#define CCR_CEN (1<<0)
#define RTOR_EN (1<<15)
#define RTOR_CLK (1<<14)
#define RTOR_PSC (1<<13)
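
/*
 * Note on the CCR layout: the source-side fields (SSIZ at bits 4-5, SMOD at
 * bits 10-11) sit exactly two bits below their destination-side counterparts
 * (DSIZ at bits 6-7, DMOD at bits 12-13).  The IMX_DMA_MEMSIZE_* and
 * IMX_DMA_TYPE_* values above are encoded in the source-side position, so
 * shifting such a value left by two turns it into the matching
 * destination-side configuration.  This is what the "(... << 2)" expressions
 * in imxdma_control() and imxdma_xfer_desc() rely on.
 */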

enum imxdma_prep_type {
    IMXDMA_DESC_MEMCPY,
    IMXDMA_DESC_INTERLEAVED,
    IMXDMA_DESC_SLAVE_SG,
    IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
    u16 xsr;
    u16 ysr;
    u16 wsr;
    int count;
};

struct imxdma_desc {
    struct list_head node;
    struct dma_async_tx_descriptor desc;
    enum dma_status status;
    dma_addr_t src;
    dma_addr_t dest;
    size_t len;
    enum dma_transfer_direction direction;
    enum imxdma_prep_type type;
    /* For memcpy and interleaved */
    unsigned int config_port;
    unsigned int config_mem;
    /* For interleaved transfers */
    unsigned int x;
    unsigned int y;
    unsigned int w;
    /* For slave sg and cyclic */
    struct scatterlist *sg;
    unsigned int sgcount;
};

struct imxdma_channel {
    int hw_chaining;
    struct timer_list watchdog;
    struct imxdma_engine *imxdma;
    unsigned int channel;

    struct tasklet_struct dma_tasklet;
    struct list_head ld_free;
    struct list_head ld_queue;
    struct list_head ld_active;
    int descs_allocated;
    enum dma_slave_buswidth word_size;
    dma_addr_t per_address;
    u32 watermark_level;
    struct dma_chan chan;
    struct dma_async_tx_descriptor desc;
    enum dma_status status;
    int dma_request;
    struct scatterlist *sg_list;
    u32 ccr_from_device;
    u32 ccr_to_device;
    bool enabled_2d;
    int slot_2d;
};
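
/*
 * Descriptor lifecycle: imxdma_alloc_chan_resources() fills ld_free with up
 * to IMXDMA_MAX_CHAN_DESCRIPTORS descriptors.  The prep functions hand out
 * the first free descriptor, tx_submit() moves it to ld_queue, and
 * issue_pending() moves it to ld_active and programs the hardware.  When it
 * completes, imxdma_tasklet() moves it back to ld_free for reuse;
 * DMA_TERMINATE_ALL splices both ld_active and ld_queue back onto ld_free.
 * Cyclic descriptors stay on ld_active until the channel is terminated.
 */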

struct imxdma_engine {
    struct device *dev;
    struct device_dma_parameters dma_parms;
    struct dma_device dma_device;
    void __iomem *base;
    struct clk *dma_ahb;
    struct clk *dma_ipg;
    spinlock_t lock;
    struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
    struct imxdma_channel channel[IMX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
    return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
    struct imxdma_desc *desc;

    if (!list_empty(&imxdmac->ld_active)) {
        desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
                    node);
        if (desc->type == IMXDMA_DESC_CYCLIC)
            return true;
    }
    return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
                 unsigned offset)
{
    __raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
    return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
    if (cpu_is_mx27())
        return imxdmac->hw_chaining;
    else
        return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    struct scatterlist *sg = d->sg;
    unsigned long now;

    now = min(d->len, sg_dma_len(sg));
    if (d->len != IMX_DMA_LENGTH_LOOP)
        d->len -= now;

    if (d->direction == DMA_DEV_TO_MEM)
        imx_dmav1_writel(imxdma, sg->dma_address,
                 DMA_DAR(imxdmac->channel));
    else
        imx_dmav1_writel(imxdma, sg->dma_address,
                 DMA_SAR(imxdmac->channel));

    imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

    dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
        "size 0x%08x\n", __func__, imxdmac->channel,
         imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
         imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
         imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

    return now;
}
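
/*
 * The v1 DMA engine has no scatter-gather support in hardware, so the driver
 * emulates it: imxdma_sg_next() programs one scatterlist entry at a time, and
 * the per-channel interrupt (or, on i.MX27, the hardware chaining/repeat
 * mechanism) is used to advance to the next entry from
 * dma_irq_handle_channel().
 */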

static void imxdma_enable_hw(struct imxdma_desc *d)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    int channel = imxdmac->channel;
    unsigned long flags;

    dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

    local_irq_save(flags);

    imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
    imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
             ~(1 << channel), DMA_DIMR);
    imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
             CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

    if ((cpu_is_mx21() || cpu_is_mx27()) &&
            d->sg && imxdma_hw_chain(imxdmac)) {
        d->sg = sg_next(d->sg);
        if (d->sg) {
            u32 tmp;
            imxdma_sg_next(d);
            tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
            imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
                     DMA_CCR(channel));
        }
    }

    local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    int channel = imxdmac->channel;
    unsigned long flags;

    dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

    if (imxdma_hw_chain(imxdmac))
        del_timer(&imxdmac->watchdog);

    local_irq_save(flags);
    imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
             (1 << channel), DMA_DIMR);
    imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
             ~CCR_CEN, DMA_CCR(channel));
    imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
    local_irq_restore(flags);
}

static void imxdma_watchdog(unsigned long data)
{
    struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    int channel = imxdmac->channel;

    imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

    /* Tasklet watchdog error handler */
    tasklet_schedule(&imxdmac->dma_tasklet);
    dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
        imxdmac->channel);
}

static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
    struct imxdma_engine *imxdma = dev_id;
    unsigned int err_mask;
    int i, disr;
    int errcode;

    disr = imx_dmav1_readl(imxdma, DMA_DISR);

    err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
           imx_dmav1_readl(imxdma, DMA_DRTOSR) |
           imx_dmav1_readl(imxdma, DMA_DSESR) |
           imx_dmav1_readl(imxdma, DMA_DBOSR);

    if (!err_mask)
        return IRQ_HANDLED;

    imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

    for (i = 0; i < IMX_DMA_CHANNELS; i++) {
        if (!(err_mask & (1 << i)))
            continue;
        errcode = 0;

        if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
            imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
            errcode |= IMX_DMA_ERR_BURST;
        }
        if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
            imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
            errcode |= IMX_DMA_ERR_REQUEST;
        }
        if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
            imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
            errcode |= IMX_DMA_ERR_TRANSFER;
        }
        if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
            imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
            errcode |= IMX_DMA_ERR_BUFFER;
        }
        /* Tasklet error handler */
        tasklet_schedule(&imxdma->channel[i].dma_tasklet);

        printk(KERN_WARNING
               "DMA timeout on channel %d -%s%s%s%s\n", i,
               errcode & IMX_DMA_ERR_BURST ? " burst" : "",
               errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
               errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
               errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
    }
    return IRQ_HANDLED;
}

static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    int chno = imxdmac->channel;
    struct imxdma_desc *desc;

    spin_lock(&imxdma->lock);
    if (list_empty(&imxdmac->ld_active)) {
        spin_unlock(&imxdma->lock);
        goto out;
    }

    desc = list_first_entry(&imxdmac->ld_active,
                struct imxdma_desc,
                node);
    spin_unlock(&imxdma->lock);

    if (desc->sg) {
        u32 tmp;
        desc->sg = sg_next(desc->sg);

        if (desc->sg) {
            imxdma_sg_next(desc);

            tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

            if (imxdma_hw_chain(imxdmac)) {
                /* FIXME: The timeout should probably be
                 * configurable
                 */
                mod_timer(&imxdmac->watchdog,
                    jiffies + msecs_to_jiffies(500));

                tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
                imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
            } else {
                imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
                         DMA_CCR(chno));
                tmp |= CCR_CEN;
            }

            imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

            if (imxdma_chan_is_doing_cyclic(imxdmac))
                /* Tasklet progression */
                tasklet_schedule(&imxdmac->dma_tasklet);

            return;
        }

        if (imxdma_hw_chain(imxdmac)) {
            del_timer(&imxdmac->watchdog);
            return;
        }
    }

out:
    imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
    /* Tasklet irq */
    tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
    struct imxdma_engine *imxdma = dev_id;
    int i, disr;

    if (cpu_is_mx21() || cpu_is_mx27())
        imxdma_err_handler(irq, dev_id);

    disr = imx_dmav1_readl(imxdma, DMA_DISR);

    dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

    imx_dmav1_writel(imxdma, disr, DMA_DISR);
    for (i = 0; i < IMX_DMA_CHANNELS; i++) {
        if (disr & (1 << i))
            dma_irq_handle_channel(&imxdma->channel[i]);
    }

    return IRQ_HANDLED;
}
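
/*
 * Interrupt routing: i.MX1 exposes two SoC-level interrupts (MX1_DMA_INT for
 * completion and MX1_DMA_ERR for errors), while i.MX21/i.MX27 provide one
 * interrupt per channel (MX2x_INT_DMACH0 + n) and error conditions are
 * checked from dma_irq_handler() via imxdma_err_handler().  See
 * imxdma_probe() for the corresponding request_irq() calls.
 */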

static int imxdma_xfer_desc(struct imxdma_desc *d)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    unsigned long flags;
    int slot = -1;
    int i;

    /* Configure and enable */
    switch (d->type) {
    case IMXDMA_DESC_INTERLEAVED:
        /* Try to get a free 2D slot */
        spin_lock_irqsave(&imxdma->lock, flags);
        for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
            if ((imxdma->slots_2d[i].count > 0) &&
            ((imxdma->slots_2d[i].xsr != d->x) ||
            (imxdma->slots_2d[i].ysr != d->y) ||
            (imxdma->slots_2d[i].wsr != d->w)))
                continue;
            slot = i;
            break;
        }
        if (slot < 0) {
            spin_unlock_irqrestore(&imxdma->lock, flags);
            return -EBUSY;
        }

        imxdma->slots_2d[slot].xsr = d->x;
        imxdma->slots_2d[slot].ysr = d->y;
        imxdma->slots_2d[slot].wsr = d->w;
        imxdma->slots_2d[slot].count++;

        imxdmac->slot_2d = slot;
        imxdmac->enabled_2d = true;
        spin_unlock_irqrestore(&imxdma->lock, flags);

        if (slot == IMX_DMA_2D_SLOT_A) {
            d->config_mem &= ~CCR_MSEL_B;
            d->config_port &= ~CCR_MSEL_B;
            imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
            imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
            imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
        } else {
            d->config_mem |= CCR_MSEL_B;
            d->config_port |= CCR_MSEL_B;
            imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
            imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
            imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
        }
        /*
         * We fall-through here intentionally, since a 2D transfer is
         * similar to MEMCPY just adding the 2D slot configuration.
         */
    case IMXDMA_DESC_MEMCPY:
        imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
        imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
        imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
             DMA_CCR(imxdmac->channel));

        imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

        dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
            "dma_length=%d\n", __func__, imxdmac->channel,
            d->dest, d->src, d->len);

        break;
    /* Cyclic transfer is the same as slave_sg with special sg configuration. */
    case IMXDMA_DESC_CYCLIC:
    case IMXDMA_DESC_SLAVE_SG:
        if (d->direction == DMA_DEV_TO_MEM) {
            imx_dmav1_writel(imxdma, imxdmac->per_address,
                     DMA_SAR(imxdmac->channel));
            imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
                     DMA_CCR(imxdmac->channel));

            dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
                "total length=%d dev_addr=0x%08x (dev2mem)\n",
                __func__, imxdmac->channel, d->sg, d->sgcount,
                d->len, imxdmac->per_address);
        } else if (d->direction == DMA_MEM_TO_DEV) {
            imx_dmav1_writel(imxdma, imxdmac->per_address,
                     DMA_DAR(imxdmac->channel));
            imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
                     DMA_CCR(imxdmac->channel));

            dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
                "total length=%d dev_addr=0x%08x (mem2dev)\n",
                __func__, imxdmac->channel, d->sg, d->sgcount,
                d->len, imxdmac->per_address);
        } else {
            dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
                __func__, imxdmac->channel);
            return -EINVAL;
        }

        imxdma_sg_next(d);

        break;
    default:
        return -EINVAL;
    }
    imxdma_enable_hw(d);
    return 0;
}

static void imxdma_tasklet(unsigned long data)
{
    struct imxdma_channel *imxdmac = (void *)data;
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    struct imxdma_desc *desc;

    spin_lock(&imxdma->lock);

    if (list_empty(&imxdmac->ld_active)) {
        /* Someone might have called terminate all */
        goto out;
    }
    desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

    if (desc->desc.callback)
        desc->desc.callback(desc->desc.callback_param);

    /* If we are dealing with a cyclic descriptor, keep it on ld_active
     * and don't mark the descriptor as complete.
     * Only in the non-cyclic case is it marked as complete.
     */
    if (imxdma_chan_is_doing_cyclic(imxdmac))
        goto out;
    else
        dma_cookie_complete(&desc->desc);

    /* Free 2D slot if it was an interleaved transfer */
    if (imxdmac->enabled_2d) {
        imxdma->slots_2d[imxdmac->slot_2d].count--;
        imxdmac->enabled_2d = false;
    }

    list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

    if (!list_empty(&imxdmac->ld_queue)) {
        desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
                    node);
        list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
        if (imxdma_xfer_desc(desc) < 0)
            dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
                 __func__, imxdmac->channel);
    }
out:
    spin_unlock(&imxdma->lock);
}

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
    struct dma_slave_config *dmaengine_cfg = (void *)arg;
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    unsigned long flags;
    unsigned int mode = 0;

    switch (cmd) {
    case DMA_TERMINATE_ALL:
        imxdma_disable_hw(imxdmac);

        spin_lock_irqsave(&imxdma->lock, flags);
        list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
        list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
        spin_unlock_irqrestore(&imxdma->lock, flags);
        return 0;
    case DMA_SLAVE_CONFIG:
        if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
            imxdmac->per_address = dmaengine_cfg->src_addr;
            imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
            imxdmac->word_size = dmaengine_cfg->src_addr_width;
        } else {
            imxdmac->per_address = dmaengine_cfg->dst_addr;
            imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
            imxdmac->word_size = dmaengine_cfg->dst_addr_width;
        }

        switch (imxdmac->word_size) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
            mode = IMX_DMA_MEMSIZE_8;
            break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
            mode = IMX_DMA_MEMSIZE_16;
            break;
        default:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
            mode = IMX_DMA_MEMSIZE_32;
            break;
        }

        imxdmac->hw_chaining = 1;
        if (!imxdma_hw_chain(imxdmac))
            return -EINVAL;
        imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
            ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
            CCR_REN;
        imxdmac->ccr_to_device =
            (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
            ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
        imx_dmav1_writel(imxdma, imxdmac->dma_request,
                 DMA_RSSR(imxdmac->channel));

        /* Set burst length */
        imx_dmav1_writel(imxdma, imxdmac->watermark_level *
                imxdmac->word_size, DMA_BLR(imxdmac->channel));

        return 0;
    default:
        return -ENOSYS;
    }

    return -EINVAL;
}
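
/*
 * Illustrative sketch (not part of this driver) of how a peripheral driver
 * would reach the DMA_SLAVE_CONFIG path above, assuming a hypothetical FIFO
 * at "fifo_phys" accessed 16 bits at a time:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * dmaengine_slave_config() is the generic dmaengine wrapper that ends up in
 * imxdma_control(chan, DMA_SLAVE_CONFIG, ...); dmaengine_terminate_all()
 * reaches the DMA_TERMINATE_ALL case the same way.
 */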

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
                        dma_cookie_t cookie,
                        struct dma_tx_state *txstate)
{
    return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    dma_cookie_t cookie;
    unsigned long flags;

    spin_lock_irqsave(&imxdma->lock, flags);
    list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
    cookie = dma_cookie_assign(tx);
    spin_unlock_irqrestore(&imxdma->lock, flags);

    return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
    struct imx_dma_data *data = chan->private;

    if (data != NULL)
        imxdmac->dma_request = data->dma_request;

    while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
        struct imxdma_desc *desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
            break;
        __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
        dma_async_tx_descriptor_init(&desc->desc, chan);
        desc->desc.tx_submit = imxdma_tx_submit;
        /* txd.flags will be overwritten in prep funcs */
        desc->desc.flags = DMA_CTRL_ACK;
        desc->status = DMA_SUCCESS;

        list_add_tail(&desc->node, &imxdmac->ld_free);
        imxdmac->descs_allocated++;
    }

    if (!imxdmac->descs_allocated)
        return -ENOMEM;

    return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    struct imxdma_desc *desc, *_desc;
    unsigned long flags;

    spin_lock_irqsave(&imxdma->lock, flags);

    imxdma_disable_hw(imxdmac);
    list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
    list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

    spin_unlock_irqrestore(&imxdma->lock, flags);

    list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
        kfree(desc);
        imxdmac->descs_allocated--;
    }
    INIT_LIST_HEAD(&imxdmac->ld_free);

    if (imxdmac->sg_list) {
        kfree(imxdmac->sg_list);
        imxdmac->sg_list = NULL;
    }
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
    struct scatterlist *sg;
    int i, dma_length = 0;
    struct imxdma_desc *desc;

    if (list_empty(&imxdmac->ld_free) ||
        imxdma_chan_is_doing_cyclic(imxdmac))
        return NULL;

    desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

    for_each_sg(sgl, sg, sg_len, i) {
        dma_length += sg_dma_len(sg);
    }

    switch (imxdmac->word_size) {
    case DMA_SLAVE_BUSWIDTH_4_BYTES:
        if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
            return NULL;
        break;
    case DMA_SLAVE_BUSWIDTH_2_BYTES:
        if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
            return NULL;
        break;
    case DMA_SLAVE_BUSWIDTH_1_BYTE:
        break;
    default:
        return NULL;
    }

    desc->type = IMXDMA_DESC_SLAVE_SG;
    desc->sg = sgl;
    desc->sgcount = sg_len;
    desc->len = dma_length;
    desc->direction = direction;
    if (direction == DMA_DEV_TO_MEM) {
        desc->src = imxdmac->per_address;
    } else {
        desc->dest = imxdmac->per_address;
    }
    desc->desc.callback = NULL;
    desc->desc.callback_param = NULL;

    return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        void *context)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    struct imxdma_desc *desc;
    int i;
    unsigned int periods = buf_len / period_len;

    dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
            __func__, imxdmac->channel, buf_len, period_len);

    if (list_empty(&imxdmac->ld_free) ||
        imxdma_chan_is_doing_cyclic(imxdmac))
        return NULL;

    desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

    if (imxdmac->sg_list)
        kfree(imxdmac->sg_list);

    imxdmac->sg_list = kcalloc(periods + 1,
            sizeof(struct scatterlist), GFP_KERNEL);
    if (!imxdmac->sg_list)
        return NULL;

    sg_init_table(imxdmac->sg_list, periods);

    for (i = 0; i < periods; i++) {
        imxdmac->sg_list[i].page_link = 0;
        imxdmac->sg_list[i].offset = 0;
        imxdmac->sg_list[i].dma_address = dma_addr;
        sg_dma_len(&imxdmac->sg_list[i]) = period_len;
        dma_addr += period_len;
    }

    /* close the loop */
    imxdmac->sg_list[periods].offset = 0;
    sg_dma_len(&imxdmac->sg_list[periods]) = 0;
    imxdmac->sg_list[periods].page_link =
        ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

    desc->type = IMXDMA_DESC_CYCLIC;
    desc->sg = imxdmac->sg_list;
    desc->sgcount = periods;
    desc->len = IMX_DMA_LENGTH_LOOP;
    desc->direction = direction;
    if (direction == DMA_DEV_TO_MEM) {
        desc->src = imxdmac->per_address;
    } else {
        desc->dest = imxdmac->per_address;
    }
    desc->desc.callback = NULL;
    desc->desc.callback_param = NULL;

    return &desc->desc;
}
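
/*
 * The cyclic case above is implemented as a slave_sg transfer whose
 * scatterlist is closed into a ring: the terminating entry's page_link points
 * back at the first entry, so sg_next() never returns NULL, and desc->len is
 * set to IMX_DMA_LENGTH_LOOP so imxdma_sg_next() never counts the transfer
 * down.  As an illustrative example, an audio ring of 4 periods of 4 KiB each
 * would give periods = 4 and a CNTR reload of 4096 bytes on every period.
 */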

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
    struct dma_chan *chan, dma_addr_t dest,
    dma_addr_t src, size_t len, unsigned long flags)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    struct imxdma_desc *desc;

    dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
            __func__, imxdmac->channel, src, dest, len);

    if (list_empty(&imxdmac->ld_free) ||
        imxdma_chan_is_doing_cyclic(imxdmac))
        return NULL;

    desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

    desc->type = IMXDMA_DESC_MEMCPY;
    desc->src = src;
    desc->dest = dest;
    desc->len = len;
    desc->direction = DMA_MEM_TO_MEM;
    desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
    desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
    desc->desc.callback = NULL;
    desc->desc.callback_param = NULL;

    return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
    struct dma_chan *chan, struct dma_interleaved_template *xt,
    unsigned long flags)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    struct imxdma_desc *desc;

    dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
        " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
        imxdmac->channel, xt->src_start, xt->dst_start,
        xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
        xt->numf, xt->frame_size);

    if (list_empty(&imxdmac->ld_free) ||
        imxdma_chan_is_doing_cyclic(imxdmac))
        return NULL;

    if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
        return NULL;

    desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

    desc->type = IMXDMA_DESC_INTERLEAVED;
    desc->src = xt->src_start;
    desc->dest = xt->dst_start;
    desc->x = xt->sgl[0].size;
    desc->y = xt->numf;
    desc->w = xt->sgl[0].icg + desc->x;
    desc->len = desc->x * desc->y;
    desc->direction = DMA_MEM_TO_MEM;
    desc->config_port = IMX_DMA_MEMSIZE_32;
    desc->config_mem = IMX_DMA_MEMSIZE_32;
    if (xt->src_sgl)
        desc->config_mem |= IMX_DMA_TYPE_2D;
    if (xt->dst_sgl)
        desc->config_port |= IMX_DMA_TYPE_2D;
    desc->desc.callback = NULL;
    desc->desc.callback_param = NULL;

    return &desc->desc;
}
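
/*
 * Mapping of the interleaved template onto the 2D registers: x is the line
 * length in bytes (sgl[0].size), y is the number of lines (numf) and w is the
 * line pitch, i.e. line length plus the inter-chunk gap (sgl[0].icg + x).  As
 * an illustrative example, copying a 320-byte-wide column out of a
 * 640-byte-wide frame of 240 lines would use x = 320, w = 640, y = 240 and a
 * total length of 320 * 240 bytes.
 */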

static void imxdma_issue_pending(struct dma_chan *chan)
{
    struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
    struct imxdma_engine *imxdma = imxdmac->imxdma;
    struct imxdma_desc *desc;
    unsigned long flags;

    spin_lock_irqsave(&imxdma->lock, flags);
    if (list_empty(&imxdmac->ld_active) &&
        !list_empty(&imxdmac->ld_queue)) {
        desc = list_first_entry(&imxdmac->ld_queue,
                    struct imxdma_desc, node);

        if (imxdma_xfer_desc(desc) < 0) {
            dev_warn(imxdma->dev,
                 "%s: channel: %d couldn't issue DMA xfer\n",
                 __func__, imxdmac->channel);
        } else {
            list_move_tail(imxdmac->ld_queue.next,
                       &imxdmac->ld_active);
        }
    }
    spin_unlock_irqrestore(&imxdma->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
    struct imxdma_engine *imxdma;
    int ret, i;

    imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
    if (!imxdma)
        return -ENOMEM;

    if (cpu_is_mx1()) {
        imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
    } else if (cpu_is_mx21()) {
        imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
    } else if (cpu_is_mx27()) {
        imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
    } else {
        kfree(imxdma);
        return 0;
    }

    imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
    if (IS_ERR(imxdma->dma_ipg)) {
        ret = PTR_ERR(imxdma->dma_ipg);
        goto err_clk;
    }

    imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
    if (IS_ERR(imxdma->dma_ahb)) {
        ret = PTR_ERR(imxdma->dma_ahb);
        goto err_clk;
    }

    clk_prepare_enable(imxdma->dma_ipg);
    clk_prepare_enable(imxdma->dma_ahb);

    /* reset DMA module */
    imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

    if (cpu_is_mx1()) {
        ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
        if (ret) {
            dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
            goto err_enable;
        }

        ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
        if (ret) {
            dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
            free_irq(MX1_DMA_INT, NULL);
            goto err_enable;
        }
    }

    /* enable DMA module */
    imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

    /* clear all interrupts */
    imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

    /* disable interrupts */
    imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

    INIT_LIST_HEAD(&imxdma->dma_device.channels);

    dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
    dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
    dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
    dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

    /* Initialize 2D global parameters */
    for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
        imxdma->slots_2d[i].count = 0;

    spin_lock_init(&imxdma->lock);

    /* Initialize channel parameters */
    for (i = 0; i < IMX_DMA_CHANNELS; i++) {
        struct imxdma_channel *imxdmac = &imxdma->channel[i];

        if (cpu_is_mx21() || cpu_is_mx27()) {
            ret = request_irq(MX2x_INT_DMACH0 + i,
                    dma_irq_handler, 0, "DMA", imxdma);
            if (ret) {
                dev_warn(imxdma->dev, "Can't register IRQ %d "
                     "for DMA channel %d\n",
                     MX2x_INT_DMACH0 + i, i);
                goto err_init;
            }
            init_timer(&imxdmac->watchdog);
            imxdmac->watchdog.function = &imxdma_watchdog;
            imxdmac->watchdog.data = (unsigned long)imxdmac;
        }

        imxdmac->imxdma = imxdma;

        INIT_LIST_HEAD(&imxdmac->ld_queue);
        INIT_LIST_HEAD(&imxdmac->ld_free);
        INIT_LIST_HEAD(&imxdmac->ld_active);

        tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
                 (unsigned long)imxdmac);
        imxdmac->chan.device = &imxdma->dma_device;
        dma_cookie_init(&imxdmac->chan);
        imxdmac->channel = i;

        /* Add the channel to the DMAC list */
        list_add_tail(&imxdmac->chan.device_node,
                  &imxdma->dma_device.channels);
    }

    imxdma->dev = &pdev->dev;
    imxdma->dma_device.dev = &pdev->dev;

    imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
    imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
    imxdma->dma_device.device_tx_status = imxdma_tx_status;
    imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
    imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
    imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
    imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
    imxdma->dma_device.device_control = imxdma_control;
    imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

    platform_set_drvdata(pdev, imxdma);

    imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
    imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
    dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

    ret = dma_async_device_register(&imxdma->dma_device);
    if (ret) {
        dev_err(&pdev->dev, "unable to register\n");
        goto err_init;
    }

    return 0;

err_init:

    if (cpu_is_mx21() || cpu_is_mx27()) {
        while (--i >= 0)
            free_irq(MX2x_INT_DMACH0 + i, NULL);
    } else if (cpu_is_mx1()) {
        free_irq(MX1_DMA_INT, NULL);
        free_irq(MX1_DMA_ERR, NULL);
    }
err_enable:
    clk_disable_unprepare(imxdma->dma_ipg);
    clk_disable_unprepare(imxdma->dma_ahb);
err_clk:
    kfree(imxdma);
    return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
    struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
    int i;

    dma_async_device_unregister(&imxdma->dma_device);

    if (cpu_is_mx21() || cpu_is_mx27()) {
        for (i = 0; i < IMX_DMA_CHANNELS; i++)
            free_irq(MX2x_INT_DMACH0 + i, NULL);
    } else if (cpu_is_mx1()) {
        free_irq(MX1_DMA_INT, NULL);
        free_irq(MX1_DMA_ERR, NULL);
    }

    clk_disable_unprepare(imxdma->dma_ipg);
    clk_disable_unprepare(imxdma->dma_ahb);
    kfree(imxdma);

    return 0;
}

static struct platform_driver imxdma_driver = {
    .driver = {
        .name = "imx-dma",
    },
    .remove = __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
    return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");