drivers/dma/fsldma.c

/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 * DMA engine driver for the Freescale MPC8540 DMA controller, which is
 * also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and similar
 * parts. Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...) \
    dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...) \
    dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
    DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
    return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
    DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
    return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
    return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
                struct fsl_dma_ld_hw *hw, u32 count)
{
    hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
    return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
             struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
        ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
    hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
                   struct fsl_desc_sw *desc)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
        ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
    return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
             struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
        ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
    hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
                   struct fsl_desc_sw *desc)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
        ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
    return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
              struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
        ? FSL_DMA_SNEN : 0;
    hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
    u64 snoop_bits;

    snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
        ? FSL_DMA_SNEN : 0;

    desc->hw.next_ln_addr = CPU_TO_DMA(chan,
        DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
            | snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
    /* Reset the channel */
    DMA_OUT(chan, &chan->regs->mr, 0, 32);

    switch (chan->feature & FSL_DMA_IP_MASK) {
    case FSL_DMA_IP_85XX:
        /* Set the channel to the following modes:
         * EIE - Error interrupt enable
         * EOLNIE - End of links interrupt enable
         * BWC - Bandwidth sharing among channels
         */
        DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
                | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
        break;
    case FSL_DMA_IP_83XX:
        /* Set the channel to the following modes:
         * EOTIE - End-of-transfer interrupt enable
         * PRC_RM - PCI read multiple
         */
        DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
                | FSL_DMA_MR_PRC_RM, 32);
        break;
    }
}

static int dma_is_idle(struct fsldma_chan *chan)
{
    u32 sr = get_sr(chan);
    return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
    u32 mode;

    mode = DMA_IN(chan, &chan->regs->mr, 32);

    if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
        DMA_OUT(chan, &chan->regs->bcr, 0, 32);
        mode |= FSL_DMA_MR_EMP_EN;
    } else {
        mode &= ~FSL_DMA_MR_EMP_EN;
    }

    if (chan->feature & FSL_DMA_CHAN_START_EXT) {
        mode |= FSL_DMA_MR_EMS_EN;
    } else {
        mode &= ~FSL_DMA_MR_EMS_EN;
        mode |= FSL_DMA_MR_CS;
    }

    DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
    u32 mode;
    int i;

    /* read the mode register */
    mode = DMA_IN(chan, &chan->regs->mr, 32);

    /*
     * The 85xx controller supports channel abort, which will stop
     * the current transfer. On 83xx, this bit is the transfer error
     * mask bit, which should not be changed.
     */
    if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
        mode |= FSL_DMA_MR_CA;
        DMA_OUT(chan, &chan->regs->mr, mode, 32);

        mode &= ~FSL_DMA_MR_CA;
    }

    /* stop the DMA controller */
    mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
    DMA_OUT(chan, &chan->regs->mr, mode, 32);

    /* wait for the DMA controller to become idle */
    for (i = 0; i < 100; i++) {
        if (dma_is_idle(chan))
            return;

        udelay(10);
    }

    if (!dma_is_idle(chan))
        chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA engine
 * transfers data from the source address (SA), a loop size of 4 makes it
 * read from SA, SA + 1, SA + 2, SA + 3, then wrap back to SA, SA + 1,
 * and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
    u32 mode;

    mode = DMA_IN(chan, &chan->regs->mr, 32);

    switch (size) {
    case 0:
        mode &= ~FSL_DMA_MR_SAHE;
        break;
    case 1:
    case 2:
    case 4:
    case 8:
        mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
        break;
    }

    DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * engine transfers data to the destination address (TA), a loop size of 4
 * makes it write to TA, TA + 1, TA + 2, TA + 3, then wrap back to TA,
 * TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
    u32 mode;

    mode = DMA_IN(chan, &chan->regs->mr, 32);

    switch (size) {
    case 0:
        mode &= ~FSL_DMA_MR_DAHE;
        break;
    case 1:
    case 2:
    case 4:
    case 8:
        mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
        break;
    }

    DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

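/*
 * Worked example (illustrative, derived from the encodings above): a loop
 * size of 8 gives __ilog2(8) = 3, so the source hold path sets
 * FSL_DMA_MR_SAHE plus the size field (3 << 14), and the destination hold
 * path sets FSL_DMA_MR_DAHE plus (3 << 16). A caller would simply do:
 *
 *     fsl_chan_set_src_loop_size(chan, 8);
 *     fsl_chan_set_dst_loop_size(chan, 8);
 */
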
/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to be transferred
 * before pausing the channel, after which a new assertion of DREQ# resumes
 * channel operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
    u32 mode;

    BUG_ON(size > 1024);

    mode = DMA_IN(chan, &chan->regs->mr, 32);
    mode |= (__ilog2(size) << 24) & 0x0f000000;

    DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

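/*
 * Worked example (illustrative): a request count of 64 bytes gives
 * __ilog2(64) = 6, so (6 << 24) & 0x0f000000 programs 0x06000000 into
 * the 4-bit field at bits 27:24 of the mode register.
 */
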
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
    if (enable)
        chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
    else
        chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel is started by the external
 * DMA start pin, so dma_start() does not begin the transfer immediately.
 * The DMA channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
    if (enable)
        chan->feature |= FSL_DMA_CHAN_START_EXT;
    else
        chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
    struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

    if (list_empty(&chan->ld_pending))
        goto out_splice;

    /*
     * Add the hardware descriptor to the chain of hardware descriptors
     * that already exists in memory.
     *
     * This will un-set the EOL bit of the existing transaction, and the
     * last link in this transaction will become the EOL descriptor.
     */
    set_desc_next(chan, &tail->hw, desc->async_tx.phys);

    /*
     * Add the software descriptor and all children to the list
     * of pending transactions
     */
out_splice:
    list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
    struct fsldma_chan *chan = to_fsl_chan(tx->chan);
    struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
    struct fsl_desc_sw *child;
    unsigned long flags;
    dma_cookie_t cookie;

    spin_lock_irqsave(&chan->desc_lock, flags);

    /*
     * assign cookies to all of the software descriptors
     * that make up this transaction
     */
    list_for_each_entry(child, &desc->tx_list, node) {
        cookie = dma_cookie_assign(&child->async_tx);
    }

    /* put this transaction onto the tail of the pending queue */
    append_ld_queue(chan, desc);

    spin_unlock_irqrestore(&chan->desc_lock, flags);

    return cookie;
}

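/*
 * Note on the submit flow (an illustrative sketch using the generic
 * dmaengine client API, not code from this driver): a client obtains a
 * descriptor from one of the prep routines below, then submits it and
 * kicks the queue, which lands in fsl_dma_tx_submit() and
 * fsl_dma_memcpy_issue_pending() respectively:
 *
 *     tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *     cookie = dmaengine_submit(tx);
 *     dma_async_issue_pending(chan);
 */
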
/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - the allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
    struct fsl_desc_sw *desc;
    dma_addr_t pdesc;

    desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
    if (!desc) {
        chan_dbg(chan, "out of memory for link descriptor\n");
        return NULL;
    }

    memset(desc, 0, sizeof(*desc));
    INIT_LIST_HEAD(&desc->tx_list);
    dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
    desc->async_tx.tx_submit = fsl_dma_tx_submit;
    desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
    chan_dbg(chan, "LD %p allocated\n", desc);
#endif

    return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
    struct fsldma_chan *chan = to_fsl_chan(dchan);

    /* Has this channel already been allocated? */
    if (chan->desc_pool)
        return 1;

    /*
     * Descriptors must be 32-byte aligned to meet the FSL DMA
     * specification requirement.
     */
    chan->desc_pool = dma_pool_create(chan->name, chan->dev,
                      sizeof(struct fsl_desc_sw),
                      __alignof__(struct fsl_desc_sw), 0);
    if (!chan->desc_pool) {
        chan_err(chan, "unable to allocate descriptor pool\n");
        return -ENOMEM;
    }

    /* there is at least one descriptor free to be allocated */
    return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
                  struct list_head *list)
{
    struct fsl_desc_sw *desc, *_desc;

    list_for_each_entry_safe(desc, _desc, list, node) {
        list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
        chan_dbg(chan, "LD %p free\n", desc);
#endif
        dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
    }
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
                      struct list_head *list)
{
    struct fsl_desc_sw *desc, *_desc;

    list_for_each_entry_safe_reverse(desc, _desc, list, node) {
        list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
        chan_dbg(chan, "LD %p free\n", desc);
#endif
        dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
    }
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
    struct fsldma_chan *chan = to_fsl_chan(dchan);
    unsigned long flags;

    chan_dbg(chan, "free all channel resources\n");
    spin_lock_irqsave(&chan->desc_lock, flags);
    fsldma_free_desc_list(chan, &chan->ld_pending);
    fsldma_free_desc_list(chan, &chan->ld_running);
    spin_unlock_irqrestore(&chan->desc_lock, flags);

    dma_pool_destroy(chan->desc_pool);
    chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
    struct fsldma_chan *chan;
    struct fsl_desc_sw *new;

    if (!dchan)
        return NULL;

    chan = to_fsl_chan(dchan);

    new = fsl_dma_alloc_descriptor(chan);
    if (!new) {
        chan_err(chan, "%s\n", msg_ld_oom);
        return NULL;
    }

    new->async_tx.cookie = -EBUSY;
    new->async_tx.flags = flags;

    /* Insert the link descriptor into the LD ring */
    list_add_tail(&new->node, &new->tx_list);

    /* Set End-of-link on the last link descriptor of the new list */
    set_ld_eol(chan, new);

    return &new->async_tx;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
    dma_addr_t dma_dst, dma_addr_t dma_src,
    size_t len, unsigned long flags)
{
    struct fsldma_chan *chan;
    struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
    size_t copy;

    if (!dchan)
        return NULL;

    if (!len)
        return NULL;

    chan = to_fsl_chan(dchan);

    do {

        /* Allocate the link descriptor from DMA pool */
        new = fsl_dma_alloc_descriptor(chan);
        if (!new) {
            chan_err(chan, "%s\n", msg_ld_oom);
            goto fail;
        }

        copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

        set_desc_cnt(chan, &new->hw, copy);
        set_desc_src(chan, &new->hw, dma_src);
        set_desc_dst(chan, &new->hw, dma_dst);

        if (!first)
            first = new;
        else
            set_desc_next(chan, &prev->hw, new->async_tx.phys);

        new->async_tx.cookie = 0;
        async_tx_ack(&new->async_tx);

        prev = new;
        len -= copy;
        dma_src += copy;
        dma_dst += copy;

        /* Insert the link descriptor into the LD ring */
        list_add_tail(&new->node, &first->tx_list);
    } while (len);

    new->async_tx.flags = flags; /* client is in control of this ack */
    new->async_tx.cookie = -EBUSY;

    /* Set End-of-link on the last link descriptor of the new list */
    set_ld_eol(chan, new);

    return &first->async_tx;

fail:
    if (!first)
        return NULL;

    fsldma_free_desc_list_reverse(chan, &first->tx_list);
    return NULL;
}

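/*
 * Implementation note (illustrative): each link descriptor carries at
 * most FSL_DMA_BCR_MAX_CNT bytes, so a longer copy is split into a chain
 * of descriptors on first->tx_list. Only the final link carries the EOL
 * bit, and only the first descriptor's async_tx is handed back to the
 * client.
 */
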
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
    struct scatterlist *dst_sg, unsigned int dst_nents,
    struct scatterlist *src_sg, unsigned int src_nents,
    unsigned long flags)
{
    struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
    struct fsldma_chan *chan = to_fsl_chan(dchan);
    size_t dst_avail, src_avail;
    dma_addr_t dst, src;
    size_t len;

    /* basic sanity checks */
    if (dst_nents == 0 || src_nents == 0)
        return NULL;

    if (dst_sg == NULL || src_sg == NULL)
        return NULL;

    /*
     * TODO: should we check that both scatterlists have the same
     * TODO: number of bytes in total? Is that really an error?
     */

    /* get ready for the loop */
    dst_avail = sg_dma_len(dst_sg);
    src_avail = sg_dma_len(src_sg);

    /* run until we are out of scatterlist entries */
    while (true) {

        /* create the largest transaction possible */
        len = min_t(size_t, src_avail, dst_avail);
        len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
        if (len == 0)
            goto fetch;

        dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
        src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

        /* allocate and populate the descriptor */
        new = fsl_dma_alloc_descriptor(chan);
        if (!new) {
            chan_err(chan, "%s\n", msg_ld_oom);
            goto fail;
        }

        set_desc_cnt(chan, &new->hw, len);
        set_desc_src(chan, &new->hw, src);
        set_desc_dst(chan, &new->hw, dst);

        if (!first)
            first = new;
        else
            set_desc_next(chan, &prev->hw, new->async_tx.phys);

        new->async_tx.cookie = 0;
        async_tx_ack(&new->async_tx);
        prev = new;

        /* Insert the link descriptor into the LD ring */
        list_add_tail(&new->node, &first->tx_list);

        /* update metadata */
        dst_avail -= len;
        src_avail -= len;

fetch:
        /* fetch the next dst scatterlist entry */
        if (dst_avail == 0) {

            /* no more entries: we're done */
            if (dst_nents == 0)
                break;

            /* fetch the next entry: if there are no more: done */
            dst_sg = sg_next(dst_sg);
            if (dst_sg == NULL)
                break;

            dst_nents--;
            dst_avail = sg_dma_len(dst_sg);
        }

        /* fetch the next src scatterlist entry */
        if (src_avail == 0) {

            /* no more entries: we're done */
            if (src_nents == 0)
                break;

            /* fetch the next entry: if there are no more: done */
            src_sg = sg_next(src_sg);
            if (src_sg == NULL)
                break;

            src_nents--;
            src_avail = sg_dma_len(src_sg);
        }
    }

    new->async_tx.flags = flags; /* client is in control of this ack */
    new->async_tx.cookie = -EBUSY;

    /* Set End-of-link on the last link descriptor of the new list */
    set_ld_eol(chan, new);

    return &first->async_tx;

fail:
    if (!first)
        return NULL;

    fsldma_free_desc_list_reverse(chan, &first->tx_list);
    return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 * @context: transaction context (ignored)
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
    struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
    enum dma_transfer_direction direction, unsigned long flags,
    void *context)
{
    /*
     * This operation is not supported on the Freescale DMA controller
     *
     * However, we need to provide the function pointer to allow the
     * device_control() method to work.
     */
    return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
                  enum dma_ctrl_cmd cmd, unsigned long arg)
{
    struct dma_slave_config *config;
    struct fsldma_chan *chan;
    unsigned long flags;
    int size;

    if (!dchan)
        return -EINVAL;

    chan = to_fsl_chan(dchan);

    switch (cmd) {
    case DMA_TERMINATE_ALL:
        spin_lock_irqsave(&chan->desc_lock, flags);

        /* Halt the DMA engine */
        dma_halt(chan);

        /* Remove and free all of the descriptors in the LD queue */
        fsldma_free_desc_list(chan, &chan->ld_pending);
        fsldma_free_desc_list(chan, &chan->ld_running);
        chan->idle = true;

        spin_unlock_irqrestore(&chan->desc_lock, flags);
        return 0;

    case DMA_SLAVE_CONFIG:
        config = (struct dma_slave_config *)arg;

        /* make sure the channel supports setting burst size */
        if (!chan->set_request_count)
            return -ENXIO;

        /* we set the controller burst size depending on direction */
        if (config->direction == DMA_MEM_TO_DEV)
            size = config->dst_addr_width * config->dst_maxburst;
        else
            size = config->src_addr_width * config->src_maxburst;

        chan->set_request_count(chan, size);
        return 0;

    case FSLDMA_EXTERNAL_START:

        /* make sure the channel supports external start */
        if (!chan->toggle_ext_start)
            return -ENXIO;

        chan->toggle_ext_start(chan, arg);
        return 0;

    default:
        return -ENXIO;
    }

    return 0;
}

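/*
 * Example (an illustrative sketch; dmaengine_slave_config() and
 * dmaengine_device_control() are generic dmaengine wrappers, and the
 * values below are made up): a client might program a 64-byte request
 * count and enable external start like this:
 *
 *     struct dma_slave_config cfg = {
 *         .direction = DMA_MEM_TO_DEV,
 *         .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *         .dst_maxburst = 16,        // 4 bytes * 16 = 64 bytes
 *     };
 *     dmaengine_slave_config(chan, &cfg);
 *     dmaengine_device_control(chan, FSLDMA_EXTERNAL_START, 1);
 */
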
/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
                      struct fsl_desc_sw *desc)
{
    struct dma_async_tx_descriptor *txd = &desc->async_tx;
    struct device *dev = chan->common.device->dev;
    dma_addr_t src = get_desc_src(chan, desc);
    dma_addr_t dst = get_desc_dst(chan, desc);
    u32 len = get_desc_cnt(chan, desc);

    /* Run the link descriptor callback function */
    if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
        chan_dbg(chan, "LD %p callback\n", desc);
#endif
        txd->callback(txd->callback_param);
    }

    /* Run any dependencies */
    dma_run_dependencies(txd);

    /* Unmap the dst buffer, if requested */
    if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
        if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
            dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
        else
            dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
    }

    /* Unmap the src buffer, if requested */
    if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
        if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
            dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
        else
            dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
    }

#ifdef FSL_DMA_LD_DEBUG
    chan_dbg(chan, "LD %p free\n", desc);
#endif
    dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
    struct fsl_desc_sw *desc;

    /*
     * If the list of pending descriptors is empty, then we
     * don't need to do any work at all
     */
    if (list_empty(&chan->ld_pending)) {
        chan_dbg(chan, "no pending LDs\n");
        return;
    }

    /*
     * The DMA controller is not idle, which means that the interrupt
     * handler will start any queued transactions when it runs after
     * this transaction finishes
     */
    if (!chan->idle) {
        chan_dbg(chan, "DMA controller still busy\n");
        return;
    }

    /*
     * If there are some link descriptors which have not been
     * transferred, we need to start the controller
     */

    /*
     * Move all elements from the queue of pending transactions
     * onto the list of running transactions
     */
    chan_dbg(chan, "idle, starting controller\n");
    desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
    list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

    /*
     * The 85xx DMA controller doesn't clear the channel start bit
     * automatically at the end of a transfer. Therefore we must clear
     * it in software before starting the transfer.
     */
    if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
        u32 mode;

        mode = DMA_IN(chan, &chan->regs->mr, 32);
        mode &= ~FSL_DMA_MR_CS;
        DMA_OUT(chan, &chan->regs->mr, mode, 32);
    }

    /*
     * Program the descriptor's address into the DMA controller,
     * then start the DMA transaction
     */
    set_cdar(chan, desc->async_tx.phys);
    get_cdar(chan);

    dma_start(chan);
    chan->idle = false;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
    struct fsldma_chan *chan = to_fsl_chan(dchan);
    unsigned long flags;

    spin_lock_irqsave(&chan->desc_lock, flags);
    fsl_chan_xfer_ld_queue(chan);
    spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
                    dma_cookie_t cookie,
                    struct dma_tx_state *txstate)
{
    struct fsldma_chan *chan = to_fsl_chan(dchan);
    enum dma_status ret;
    unsigned long flags;

    spin_lock_irqsave(&chan->desc_lock, flags);
    ret = dma_cookie_status(dchan, cookie, txstate);
    spin_unlock_irqrestore(&chan->desc_lock, flags);

    return ret;
}

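/*
 * Example (illustrative): a polling client can feed the cookie returned
 * by dmaengine_submit() back through the generic helper, which ends up
 * in fsl_tx_status():
 *
 *     while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *                     != DMA_SUCCESS)
 *         cpu_relax();
 */
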
/*----------------------------------------------------------------------------*/
/* Interrupt Handling */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
    struct fsldma_chan *chan = data;
    u32 stat;

    /* save and clear the status register */
    stat = get_sr(chan);
    set_sr(chan, stat);
    chan_dbg(chan, "irq: stat = 0x%x\n", stat);

    /* check that this was really our device */
    stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
    if (!stat)
        return IRQ_NONE;

    if (stat & FSL_DMA_SR_TE)
        chan_err(chan, "Transfer Error!\n");

    /*
     * Programming Error
     * The DMA_INTERRUPT async_tx is a NULL transfer, which will
     * trigger a PE interrupt.
     */
    if (stat & FSL_DMA_SR_PE) {
        chan_dbg(chan, "irq: Programming Error INT\n");
        stat &= ~FSL_DMA_SR_PE;
        if (get_bcr(chan) != 0)
            chan_err(chan, "Programming Error!\n");
    }

    /*
     * For MPC8349, the EOCDI event needs to update the cookie
     * and start the next transfer if one exists.
     */
    if (stat & FSL_DMA_SR_EOCDI) {
        chan_dbg(chan, "irq: End-of-Chain link INT\n");
        stat &= ~FSL_DMA_SR_EOCDI;
    }

    /*
     * If the current transfer is the end-of-transfer,
     * we should clear the Channel Start bit to prepare
     * for the next transfer.
     */
    if (stat & FSL_DMA_SR_EOLNI) {
        chan_dbg(chan, "irq: End-of-link INT\n");
        stat &= ~FSL_DMA_SR_EOLNI;
    }

    /* check that the DMA controller is really idle */
    if (!dma_is_idle(chan))
        chan_err(chan, "irq: controller not idle!\n");

    /* check that we handled all of the bits */
    if (stat)
        chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

    /*
     * Schedule the tasklet to handle all cleanup of the current
     * transaction. It will start a new transaction if there is
     * one pending.
     */
    tasklet_schedule(&chan->tasklet);
    chan_dbg(chan, "irq: Exit\n");
    return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
    struct fsldma_chan *chan = (struct fsldma_chan *)data;
    struct fsl_desc_sw *desc, *_desc;
    LIST_HEAD(ld_cleanup);
    unsigned long flags;

    chan_dbg(chan, "tasklet entry\n");

    spin_lock_irqsave(&chan->desc_lock, flags);

    /* update the cookie if we have some descriptors to cleanup */
    if (!list_empty(&chan->ld_running)) {
        dma_cookie_t cookie;

        desc = to_fsl_desc(chan->ld_running.prev);
        cookie = desc->async_tx.cookie;
        dma_cookie_complete(&desc->async_tx);

        chan_dbg(chan, "completed_cookie=%d\n", cookie);
    }

    /*
     * move the descriptors to a temporary list so we can drop the lock
     * during the entire cleanup operation
     */
    list_splice_tail_init(&chan->ld_running, &ld_cleanup);

    /* the hardware is now idle and ready for more */
    chan->idle = true;

    /*
     * Start any pending transactions automatically
     *
     * In the ideal case, we keep the DMA controller busy while we go
     * ahead and free the descriptors below.
     */
    fsl_chan_xfer_ld_queue(chan);
    spin_unlock_irqrestore(&chan->desc_lock, flags);

    /* Run the callback for each descriptor, in order */
    list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

        /* Remove from the list of transactions */
        list_del(&desc->node);

        /* Run all cleanup for this descriptor */
        fsldma_cleanup_descriptor(chan, desc);
    }

    chan_dbg(chan, "tasklet exit\n");
}

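/*
 * Controller-level interrupt handler. The general status register (GSR)
 * packs one status byte per channel, with channel 0 in the most
 * significant byte; the loop below walks the channels by shifting the
 * 0xff000000 mask right by 8 bits at each step.
 */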
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
    struct fsldma_device *fdev = data;
    struct fsldma_chan *chan;
    unsigned int handled = 0;
    u32 gsr, mask;
    int i;

    gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
                           : in_le32(fdev->regs);
    mask = 0xff000000;
    dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        chan = fdev->chan[i];
        if (!chan)
            continue;

        if (gsr & mask) {
            dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
            fsldma_chan_irq(irq, chan);
            handled++;
        }

        gsr &= ~mask;
        mask >>= 8;
    }

    return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
    struct fsldma_chan *chan;
    int i;

    if (fdev->irq != NO_IRQ) {
        dev_dbg(fdev->dev, "free per-controller IRQ\n");
        free_irq(fdev->irq, fdev);
        return;
    }

    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        chan = fdev->chan[i];
        if (chan && chan->irq != NO_IRQ) {
            chan_dbg(chan, "free per-channel IRQ\n");
            free_irq(chan->irq, chan);
        }
    }
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
    struct fsldma_chan *chan;
    int ret;
    int i;

    /* if we have a per-controller IRQ, use that */
    if (fdev->irq != NO_IRQ) {
        dev_dbg(fdev->dev, "request per-controller IRQ\n");
        ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
                  "fsldma-controller", fdev);
        return ret;
    }

    /* no per-controller IRQ, use the per-channel IRQs */
    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        chan = fdev->chan[i];
        if (!chan)
            continue;

        if (chan->irq == NO_IRQ) {
            chan_err(chan, "interrupts property missing in device tree\n");
            ret = -ENODEV;
            goto out_unwind;
        }

        chan_dbg(chan, "request per-channel IRQ\n");
        ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
                  "fsldma-chan", chan);
        if (ret) {
            chan_err(chan, "unable to request per-channel IRQ\n");
            goto out_unwind;
        }
    }

    return 0;

out_unwind:
    /* free only the IRQs that were successfully requested */
    for (i = i - 1; i >= 0; i--) {
        chan = fdev->chan[i];
        if (!chan)
            continue;

        if (chan->irq == NO_IRQ)
            continue;

        free_irq(chan->irq, chan);
    }

    return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
    struct device_node *node, u32 feature, const char *compatible)
{
    struct fsldma_chan *chan;
    struct resource res;
    int err;

    /* alloc channel */
    chan = kzalloc(sizeof(*chan), GFP_KERNEL);
    if (!chan) {
        dev_err(fdev->dev, "no free memory for DMA channels!\n");
        err = -ENOMEM;
        goto out_return;
    }

    /* ioremap registers for use */
    chan->regs = of_iomap(node, 0);
    if (!chan->regs) {
        dev_err(fdev->dev, "unable to ioremap registers\n");
        err = -ENOMEM;
        goto out_free_chan;
    }

    err = of_address_to_resource(node, 0, &res);
    if (err) {
        dev_err(fdev->dev, "unable to find 'reg' property\n");
        goto out_iounmap_regs;
    }

    chan->feature = feature;
    if (!fdev->feature)
        fdev->feature = chan->feature;

    /*
     * If the DMA device's feature is different than the feature
     * of its channels, report the bug
     */
    WARN_ON(fdev->feature != chan->feature);

    chan->dev = fdev->dev;
    chan->id = ((res.start - 0x100) & 0xfff) >> 7;
    if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
        dev_err(fdev->dev, "too many channels for device\n");
        err = -EINVAL;
        goto out_iounmap_regs;
    }

    fdev->chan[chan->id] = chan;
    tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
    snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

    /* Initialize the channel */
    dma_init(chan);

    /* Clear cdar registers */
    set_cdar(chan, 0);

    switch (chan->feature & FSL_DMA_IP_MASK) {
    case FSL_DMA_IP_85XX:
        chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
        /* fall through: 85xx channels also use the 83xx callbacks */
    case FSL_DMA_IP_83XX:
        chan->toggle_ext_start = fsl_chan_toggle_ext_start;
        chan->set_src_loop_size = fsl_chan_set_src_loop_size;
        chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
        chan->set_request_count = fsl_chan_set_request_count;
    }

    spin_lock_init(&chan->desc_lock);
    INIT_LIST_HEAD(&chan->ld_pending);
    INIT_LIST_HEAD(&chan->ld_running);
    chan->idle = true;

    chan->common.device = &fdev->common;
    dma_cookie_init(&chan->common);

    /* find the IRQ line, if it exists in the device tree */
    chan->irq = irq_of_parse_and_map(node, 0);

    /* Add the channel to DMA device channel list */
    list_add_tail(&chan->common.device_node, &fdev->common.channels);
    fdev->common.chancnt++;

    dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
         chan->irq != NO_IRQ ? chan->irq : fdev->irq);

    return 0;

out_iounmap_regs:
    iounmap(chan->regs);
out_free_chan:
    kfree(chan);
out_return:
    return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
    irq_dispose_mapping(chan->irq);
    list_del(&chan->common.device_node);
    iounmap(chan->regs);
    kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op)
{
    struct fsldma_device *fdev;
    struct device_node *child;
    int err;

    fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
    if (!fdev) {
        dev_err(&op->dev, "not enough memory for 'priv'\n");
        err = -ENOMEM;
        goto out_return;
    }

    fdev->dev = &op->dev;
    INIT_LIST_HEAD(&fdev->common.channels);

    /* ioremap the registers for use */
    fdev->regs = of_iomap(op->dev.of_node, 0);
    if (!fdev->regs) {
        dev_err(&op->dev, "unable to ioremap registers\n");
        err = -ENOMEM;
        goto out_free_fdev;
    }

    /* map the channel IRQ if it exists, but don't hook up the handler yet */
    fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

    dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
    dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
    dma_cap_set(DMA_SG, fdev->common.cap_mask);
    dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
    fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
    fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
    fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
    fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
    fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
    fdev->common.device_tx_status = fsl_tx_status;
    fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
    fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
    fdev->common.device_control = fsl_dma_device_control;
    fdev->common.dev = &op->dev;

    dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

    dev_set_drvdata(&op->dev, fdev);

    /*
     * We cannot use of_platform_bus_probe() because there is no
     * of_platform_bus_remove(). Instead, we manually instantiate every DMA
     * channel object.
     */
    for_each_child_of_node(op->dev.of_node, child) {
        if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
            fsl_dma_chan_probe(fdev, child,
                FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
                "fsl,eloplus-dma-channel");
        }

        if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
            fsl_dma_chan_probe(fdev, child,
                FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
                "fsl,elo-dma-channel");
        }
    }

    /*
     * Hook up the IRQ handler(s)
     *
     * If we have a per-controller interrupt, we prefer that to the
     * per-channel interrupts to reduce the number of shared interrupt
     * handlers on the same IRQ line
     */
    err = fsldma_request_irqs(fdev);
    if (err) {
        dev_err(fdev->dev, "unable to request IRQs\n");
        goto out_free_fdev;
    }

    dma_async_device_register(&fdev->common);
    return 0;

out_free_fdev:
    irq_dispose_mapping(fdev->irq);
    kfree(fdev);
out_return:
    return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
    struct fsldma_device *fdev;
    unsigned int i;

    fdev = dev_get_drvdata(&op->dev);
    dma_async_device_unregister(&fdev->common);

    fsldma_free_irqs(fdev);

    for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
        if (fdev->chan[i])
            fsl_dma_chan_remove(fdev->chan[i]);
    }

    iounmap(fdev->regs);
    dev_set_drvdata(&op->dev, NULL);
    kfree(fdev);

    return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
    { .compatible = "fsl,eloplus-dma", },
    { .compatible = "fsl,elo-dma", },
    {}
};

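/*
 * Example device tree fragment (an illustrative sketch only; the
 * authoritative layout lives in the binding documentation). The
 * controller node carries one of the compatibles above, and each
 * channel is a child node matched in fsldma_of_probe():
 *
 *     dma@21300 {
 *         compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
 *         reg = <0x21300 0x4>;
 *         ranges = <0x0 0x21100 0x200>;
 *
 *         dma-channel@0 {
 *             compatible = "fsl,mpc8548-dma-channel",
 *                          "fsl,eloplus-dma-channel";
 *             reg = <0x0 0x80>;
 *             interrupts = <20 2>;
 *         };
 *     };
 */
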
static struct platform_driver fsldma_of_driver = {
    .driver = {
        .name = "fsl-elo-dma",
        .owner = THIS_MODULE,
        .of_match_table = fsldma_of_ids,
    },
    .probe = fsldma_of_probe,
    .remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
    pr_info("Freescale Elo / Elo Plus DMA driver\n");
    return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
    platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");