drivers/dma/ep93xx_dma.c

/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Copyright (C) 2006 Applied Data Systems
 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL 0x0000
#define M2P_CONTROL_STALLINT BIT(0)
#define M2P_CONTROL_NFBINT BIT(1)
#define M2P_CONTROL_CH_ERROR_INT BIT(3)
#define M2P_CONTROL_ENABLE BIT(4)
#define M2P_CONTROL_ICE BIT(6)

#define M2P_INTERRUPT 0x0004
#define M2P_INTERRUPT_STALL BIT(0)
#define M2P_INTERRUPT_NFB BIT(1)
#define M2P_INTERRUPT_ERROR BIT(3)

#define M2P_PPALLOC 0x0008
#define M2P_STATUS 0x000c

#define M2P_MAXCNT0 0x0020
#define M2P_BASE0 0x0024
#define M2P_MAXCNT1 0x0030
#define M2P_BASE1 0x0034

#define M2P_STATE_IDLE 0
#define M2P_STATE_STALL 1
#define M2P_STATE_ON 2
#define M2P_STATE_NEXT 3

/* M2M registers */
#define M2M_CONTROL 0x0000
#define M2M_CONTROL_DONEINT BIT(2)
#define M2M_CONTROL_ENABLE BIT(3)
#define M2M_CONTROL_START BIT(4)
#define M2M_CONTROL_DAH BIT(11)
#define M2M_CONTROL_SAH BIT(12)
#define M2M_CONTROL_PW_SHIFT 9
#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT 13
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT BIT(21)
#define M2M_CONTROL_RSS_SHIFT 22
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK BIT(24)
#define M2M_CONTROL_PWSC_SHIFT 25

#define M2M_INTERRUPT 0x0004
#define M2M_INTERRUPT_MASK 6

#define M2M_STATUS 0x000c
#define M2M_STATUS_CTL_SHIFT 1
#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT 4
#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE BIT(6)

#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
#define M2M_SAR_BASE0 0x0018
#define M2M_SAR_BASE1 0x001c
#define M2M_DAR_BASE0 0x002c
#define M2M_DAR_BASE1 0x0030

#define DMA_MAX_CHAN_BYTES 0xffff
#define DMA_MAX_CHAN_DESCRIPTORS 32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
    u32 src_addr;
    u32 dst_addr;
    size_t size;
    bool complete;
    struct dma_async_tx_descriptor txd;
    struct list_head tx_list;
    struct list_head node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
    struct dma_chan chan;
    const struct ep93xx_dma_engine *edma;
    void __iomem *regs;
    int irq;
    struct clk *clk;
    struct tasklet_struct tasklet;
    /* protects the fields following */
    spinlock_t lock;
    unsigned long flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC 0

    int buffer;
    struct list_head active;
    struct list_head queue;
    struct list_head free_list;
    u32 runtime_addr;
    u32 runtime_ctrl;
};
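
/*
 * Note: a minimal, illustrative sketch of how a client might obtain one of
 * these channels. The filter function and the ep93xx_dma_data values below
 * are hypothetical client-side code, not part of this driver; only the
 * fields and helpers already used elsewhere in this file are assumed.
 *
 *	static bool my_ep93xx_dma_filter(struct dma_chan *chan, void *param)
 *	{
 *		struct ep93xx_dma_data *data = param;
 *
 *		if (data->direction != ep93xx_dma_chan_direction(chan))
 *			return false;
 *		chan->private = data;
 *		return true;
 *	}
 *
 *	static struct ep93xx_dma_data my_dma_data = {
 *		.port		= EP93XX_DMA_SSP,
 *		.direction	= DMA_MEM_TO_DEV,
 *		.name		= "my-ssp-tx",
 *	};
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_ep93xx_dma_filter, &my_dma_data);
 */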

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
    struct dma_device dma_dev;
    bool m2m;
    int (*hw_setup)(struct ep93xx_dma_chan *);
    void (*hw_shutdown)(struct ep93xx_dma_chan *);
    void (*hw_submit)(struct ep93xx_dma_chan *);
    int (*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN 0
#define INTERRUPT_DONE 1
#define INTERRUPT_NEXT_BUFFER 2

    size_t num_channels;
    struct ep93xx_dma_chan channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
    return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
    return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
                  struct ep93xx_dma_desc *desc)
{
    BUG_ON(!list_empty(&edmac->active));

    list_add_tail(&desc->node, &edmac->active);

    /* Flatten the @desc->tx_list chain into @edmac->active list */
    while (!list_empty(&desc->tx_list)) {
        struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
            struct ep93xx_dma_desc, node);

        /*
         * We copy the callback parameters from the first descriptor
         * to all the chained descriptors. This way we can call the
         * callback without having to find out the first descriptor in
         * the chain. Useful for cyclic transfers.
         */
        d->txd.callback = desc->txd.callback;
        d->txd.callback_param = desc->txd.callback_param;

        list_move_tail(&d->node, &edmac->active);
    }
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
    if (list_empty(&edmac->active))
        return NULL;

    return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances active descriptor to the next in the @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
    struct ep93xx_dma_desc *desc;

    list_rotate_left(&edmac->active);

    if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
        return true;

    desc = ep93xx_dma_get_active(edmac);
    if (!desc)
        return false;

    /*
     * If txd.cookie is set it means that we are back in the first
     * descriptor in the chain and hence done with it.
     */
    return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
    writel(control, edmac->regs + M2P_CONTROL);
    /*
     * EP93xx User's Guide states that we must perform a dummy read after
     * write to the control register.
     */
    readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
    struct ep93xx_dma_data *data = edmac->chan.private;
    u32 control;

    writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

    control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
        | M2P_CONTROL_ENABLE;
    m2p_set_control(edmac, control);

    return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
    return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
    u32 control;

    control = readl(edmac->regs + M2P_CONTROL);
    control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
    m2p_set_control(edmac, control);

    while (m2p_channel_state(edmac) >= M2P_STATE_ON)
        cpu_relax();

    m2p_set_control(edmac, 0);

    while (m2p_channel_state(edmac) == M2P_STATE_STALL)
        cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
    struct ep93xx_dma_desc *desc;
    u32 bus_addr;

    desc = ep93xx_dma_get_active(edmac);
    if (!desc) {
        dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
        return;
    }

    if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
        bus_addr = desc->src_addr;
    else
        bus_addr = desc->dst_addr;

    if (edmac->buffer == 0) {
        writel(desc->size, edmac->regs + M2P_MAXCNT0);
        writel(bus_addr, edmac->regs + M2P_BASE0);
    } else {
        writel(desc->size, edmac->regs + M2P_MAXCNT1);
        writel(bus_addr, edmac->regs + M2P_BASE1);
    }

    edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
    u32 control = readl(edmac->regs + M2P_CONTROL);

    m2p_fill_desc(edmac);
    control |= M2P_CONTROL_STALLINT;

    if (ep93xx_dma_advance_active(edmac)) {
        m2p_fill_desc(edmac);
        control |= M2P_CONTROL_NFBINT;
    }

    m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
    u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
    u32 control;

    if (irq_status & M2P_INTERRUPT_ERROR) {
        struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

        /* Clear the error interrupt */
        writel(1, edmac->regs + M2P_INTERRUPT);

        /*
         * It seems that there is no easy way of reporting errors back
         * to the client, so we just report the error here and continue as
         * usual.
         *
         * Revisit this when there is a mechanism to report back the
         * errors.
         */
        dev_err(chan2dev(edmac),
            "DMA transfer failed! Details:\n"
            "\tcookie : %d\n"
            "\tsrc_addr : 0x%08x\n"
            "\tdst_addr : 0x%08x\n"
            "\tsize : %zu\n",
            desc->txd.cookie, desc->src_addr, desc->dst_addr,
            desc->size);
    }

    switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
    case M2P_INTERRUPT_STALL:
        /* Disable interrupts */
        control = readl(edmac->regs + M2P_CONTROL);
        control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
        m2p_set_control(edmac, control);

        return INTERRUPT_DONE;

    case M2P_INTERRUPT_NFB:
        if (ep93xx_dma_advance_active(edmac))
            m2p_fill_desc(edmac);

        return INTERRUPT_NEXT_BUFFER;
    }

    return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
    const struct ep93xx_dma_data *data = edmac->chan.private;
    u32 control = 0;

    if (!data) {
        /* This is memcpy channel, nothing to configure */
        writel(control, edmac->regs + M2M_CONTROL);
        return 0;
    }

    switch (data->port) {
    case EP93XX_DMA_SSP:
        /*
         * This was found via experimenting - anything less than 5
         * causes the channel to perform only a partial transfer which
         * leads to problems since we don't get DONE interrupt then.
         */
        control = (5 << M2M_CONTROL_PWSC_SHIFT);
        control |= M2M_CONTROL_NO_HDSK;

        if (data->direction == DMA_MEM_TO_DEV) {
            control |= M2M_CONTROL_DAH;
            control |= M2M_CONTROL_TM_TX;
            control |= M2M_CONTROL_RSS_SSPTX;
        } else {
            control |= M2M_CONTROL_SAH;
            control |= M2M_CONTROL_TM_RX;
            control |= M2M_CONTROL_RSS_SSPRX;
        }
        break;

    case EP93XX_DMA_IDE:
        /*
         * This IDE part is totally untested. Values below are taken
         * from the EP93xx User's Guide and might not be correct.
         */
        if (data->direction == DMA_MEM_TO_DEV) {
            /* Worst case from the UG */
            control = (3 << M2M_CONTROL_PWSC_SHIFT);
            control |= M2M_CONTROL_DAH;
            control |= M2M_CONTROL_TM_TX;
        } else {
            control = (2 << M2M_CONTROL_PWSC_SHIFT);
            control |= M2M_CONTROL_SAH;
            control |= M2M_CONTROL_TM_RX;
        }

        control |= M2M_CONTROL_NO_HDSK;
        control |= M2M_CONTROL_RSS_IDE;
        control |= M2M_CONTROL_PW_16;
        break;

    default:
        return -EINVAL;
    }

    writel(control, edmac->regs + M2M_CONTROL);
    return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
    /* Just disable the channel */
    writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
    struct ep93xx_dma_desc *desc;

    desc = ep93xx_dma_get_active(edmac);
    if (!desc) {
        dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
        return;
    }

    if (edmac->buffer == 0) {
        writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
        writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
        writel(desc->size, edmac->regs + M2M_BCR0);
    } else {
        writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
        writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
        writel(desc->size, edmac->regs + M2M_BCR1);
    }

    edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
    struct ep93xx_dma_data *data = edmac->chan.private;
    u32 control = readl(edmac->regs + M2M_CONTROL);

    /*
     * Since we allow clients to configure PW (peripheral width) we always
     * clear PW bits here and then set them according to what is given in
     * the runtime configuration.
     */
    control &= ~M2M_CONTROL_PW_MASK;
    control |= edmac->runtime_ctrl;

    m2m_fill_desc(edmac);
    control |= M2M_CONTROL_DONEINT;

    if (ep93xx_dma_advance_active(edmac)) {
        m2m_fill_desc(edmac);
        control |= M2M_CONTROL_NFBINT;
    }

    /*
     * Now we can finally enable the channel. For M2M channel this must be
     * done _after_ the BCRx registers are programmed.
     */
    control |= M2M_CONTROL_ENABLE;
    writel(control, edmac->regs + M2M_CONTROL);

    if (!data) {
        /*
         * For memcpy channels the software trigger must be asserted
         * in order to start the memcpy operation.
         */
        control |= M2M_CONTROL_START;
        writel(control, edmac->regs + M2M_CONTROL);
    }
}

/*
 * According to EP93xx User's Guide, we should receive DONE interrupt when all
 * M2M DMA controller transactions complete normally. This is not always the
 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
 * In effect, disabling the channel when only DONE bit is set could stop
 * currently running DMA transfer. To avoid this, we use Buffer FSM and
 * Control FSM to check current state of DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
    u32 status = readl(edmac->regs + M2M_STATUS);
    u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
    u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
    bool done = status & M2M_STATUS_DONE;
    bool last_done;
    u32 control;
    struct ep93xx_dma_desc *desc;

    /* Accept only DONE and NFB interrupts */
    if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
        return INTERRUPT_UNKNOWN;

    if (done) {
        /* Clear the DONE bit */
        writel(0, edmac->regs + M2M_INTERRUPT);
    }

    /*
     * Check whether we are done with descriptors or not. This, together
     * with DMA channel state, determines action to take in interrupt.
     */
    desc = ep93xx_dma_get_active(edmac);
    last_done = !desc || desc->txd.cookie;

    /*
     * Use M2M DMA Buffer FSM and Control FSM to check current state of
     * DMA channel. Using DONE and NFB bits from channel status register
     * or bits from channel interrupt register is not reliable.
     */
    if (!last_done &&
        (buf_fsm == M2M_STATUS_BUF_NO ||
         buf_fsm == M2M_STATUS_BUF_ON)) {
        /*
         * Two buffers are ready for update when Buffer FSM is in
         * DMA_NO_BUF state. Only one buffer can be prepared without
         * disabling the channel or polling the DONE bit.
         * To simplify things, always prepare only one buffer.
         */
        if (ep93xx_dma_advance_active(edmac)) {
            m2m_fill_desc(edmac);
            if (done && !edmac->chan.private) {
                /* Software trigger for memcpy channel */
                control = readl(edmac->regs + M2M_CONTROL);
                control |= M2M_CONTROL_START;
                writel(control, edmac->regs + M2M_CONTROL);
            }
            return INTERRUPT_NEXT_BUFFER;
        } else {
            last_done = true;
        }
    }

    /*
     * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
     * and Control FSM is in DMA_STALL state.
     */
    if (last_done &&
        buf_fsm == M2M_STATUS_BUF_NO &&
        ctl_fsm == M2M_STATUS_CTL_STALL) {
        /* Disable interrupts and the channel */
        control = readl(edmac->regs + M2M_CONTROL);
        control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
                | M2M_CONTROL_ENABLE);
        writel(control, edmac->regs + M2M_CONTROL);
        return INTERRUPT_DONE;
    }

    /*
     * Nothing to do this time.
     */
    return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
    struct ep93xx_dma_desc *desc, *_desc;
    struct ep93xx_dma_desc *ret = NULL;
    unsigned long flags;

    spin_lock_irqsave(&edmac->lock, flags);
    list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
        if (async_tx_test_ack(&desc->txd)) {
            list_del_init(&desc->node);

            /* Re-initialize the descriptor */
            desc->src_addr = 0;
            desc->dst_addr = 0;
            desc->size = 0;
            desc->complete = false;
            desc->txd.cookie = 0;
            desc->txd.callback = NULL;
            desc->txd.callback_param = NULL;

            ret = desc;
            break;
        }
    }
    spin_unlock_irqrestore(&edmac->lock, flags);
    return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
                struct ep93xx_dma_desc *desc)
{
    if (desc) {
        unsigned long flags;

        spin_lock_irqsave(&edmac->lock, flags);
        list_splice_init(&desc->tx_list, &edmac->free_list);
        list_add(&desc->node, &edmac->free_list);
        spin_unlock_irqrestore(&edmac->lock, flags);
    }
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
    struct ep93xx_dma_desc *new;
    unsigned long flags;

    spin_lock_irqsave(&edmac->lock, flags);
    if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
        spin_unlock_irqrestore(&edmac->lock, flags);
        return;
    }

    /* Take the next descriptor from the pending queue */
    new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
    list_del_init(&new->node);

    ep93xx_dma_set_active(edmac, new);

    /* Push it to the hardware */
    edmac->edma->hw_submit(edmac);
    spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
    struct device *dev = desc->txd.chan->device->dev;

    if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
        if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
            dma_unmap_single(dev, desc->src_addr, desc->size,
                     DMA_TO_DEVICE);
        else
            dma_unmap_page(dev, desc->src_addr, desc->size,
                       DMA_TO_DEVICE);
    }
    if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
        if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
            dma_unmap_single(dev, desc->dst_addr, desc->size,
                     DMA_FROM_DEVICE);
        else
            dma_unmap_page(dev, desc->dst_addr, desc->size,
                       DMA_FROM_DEVICE);
    }
}

static void ep93xx_dma_tasklet(unsigned long data)
{
    struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
    struct ep93xx_dma_desc *desc, *d;
    dma_async_tx_callback callback = NULL;
    void *callback_param = NULL;
    LIST_HEAD(list);

    spin_lock_irq(&edmac->lock);
    /*
     * If dma_terminate_all() was called before we get to run, the active
     * list has become empty. If that happens we aren't supposed to do
     * anything more than call ep93xx_dma_advance_work().
     */
    desc = ep93xx_dma_get_active(edmac);
    if (desc) {
        if (desc->complete) {
            /* mark descriptor complete for non cyclic case only */
            if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                dma_cookie_complete(&desc->txd);
            list_splice_init(&edmac->active, &list);
        }
        callback = desc->txd.callback;
        callback_param = desc->txd.callback_param;
    }
    spin_unlock_irq(&edmac->lock);

    /* Pick up the next descriptor from the queue */
    ep93xx_dma_advance_work(edmac);

    /* Now we can release all the chained descriptors */
    list_for_each_entry_safe(desc, d, &list, node) {
        /*
         * For the memcpy channels the API requires us to unmap the
         * buffers unless requested otherwise.
         */
        if (!edmac->chan.private)
            ep93xx_dma_unmap_buffers(desc);

        ep93xx_dma_desc_put(edmac, desc);
    }

    if (callback)
        callback(callback_param);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
    struct ep93xx_dma_chan *edmac = dev_id;
    struct ep93xx_dma_desc *desc;
    irqreturn_t ret = IRQ_HANDLED;

    spin_lock(&edmac->lock);

    desc = ep93xx_dma_get_active(edmac);
    if (!desc) {
        dev_warn(chan2dev(edmac),
             "got interrupt while active list is empty\n");
        spin_unlock(&edmac->lock);
        return IRQ_NONE;
    }

    switch (edmac->edma->hw_interrupt(edmac)) {
    case INTERRUPT_DONE:
        desc->complete = true;
        tasklet_schedule(&edmac->tasklet);
        break;

    case INTERRUPT_NEXT_BUFFER:
        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
            tasklet_schedule(&edmac->tasklet);
        break;

    default:
        dev_warn(chan2dev(edmac), "unknown interrupt!\n");
        ret = IRQ_NONE;
        break;
    }

    spin_unlock(&edmac->lock);
    return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
    struct ep93xx_dma_desc *desc;
    dma_cookie_t cookie;
    unsigned long flags;

    spin_lock_irqsave(&edmac->lock, flags);
    cookie = dma_cookie_assign(tx);

    desc = container_of(tx, struct ep93xx_dma_desc, txd);

    /*
     * If nothing is currently processed, we push this descriptor
     * directly to the hardware. Otherwise we put the descriptor
     * to the pending queue.
     */
    if (list_empty(&edmac->active)) {
        ep93xx_dma_set_active(edmac, desc);
        edmac->edma->hw_submit(edmac);
    } else {
        list_add_tail(&desc->node, &edmac->queue);
    }

    spin_unlock_irqrestore(&edmac->lock, flags);
    return cookie;
}

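/*
 * Note: a short, illustrative sketch of the client-side submit path that ends
 * up in ep93xx_dma_tx_submit() above. "txd" stands for a descriptor returned
 * by one of the prep functions later in this file; the variable names are
 * hypothetical and only generic dmaengine helpers are used.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 *	The returned cookie can later be polled with
 *	dma_async_is_tx_complete(chan, cookie, NULL, NULL), which ends up in
 *	ep93xx_dma_tx_status() further below.
 */
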
/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
    struct ep93xx_dma_data *data = chan->private;
    const char *name = dma_chan_name(chan);
    int ret, i;

    /* Sanity check the channel parameters */
    if (!edmac->edma->m2m) {
        if (!data)
            return -EINVAL;
        if (data->port < EP93XX_DMA_I2S1 ||
            data->port > EP93XX_DMA_IRDA)
            return -EINVAL;
        if (data->direction != ep93xx_dma_chan_direction(chan))
            return -EINVAL;
    } else {
        if (data) {
            switch (data->port) {
            case EP93XX_DMA_SSP:
            case EP93XX_DMA_IDE:
                if (data->direction != DMA_MEM_TO_DEV &&
                    data->direction != DMA_DEV_TO_MEM)
                    return -EINVAL;
                break;
            default:
                return -EINVAL;
            }
        }
    }

    if (data && data->name)
        name = data->name;

    ret = clk_enable(edmac->clk);
    if (ret)
        return ret;

    ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
    if (ret)
        goto fail_clk_disable;

    spin_lock_irq(&edmac->lock);
    dma_cookie_init(&edmac->chan);
    ret = edmac->edma->hw_setup(edmac);
    spin_unlock_irq(&edmac->lock);

    if (ret)
        goto fail_free_irq;

    for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
        struct ep93xx_dma_desc *desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc) {
            dev_warn(chan2dev(edmac), "not enough descriptors\n");
            break;
        }

        INIT_LIST_HEAD(&desc->tx_list);

        dma_async_tx_descriptor_init(&desc->txd, chan);
        desc->txd.flags = DMA_CTRL_ACK;
        desc->txd.tx_submit = ep93xx_dma_tx_submit;

        ep93xx_dma_desc_put(edmac, desc);
    }

    return i;

fail_free_irq:
    free_irq(edmac->irq, edmac);
fail_clk_disable:
    clk_disable(edmac->clk);

    return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
    struct ep93xx_dma_desc *desc, *d;
    unsigned long flags;
    LIST_HEAD(list);

    BUG_ON(!list_empty(&edmac->active));
    BUG_ON(!list_empty(&edmac->queue));

    spin_lock_irqsave(&edmac->lock, flags);
    edmac->edma->hw_shutdown(edmac);
    edmac->runtime_addr = 0;
    edmac->runtime_ctrl = 0;
    edmac->buffer = 0;
    list_splice_init(&edmac->free_list, &list);
    spin_unlock_irqrestore(&edmac->lock, flags);

    list_for_each_entry_safe(desc, d, &list, node)
        kfree(desc);

    clk_disable(edmac->clk);
    free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
               dma_addr_t src, size_t len, unsigned long flags)
{
    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
    struct ep93xx_dma_desc *desc, *first;
    size_t bytes, offset;

    first = NULL;
    for (offset = 0; offset < len; offset += bytes) {
        desc = ep93xx_dma_desc_get(edmac);
        if (!desc) {
            dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
            goto fail;
        }

        bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

        desc->src_addr = src + offset;
        desc->dst_addr = dest + offset;
        desc->size = bytes;

        if (!first)
            first = desc;
        else
            list_add_tail(&desc->node, &first->tx_list);
    }

    first->txd.cookie = -EBUSY;
    first->txd.flags = flags;

    return &first->txd;
fail:
    ep93xx_dma_desc_put(edmac, first);
    return NULL;
}
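
/*
 * Note: an illustrative sketch of driving the memcpy path above from a
 * client. The channel request and the mapped addresses are hypothetical;
 * memcpy channels take no ep93xx_dma_data, so chan->private stays NULL.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
 *						    len, DMA_CTRL_ACK);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */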

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
             unsigned int sg_len, enum dma_transfer_direction dir,
             unsigned long flags, void *context)
{
    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
    struct ep93xx_dma_desc *desc, *first;
    struct scatterlist *sg;
    int i;

    if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
        dev_warn(chan2dev(edmac),
             "channel was configured with different direction\n");
        return NULL;
    }

    if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
        dev_warn(chan2dev(edmac),
             "channel is already used for cyclic transfers\n");
        return NULL;
    }

    first = NULL;
    for_each_sg(sgl, sg, sg_len, i) {
        size_t sg_len = sg_dma_len(sg);

        if (sg_len > DMA_MAX_CHAN_BYTES) {
            dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
                 sg_len);
            goto fail;
        }

        desc = ep93xx_dma_desc_get(edmac);
        if (!desc) {
            dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
            goto fail;
        }

        if (dir == DMA_MEM_TO_DEV) {
            desc->src_addr = sg_dma_address(sg);
            desc->dst_addr = edmac->runtime_addr;
        } else {
            desc->src_addr = edmac->runtime_addr;
            desc->dst_addr = sg_dma_address(sg);
        }
        desc->size = sg_len;

        if (!first)
            first = desc;
        else
            list_add_tail(&desc->node, &first->tx_list);
    }

    first->txd.cookie = -EBUSY;
    first->txd.flags = flags;

    return &first->txd;

fail:
    ep93xx_dma_desc_put(edmac, first);
    return NULL;
}

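/*
 * Note: an illustrative sketch of a slave transfer on an M2M channel (for
 * example the SSP), assuming the channel was requested as in the example
 * after struct ep93xx_dma_chan. All variable names are hypothetical.
 *
 *	struct dma_slave_config conf = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= ssp_tx_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &conf);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	txd->callback = my_done_callback;
 *	txd->callback_param = my_data;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */
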
/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @context: operation context (ignored)
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
               size_t buf_len, size_t period_len,
               enum dma_transfer_direction dir, void *context)
{
    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
    struct ep93xx_dma_desc *desc, *first;
    size_t offset = 0;

    if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
        dev_warn(chan2dev(edmac),
             "channel was configured with different direction\n");
        return NULL;
    }

    if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
        dev_warn(chan2dev(edmac),
             "channel is already used for cyclic transfers\n");
        return NULL;
    }

    if (period_len > DMA_MAX_CHAN_BYTES) {
        dev_warn(chan2dev(edmac), "too big period length %zu\n",
             period_len);
        return NULL;
    }

    /* Split the buffer into period size chunks */
    first = NULL;
    for (offset = 0; offset < buf_len; offset += period_len) {
        desc = ep93xx_dma_desc_get(edmac);
        if (!desc) {
            dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
            goto fail;
        }

        if (dir == DMA_MEM_TO_DEV) {
            desc->src_addr = dma_addr + offset;
            desc->dst_addr = edmac->runtime_addr;
        } else {
            desc->src_addr = edmac->runtime_addr;
            desc->dst_addr = dma_addr + offset;
        }

        desc->size = period_len;

        if (!first)
            first = desc;
        else
            list_add_tail(&desc->node, &first->tx_list);
    }

    first->txd.cookie = -EBUSY;

    return &first->txd;

fail:
    ep93xx_dma_desc_put(edmac, first);
    return NULL;
}
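
/*
 * Note: an illustrative sketch of a cyclic (audio-style) transfer using the
 * flags-less dmaengine_prep_dma_cyclic() helper of this kernel generation.
 * Buffer, period and callback names are hypothetical.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					DMA_MEM_TO_DEV);
 *	txd->callback = my_period_elapsed;
 *	txd->callback_param = my_data;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 *	The transfer runs until dmaengine_terminate_all(chan) is called, which
 *	ends up in ep93xx_dma_terminate_all() below.
 */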

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
    struct ep93xx_dma_desc *desc, *_d;
    unsigned long flags;
    LIST_HEAD(list);

    spin_lock_irqsave(&edmac->lock, flags);
    /* First we disable and flush the DMA channel */
    edmac->edma->hw_shutdown(edmac);
    clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
    list_splice_init(&edmac->active, &list);
    list_splice_init(&edmac->queue, &list);
    /*
     * We then re-enable the channel. This way we can continue submitting
     * the descriptors by just calling ->hw_submit() again.
     */
    edmac->edma->hw_setup(edmac);
    spin_unlock_irqrestore(&edmac->lock, flags);

    list_for_each_entry_safe(desc, _d, &list, node)
        ep93xx_dma_desc_put(edmac, desc);

    return 0;
}

static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
                   struct dma_slave_config *config)
{
    enum dma_slave_buswidth width;
    unsigned long flags;
    u32 addr, ctrl;

    if (!edmac->edma->m2m)
        return -EINVAL;

    switch (config->direction) {
    case DMA_DEV_TO_MEM:
        width = config->src_addr_width;
        addr = config->src_addr;
        break;

    case DMA_MEM_TO_DEV:
        width = config->dst_addr_width;
        addr = config->dst_addr;
        break;

    default:
        return -EINVAL;
    }

    switch (width) {
    case DMA_SLAVE_BUSWIDTH_1_BYTE:
        ctrl = 0;
        break;
    case DMA_SLAVE_BUSWIDTH_2_BYTES:
        ctrl = M2M_CONTROL_PW_16;
        break;
    case DMA_SLAVE_BUSWIDTH_4_BYTES:
        ctrl = M2M_CONTROL_PW_32;
        break;
    default:
        return -EINVAL;
    }

    spin_lock_irqsave(&edmac->lock, flags);
    edmac->runtime_addr = addr;
    edmac->runtime_ctrl = ctrl;
    spin_unlock_irqrestore(&edmac->lock, flags);

    return 0;
}

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                  unsigned long arg)
{
    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
    struct dma_slave_config *config;

    switch (cmd) {
    case DMA_TERMINATE_ALL:
        return ep93xx_dma_terminate_all(edmac);

    case DMA_SLAVE_CONFIG:
        config = (struct dma_slave_config *)arg;
        return ep93xx_dma_slave_config(edmac, config);

    default:
        break;
    }

    return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
                        dma_cookie_t cookie,
                        struct dma_tx_state *state)
{
    struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
    enum dma_status ret;
    unsigned long flags;

    spin_lock_irqsave(&edmac->lock, flags);
    ret = dma_cookie_status(chan, cookie, state);
    spin_unlock_irqrestore(&edmac->lock, flags);

    return ret;
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
    ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
    struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
    struct ep93xx_dma_engine *edma;
    struct dma_device *dma_dev;
    size_t edma_size;
    int ret, i;

    edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
    edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
    if (!edma)
        return -ENOMEM;

    dma_dev = &edma->dma_dev;
    edma->m2m = platform_get_device_id(pdev)->driver_data;
    edma->num_channels = pdata->num_channels;

    INIT_LIST_HEAD(&dma_dev->channels);
    for (i = 0; i < pdata->num_channels; i++) {
        const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
        struct ep93xx_dma_chan *edmac = &edma->channels[i];

        edmac->chan.device = dma_dev;
        edmac->regs = cdata->base;
        edmac->irq = cdata->irq;
        edmac->edma = edma;

        edmac->clk = clk_get(NULL, cdata->name);
        if (IS_ERR(edmac->clk)) {
            dev_warn(&pdev->dev, "failed to get clock for %s\n",
                 cdata->name);
            continue;
        }

        spin_lock_init(&edmac->lock);
        INIT_LIST_HEAD(&edmac->active);
        INIT_LIST_HEAD(&edmac->queue);
        INIT_LIST_HEAD(&edmac->free_list);
        tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
                 (unsigned long)edmac);

        list_add_tail(&edmac->chan.device_node,
                  &dma_dev->channels);
    }

    dma_cap_zero(dma_dev->cap_mask);
    dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
    dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

    dma_dev->dev = &pdev->dev;
    dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
    dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
    dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
    dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
    dma_dev->device_control = ep93xx_dma_control;
    dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
    dma_dev->device_tx_status = ep93xx_dma_tx_status;

    dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

    if (edma->m2m) {
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

        edma->hw_setup = m2m_hw_setup;
        edma->hw_shutdown = m2m_hw_shutdown;
        edma->hw_submit = m2m_hw_submit;
        edma->hw_interrupt = m2m_hw_interrupt;
    } else {
        dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

        edma->hw_setup = m2p_hw_setup;
        edma->hw_shutdown = m2p_hw_shutdown;
        edma->hw_submit = m2p_hw_submit;
        edma->hw_interrupt = m2p_hw_interrupt;
    }

    ret = dma_async_device_register(dma_dev);
    if (unlikely(ret)) {
        for (i = 0; i < edma->num_channels; i++) {
            struct ep93xx_dma_chan *edmac = &edma->channels[i];
            if (!IS_ERR_OR_NULL(edmac->clk))
                clk_put(edmac->clk);
        }
        kfree(edma);
    } else {
        dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
             edma->m2m ? "M" : "P");
    }

    return ret;
}

static struct platform_device_id ep93xx_dma_driver_ids[] = {
    { "ep93xx-dma-m2p", 0 },
    { "ep93xx-dma-m2m", 1 },
    { },
};

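/*
 * Note: an illustrative sketch of how platform code might describe the M2P
 * controller to this driver. The base addresses, IRQ numbers and channel
 * names below are placeholders, not taken from the EP93xx documentation;
 * the real tables live in the EP93xx machine code.
 *
 *	static struct ep93xx_dma_chan_data m2p_channels[] = {
 *		{ .name = "m2p0", .base = M2P0_BASE, .irq = IRQ_M2P0 },
 *		{ .name = "m2p1", .base = M2P1_BASE, .irq = IRQ_M2P1 },
 *	};
 *
 *	static struct ep93xx_dma_platform_data m2p_data = {
 *		.channels	= m2p_channels,
 *		.num_channels	= ARRAY_SIZE(m2p_channels),
 *	};
 *
 *	The machine code then registers a platform device named
 *	"ep93xx-dma-m2p" (or "ep93xx-dma-m2m") carrying this structure as its
 *	platform data, which ep93xx_dma_probe() above picks up.
 */
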
static struct platform_driver ep93xx_dma_driver = {
    .driver = {
        .name = "ep93xx-dma",
    },
    .id_table = ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
    return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");

