/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Copyright (C) 2006 Applied Data Systems
 * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the following fields
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the following fields */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};
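
/*
 * A minimal sketch of how a client hands the channel its configuration
 * through @chan.private (the filter function and @dma_data names below are
 * illustrative, not part of this driver): the filter passed to
 * dma_request_channel() picks a channel with a matching direction and
 * attaches the platform data before any descriptors are prepared.
 *
 *	static bool ep93xx_client_filter(struct dma_chan *chan, void *param)
 *	{
 *		struct ep93xx_dma_data *data = param;
 *
 *		if (data->direction != ep93xx_dma_chan_direction(chan))
 *			return false;
 *		chan->private = data;
 *		return true;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, ep93xx_client_filter, &dma_data);
 */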

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next one in @edmac->active
 * and returns %true if there are still descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get the DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear PW bits here and then set them according to what is given in
	 * the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to EP93xx User's Guide, we should receive DONE interrupt when all
 * M2M DMA controller transactions complete normally. This is not always the
 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
 * In effect, disabling the channel when only DONE bit is set could stop
 * currently running DMA transfer. To avoid this, we use Buffer FSM and
 * Control FSM to check current state of DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue it to be executed later on. Returns a cookie which
 * can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
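
/*
 * A hedged usage sketch for the memcpy path above (error handling omitted;
 * @chan, @src_buf and @dst_buf are illustrative). Transfers longer than
 * DMA_MAX_CHAN_BYTES are split into a chained descriptor by the function
 * above, so the client still gets back a single cookie.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_addr_t src, dst;
 *	dma_cookie_t cookie;
 *
 *	src = dma_map_single(chan->device->dev, src_buf, len, DMA_TO_DEVICE);
 *	dst = dma_map_single(chan->device->dev, dst_buf, len, DMA_FROM_DEVICE);
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						    DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */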

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t sg_len = sg_dma_len(sg);

		if (sg_len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 sg_len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = sg_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
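
/*
 * A sketch of a slave scatter-gather submission against this driver (the
 * scatterlist setup and @chan are illustrative): the client maps the list,
 * prepares it for the direction the channel was configured with, submits
 * and kicks the engine.
 *
 *	nents = dma_map_sg(chan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_CTRL_ACK);
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */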

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 * @context: operation context (ignored)
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once each period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags,
			   void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
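
/*
 * A usage sketch for the cyclic path in the style of an audio ring buffer
 * (@period_elapsed and @substream are illustrative): the callback fires from
 * the channel tasklet after each @period_len chunk and the transfer runs
 * until dmaengine_terminate_all() is called.
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf_phys, buf_len,
 *						   period_len, DMA_MEM_TO_DEV,
 *						   0, NULL);
 *	txd->callback = period_elapsed;
 *	txd->callback_param = substream;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */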

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
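
/*
 * A minimal sketch of the runtime configuration an M2M slave client (SSP or
 * IDE) performs before preparing descriptors; the data register address is
 * illustrative. The width is translated to M2M_CONTROL_PW_* above and the
 * address becomes @runtime_addr.
 *
 *	struct dma_slave_config conf = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= ssp_data_reg_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &conf);
 */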

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	ret = dma_cookie_status(chan, cookie, state);
	spin_unlock_irqrestore(&edmac->lock, flags);

	return ret;
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");