1 | /* |
2 | * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on |
3 |  * AVR32 systems). |
4 | * |
5 | * Copyright (C) 2007-2008 Atmel Corporation |
6 | * Copyright (C) 2010-2011 ST Microelectronics |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. |
11 | */ |
12 | #include <linux/bitops.h> |
13 | #include <linux/clk.h> |
14 | #include <linux/delay.h> |
15 | #include <linux/dmaengine.h> |
16 | #include <linux/dma-mapping.h> |
17 | #include <linux/init.h> |
18 | #include <linux/interrupt.h> |
19 | #include <linux/io.h> |
20 | #include <linux/of.h> |
21 | #include <linux/mm.h> |
22 | #include <linux/module.h> |
23 | #include <linux/platform_device.h> |
24 | #include <linux/slab.h> |
25 | |
26 | #include "dw_dmac_regs.h" |
27 | #include "dmaengine.h" |
28 | |
29 | /* |
30 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", |
31 | * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all |
32 | * of which use ARM any more). See the "Databook" from Synopsys for |
33 | * information beyond what licensees probably provide. |
34 | * |
35 | * The driver has currently been tested only with the Atmel AT32AP7000, |
36 | * which does not support descriptor writeback. |
37 | */ |
38 | |
39 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
40 | struct dw_dma_slave *__slave = (_chan->private); \ |
41 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ |
42 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ |
43 | int _dms = __slave ? __slave->dst_master : 0; \ |
44 | int _sms = __slave ? __slave->src_master : 1; \ |
45 | u8 _smsize = __slave ? _sconfig->src_maxburst : \ |
46 | DW_DMA_MSIZE_16; \ |
47 | u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ |
48 | DW_DMA_MSIZE_16; \ |
49 | \ |
50 | (DWC_CTLL_DST_MSIZE(_dmsize) \ |
51 | | DWC_CTLL_SRC_MSIZE(_smsize) \ |
52 | | DWC_CTLL_LLP_D_EN \ |
53 | | DWC_CTLL_LLP_S_EN \ |
54 | | DWC_CTLL_DMS(_dms) \ |
55 | | DWC_CTLL_SMS(_sms)); \ |
56 | }) |
57 | |
58 | /* |
59 | * This is configuration-dependent and usually a funny size like 4095. |
60 | * |
61 | * Note that this is a transfer count, i.e. if we transfer 32-bit |
62 | * words, we can do 16380 bytes per descriptor. |
63 | * |
64 | * This parameter is also system-specific. |
65 | */ |
66 | #define DWC_MAX_COUNT 4095U |
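/*
 * Worked example (illustrative, not part of the original source): with
 * DWC_MAX_COUNT = 4095 the largest block per descriptor is 4095 bytes for
 * byte-wide transfers, 8190 bytes for 16-bit transfers and 16380 bytes
 * for 32-bit transfers.
 */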
67 | |
68 | /* |
69 | * Number of descriptors to allocate for each channel. This should be |
70 | * made configurable somehow; preferably, the clients (at least the |
71 | * ones using slave transfers) should be able to give us a hint. |
72 | */ |
73 | #define NR_DESCS_PER_CHANNEL 64 |
74 | |
75 | /*----------------------------------------------------------------------*/ |
76 | |
77 | /* |
78 | * Because we're not relying on writeback from the controller (it may not |
79 | * even be configured into the core!) we don't need to use dma_pool. These |
80 | * descriptors -- and associated data -- are cacheable. We do need to make |
81 | * sure their dcache entries are written back before handing them off to |
82 | * the controller, though. |
83 | */ |
84 | |
85 | static struct device *chan2dev(struct dma_chan *chan) |
86 | { |
87 | return &chan->dev->device; |
88 | } |
89 | static struct device *chan2parent(struct dma_chan *chan) |
90 | { |
91 | return chan->dev->device.parent; |
92 | } |
93 | |
94 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
95 | { |
96 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); |
97 | } |
98 | |
99 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
100 | { |
101 | struct dw_desc *desc, *_desc; |
102 | struct dw_desc *ret = NULL; |
103 | unsigned int i = 0; |
104 | unsigned long flags; |
105 | |
106 | spin_lock_irqsave(&dwc->lock, flags); |
107 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { |
108 | i++; |
109 | if (async_tx_test_ack(&desc->txd)) { |
110 | list_del(&desc->desc_node); |
111 | ret = desc; |
112 | break; |
113 | } |
114 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
115 | } |
116 | spin_unlock_irqrestore(&dwc->lock, flags); |
117 | |
118 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); |
119 | |
120 | return ret; |
121 | } |
122 | |
123 | static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) |
124 | { |
125 | struct dw_desc *child; |
126 | |
127 | list_for_each_entry(child, &desc->tx_list, desc_node) |
128 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
129 | child->txd.phys, sizeof(child->lli), |
130 | DMA_TO_DEVICE); |
131 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
132 | desc->txd.phys, sizeof(desc->lli), |
133 | DMA_TO_DEVICE); |
134 | } |
135 | |
136 | /* |
137 | * Move a descriptor, including any children, to the free list. |
138 | * `desc' must not be on any lists. |
139 | */ |
140 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) |
141 | { |
142 | unsigned long flags; |
143 | |
144 | if (desc) { |
145 | struct dw_desc *child; |
146 | |
147 | dwc_sync_desc_for_cpu(dwc, desc); |
148 | |
149 | spin_lock_irqsave(&dwc->lock, flags); |
150 | list_for_each_entry(child, &desc->tx_list, desc_node) |
151 | dev_vdbg(chan2dev(&dwc->chan), |
152 | "moving child desc %p to freelist\n", |
153 | child); |
154 | list_splice_init(&desc->tx_list, &dwc->free_list); |
155 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
156 | list_add(&desc->desc_node, &dwc->free_list); |
157 | spin_unlock_irqrestore(&dwc->lock, flags); |
158 | } |
159 | } |
160 | |
161 | static void dwc_initialize(struct dw_dma_chan *dwc) |
162 | { |
163 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
164 | struct dw_dma_slave *dws = dwc->chan.private; |
165 | u32 cfghi = DWC_CFGH_FIFO_MODE; |
166 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); |
167 | |
168 | 	if (dwc->initialized) |
169 | return; |
170 | |
171 | if (dws) { |
172 | /* |
173 | * We need controller-specific data to set up slave |
174 | * transfers. |
175 | */ |
176 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); |
177 | |
178 | cfghi = dws->cfg_hi; |
179 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; |
180 | } |
181 | |
182 | channel_writel(dwc, CFG_LO, cfglo); |
183 | channel_writel(dwc, CFG_HI, cfghi); |
184 | |
185 | /* Enable interrupts */ |
186 | channel_set_bit(dw, MASK.XFER, dwc->mask); |
187 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
188 | |
189 | dwc->initialized = true; |
190 | } |
191 | |
192 | /*----------------------------------------------------------------------*/ |
193 | |
194 | static inline unsigned int dwc_fast_fls(unsigned long long v) |
195 | { |
196 | /* |
197 | 	 * Return the largest transfer width the alignment of @v allows, |
198 | 	 * as log2 of the byte count, capped at 3 (i.e. 8 bytes). |
199 | */ |
200 | if (!(v & 7)) |
201 | return 3; |
202 | else if (!(v & 3)) |
203 | return 2; |
204 | else if (!(v & 1)) |
205 | return 1; |
206 | return 0; |
207 | } |
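/*
 * Illustrative examples (not part of the original driver): despite the
 * "fls" in its name, the helper above effectively counts trailing zero
 * bits, capped at 3, to pick the widest transfer size the operands allow:
 *
 *	dwc_fast_fls(0x1000 | 0x2000 | 512) == 3	8-byte transfers
 *	dwc_fast_fls(0x1004 | 0x2000 | 512) == 2	32-bit transfers
 *	dwc_fast_fls(0x1001 | 0x2000 | 512) == 0	byte transfers
 */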
208 | |
209 | static void dwc_dump_chan_regs(struct dw_dma_chan *dwc) |
210 | { |
211 | dev_err(chan2dev(&dwc->chan), |
212 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
213 | channel_readl(dwc, SAR), |
214 | channel_readl(dwc, DAR), |
215 | channel_readl(dwc, LLP), |
216 | channel_readl(dwc, CTL_HI), |
217 | channel_readl(dwc, CTL_LO)); |
218 | } |
219 | |
220 | |
221 | static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) |
222 | { |
223 | channel_clear_bit(dw, CH_EN, dwc->mask); |
224 | while (dma_readl(dw, CH_EN) & dwc->mask) |
225 | cpu_relax(); |
226 | } |
227 | |
228 | /*----------------------------------------------------------------------*/ |
229 | |
230 | /* Called with dwc->lock held and bh disabled */ |
231 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) |
232 | { |
233 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
234 | |
235 | /* ASSERT: channel is idle */ |
236 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
237 | dev_err(chan2dev(&dwc->chan), |
238 | "BUG: Attempted to start non-idle channel\n"); |
239 | dwc_dump_chan_regs(dwc); |
240 | |
241 | /* The tasklet will hopefully advance the queue... */ |
242 | return; |
243 | } |
244 | |
245 | dwc_initialize(dwc); |
246 | |
247 | channel_writel(dwc, LLP, first->txd.phys); |
248 | channel_writel(dwc, CTL_LO, |
249 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
250 | channel_writel(dwc, CTL_HI, 0); |
251 | channel_set_bit(dw, CH_EN, dwc->mask); |
252 | } |
253 | |
254 | /*----------------------------------------------------------------------*/ |
255 | |
256 | static void |
257 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, |
258 | bool callback_required) |
259 | { |
260 | dma_async_tx_callback callback = NULL; |
261 | void *param = NULL; |
262 | struct dma_async_tx_descriptor *txd = &desc->txd; |
263 | struct dw_desc *child; |
264 | unsigned long flags; |
265 | |
266 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
267 | |
268 | spin_lock_irqsave(&dwc->lock, flags); |
269 | dma_cookie_complete(txd); |
270 | if (callback_required) { |
271 | callback = txd->callback; |
272 | param = txd->callback_param; |
273 | } |
274 | |
275 | dwc_sync_desc_for_cpu(dwc, desc); |
276 | |
277 | /* async_tx_ack */ |
278 | list_for_each_entry(child, &desc->tx_list, desc_node) |
279 | async_tx_ack(&child->txd); |
280 | async_tx_ack(&desc->txd); |
281 | |
282 | list_splice_init(&desc->tx_list, &dwc->free_list); |
283 | list_move(&desc->desc_node, &dwc->free_list); |
284 | |
285 | if (!dwc->chan.private) { |
286 | struct device *parent = chan2parent(&dwc->chan); |
287 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
288 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
289 | dma_unmap_single(parent, desc->lli.dar, |
290 | desc->len, DMA_FROM_DEVICE); |
291 | else |
292 | dma_unmap_page(parent, desc->lli.dar, |
293 | desc->len, DMA_FROM_DEVICE); |
294 | } |
295 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
296 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
297 | dma_unmap_single(parent, desc->lli.sar, |
298 | desc->len, DMA_TO_DEVICE); |
299 | else |
300 | dma_unmap_page(parent, desc->lli.sar, |
301 | desc->len, DMA_TO_DEVICE); |
302 | } |
303 | } |
304 | |
305 | spin_unlock_irqrestore(&dwc->lock, flags); |
306 | |
307 | if (callback_required && callback) |
308 | callback(param); |
309 | } |
310 | |
311 | static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) |
312 | { |
313 | struct dw_desc *desc, *_desc; |
314 | LIST_HEAD(list); |
315 | unsigned long flags; |
316 | |
317 | spin_lock_irqsave(&dwc->lock, flags); |
318 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
319 | dev_err(chan2dev(&dwc->chan), |
320 | "BUG: XFER bit set, but channel not idle!\n"); |
321 | |
322 | /* Try to continue after resetting the channel... */ |
323 | dwc_chan_disable(dw, dwc); |
324 | } |
325 | |
326 | /* |
327 | * Submit queued descriptors ASAP, i.e. before we go through |
328 | * the completed ones. |
329 | */ |
330 | list_splice_init(&dwc->active_list, &list); |
331 | if (!list_empty(&dwc->queue)) { |
332 | list_move(dwc->queue.next, &dwc->active_list); |
333 | dwc_dostart(dwc, dwc_first_active(dwc)); |
334 | } |
335 | |
336 | spin_unlock_irqrestore(&dwc->lock, flags); |
337 | |
338 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
339 | dwc_descriptor_complete(dwc, desc, true); |
340 | } |
341 | |
342 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
343 | { |
344 | dma_addr_t llp; |
345 | struct dw_desc *desc, *_desc; |
346 | struct dw_desc *child; |
347 | u32 status_xfer; |
348 | unsigned long flags; |
349 | |
350 | spin_lock_irqsave(&dwc->lock, flags); |
351 | llp = channel_readl(dwc, LLP); |
352 | status_xfer = dma_readl(dw, RAW.XFER); |
353 | |
354 | if (status_xfer & dwc->mask) { |
355 | /* Everything we've submitted is done */ |
356 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
357 | spin_unlock_irqrestore(&dwc->lock, flags); |
358 | |
359 | dwc_complete_all(dw, dwc); |
360 | return; |
361 | } |
362 | |
363 | if (list_empty(&dwc->active_list)) { |
364 | spin_unlock_irqrestore(&dwc->lock, flags); |
365 | return; |
366 | } |
367 | |
368 | dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, |
369 | (unsigned long long)llp); |
370 | |
371 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
372 | 		/* Check this descriptor's address */ |
373 | if (desc->txd.phys == llp) { |
374 | spin_unlock_irqrestore(&dwc->lock, flags); |
375 | return; |
376 | } |
377 | |
378 | 		/* Check this descriptor's llp */ |
379 | if (desc->lli.llp == llp) { |
380 | /* This one is currently in progress */ |
381 | spin_unlock_irqrestore(&dwc->lock, flags); |
382 | return; |
383 | } |
384 | |
385 | list_for_each_entry(child, &desc->tx_list, desc_node) |
386 | if (child->lli.llp == llp) { |
387 | /* Currently in progress */ |
388 | spin_unlock_irqrestore(&dwc->lock, flags); |
389 | return; |
390 | } |
391 | |
392 | /* |
393 | * No descriptors so far seem to be in progress, i.e. |
394 | * this one must be done. |
395 | */ |
396 | spin_unlock_irqrestore(&dwc->lock, flags); |
397 | dwc_descriptor_complete(dwc, desc, true); |
398 | spin_lock_irqsave(&dwc->lock, flags); |
399 | } |
400 | |
401 | dev_err(chan2dev(&dwc->chan), |
402 | "BUG: All descriptors done, but channel not idle!\n"); |
403 | |
404 | /* Try to continue after resetting the channel... */ |
405 | dwc_chan_disable(dw, dwc); |
406 | |
407 | if (!list_empty(&dwc->queue)) { |
408 | list_move(dwc->queue.next, &dwc->active_list); |
409 | dwc_dostart(dwc, dwc_first_active(dwc)); |
410 | } |
411 | spin_unlock_irqrestore(&dwc->lock, flags); |
412 | } |
413 | |
414 | static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
415 | { |
416 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
417 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
418 | lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); |
419 | } |
420 | |
421 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) |
422 | { |
423 | struct dw_desc *bad_desc; |
424 | struct dw_desc *child; |
425 | unsigned long flags; |
426 | |
427 | dwc_scan_descriptors(dw, dwc); |
428 | |
429 | spin_lock_irqsave(&dwc->lock, flags); |
430 | |
431 | /* |
432 | * The descriptor currently at the head of the active list is |
433 | * borked. Since we don't have any way to report errors, we'll |
434 | * just have to scream loudly and try to carry on. |
435 | */ |
436 | bad_desc = dwc_first_active(dwc); |
437 | list_del_init(&bad_desc->desc_node); |
438 | list_move(dwc->queue.next, dwc->active_list.prev); |
439 | |
440 | /* Clear the error flag and try to restart the controller */ |
441 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
442 | if (!list_empty(&dwc->active_list)) |
443 | dwc_dostart(dwc, dwc_first_active(dwc)); |
444 | |
445 | /* |
446 | 	 * KERN_CRIT may seem harsh, but since this only happens |
447 | * when someone submits a bad physical address in a |
448 | * descriptor, we should consider ourselves lucky that the |
449 | * controller flagged an error instead of scribbling over |
450 | * random memory locations. |
451 | */ |
452 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
453 | "Bad descriptor submitted for DMA!\n"); |
454 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
455 | " cookie: %d\n", bad_desc->txd.cookie); |
456 | dwc_dump_lli(dwc, &bad_desc->lli); |
457 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
458 | dwc_dump_lli(dwc, &child->lli); |
459 | |
460 | spin_unlock_irqrestore(&dwc->lock, flags); |
461 | |
462 | /* Pretend the descriptor completed successfully */ |
463 | dwc_descriptor_complete(dwc, bad_desc, true); |
464 | } |
465 | |
466 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
467 | |
468 | inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) |
469 | { |
470 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
471 | return channel_readl(dwc, SAR); |
472 | } |
473 | EXPORT_SYMBOL(dw_dma_get_src_addr); |
474 | |
475 | inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) |
476 | { |
477 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
478 | return channel_readl(dwc, DAR); |
479 | } |
480 | EXPORT_SYMBOL(dw_dma_get_dst_addr); |
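/*
 * Usage sketch (illustrative, not code from this driver): a cyclic client
 * such as an audio PCM driver can read the current destination address
 * back to work out how far the controller has advanced within its ring
 * buffer.  For a DMA_DEV_TO_MEM ring that starts at buf_addr:
 *
 *	dma_addr_t pos = dw_dma_get_dst_addr(chan);
 *	size_t bytes_done = pos - buf_addr;
 */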
481 | |
482 | /* called with dwc->lock held and all DMAC interrupts disabled */ |
483 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
484 | u32 status_err, u32 status_xfer) |
485 | { |
486 | unsigned long flags; |
487 | |
488 | if (dwc->mask) { |
489 | void (*callback)(void *param); |
490 | void *callback_param; |
491 | |
492 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", |
493 | channel_readl(dwc, LLP)); |
494 | |
495 | callback = dwc->cdesc->period_callback; |
496 | callback_param = dwc->cdesc->period_callback_param; |
497 | |
498 | if (callback) |
499 | callback(callback_param); |
500 | } |
501 | |
502 | /* |
503 | * Error and transfer complete are highly unlikely, and will most |
504 | * likely be due to a configuration error by the user. |
505 | */ |
506 | if (unlikely(status_err & dwc->mask) || |
507 | unlikely(status_xfer & dwc->mask)) { |
508 | int i; |
509 | |
510 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " |
511 | "interrupt, stopping DMA transfer\n", |
512 | status_xfer ? "xfer" : "error"); |
513 | |
514 | spin_lock_irqsave(&dwc->lock, flags); |
515 | |
516 | dwc_dump_chan_regs(dwc); |
517 | |
518 | dwc_chan_disable(dw, dwc); |
519 | |
520 | /* make sure DMA does not restart by loading a new list */ |
521 | channel_writel(dwc, LLP, 0); |
522 | channel_writel(dwc, CTL_LO, 0); |
523 | channel_writel(dwc, CTL_HI, 0); |
524 | |
525 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
526 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
527 | |
528 | for (i = 0; i < dwc->cdesc->periods; i++) |
529 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); |
530 | |
531 | spin_unlock_irqrestore(&dwc->lock, flags); |
532 | } |
533 | } |
534 | |
535 | /* ------------------------------------------------------------------------- */ |
536 | |
537 | static void dw_dma_tasklet(unsigned long data) |
538 | { |
539 | struct dw_dma *dw = (struct dw_dma *)data; |
540 | struct dw_dma_chan *dwc; |
541 | u32 status_xfer; |
542 | u32 status_err; |
543 | int i; |
544 | |
545 | status_xfer = dma_readl(dw, RAW.XFER); |
546 | status_err = dma_readl(dw, RAW.ERROR); |
547 | |
548 | dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); |
549 | |
550 | for (i = 0; i < dw->dma.chancnt; i++) { |
551 | dwc = &dw->chan[i]; |
552 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
553 | dwc_handle_cyclic(dw, dwc, status_err, status_xfer); |
554 | else if (status_err & (1 << i)) |
555 | dwc_handle_error(dw, dwc); |
556 | else if (status_xfer & (1 << i)) |
557 | dwc_scan_descriptors(dw, dwc); |
558 | } |
559 | |
560 | /* |
561 | * Re-enable interrupts. |
562 | */ |
563 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); |
564 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
565 | } |
566 | |
567 | static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) |
568 | { |
569 | struct dw_dma *dw = dev_id; |
570 | u32 status; |
571 | |
572 | dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, |
573 | dma_readl(dw, STATUS_INT)); |
574 | |
575 | /* |
576 | * Just disable the interrupts. We'll turn them back on in the |
577 | * softirq handler. |
578 | */ |
579 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
580 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
581 | |
582 | status = dma_readl(dw, STATUS_INT); |
583 | if (status) { |
584 | dev_err(dw->dma.dev, |
585 | "BUG: Unexpected interrupts pending: 0x%x\n", |
586 | status); |
587 | |
588 | /* Try to recover */ |
589 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); |
590 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); |
591 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); |
592 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); |
593 | } |
594 | |
595 | tasklet_schedule(&dw->tasklet); |
596 | |
597 | return IRQ_HANDLED; |
598 | } |
599 | |
600 | /*----------------------------------------------------------------------*/ |
601 | |
602 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) |
603 | { |
604 | struct dw_desc *desc = txd_to_dw_desc(tx); |
605 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); |
606 | dma_cookie_t cookie; |
607 | unsigned long flags; |
608 | |
609 | spin_lock_irqsave(&dwc->lock, flags); |
610 | cookie = dma_cookie_assign(tx); |
611 | |
612 | /* |
613 | * REVISIT: We should attempt to chain as many descriptors as |
614 | * possible, perhaps even appending to those already submitted |
615 | * for DMA. But this is hard to do in a race-free manner. |
616 | */ |
617 | if (list_empty(&dwc->active_list)) { |
618 | dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, |
619 | desc->txd.cookie); |
620 | list_add_tail(&desc->desc_node, &dwc->active_list); |
621 | dwc_dostart(dwc, dwc_first_active(dwc)); |
622 | } else { |
623 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, |
624 | desc->txd.cookie); |
625 | |
626 | list_add_tail(&desc->desc_node, &dwc->queue); |
627 | } |
628 | |
629 | spin_unlock_irqrestore(&dwc->lock, flags); |
630 | |
631 | return cookie; |
632 | } |
633 | |
634 | static struct dma_async_tx_descriptor * |
635 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
636 | size_t len, unsigned long flags) |
637 | { |
638 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
639 | struct dw_desc *desc; |
640 | struct dw_desc *first; |
641 | struct dw_desc *prev; |
642 | size_t xfer_count; |
643 | size_t offset; |
644 | unsigned int src_width; |
645 | unsigned int dst_width; |
646 | u32 ctllo; |
647 | |
648 | dev_vdbg(chan2dev(chan), |
649 | "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, |
650 | (unsigned long long)dest, (unsigned long long)src, |
651 | len, flags); |
652 | |
653 | if (unlikely(!len)) { |
654 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); |
655 | return NULL; |
656 | } |
657 | |
658 | src_width = dst_width = dwc_fast_fls(src | dest | len); |
659 | |
660 | ctllo = DWC_DEFAULT_CTLLO(chan) |
661 | | DWC_CTLL_DST_WIDTH(dst_width) |
662 | | DWC_CTLL_SRC_WIDTH(src_width) |
663 | | DWC_CTLL_DST_INC |
664 | | DWC_CTLL_SRC_INC |
665 | | DWC_CTLL_FC_M2M; |
666 | prev = first = NULL; |
667 | |
668 | for (offset = 0; offset < len; offset += xfer_count << src_width) { |
669 | xfer_count = min_t(size_t, (len - offset) >> src_width, |
670 | DWC_MAX_COUNT); |
671 | |
672 | desc = dwc_desc_get(dwc); |
673 | if (!desc) |
674 | goto err_desc_get; |
675 | |
676 | desc->lli.sar = src + offset; |
677 | desc->lli.dar = dest + offset; |
678 | desc->lli.ctllo = ctllo; |
679 | desc->lli.ctlhi = xfer_count; |
680 | |
681 | if (!first) { |
682 | first = desc; |
683 | } else { |
684 | prev->lli.llp = desc->txd.phys; |
685 | dma_sync_single_for_device(chan2parent(chan), |
686 | prev->txd.phys, sizeof(prev->lli), |
687 | DMA_TO_DEVICE); |
688 | list_add_tail(&desc->desc_node, |
689 | &first->tx_list); |
690 | } |
691 | prev = desc; |
692 | } |
693 | |
694 | |
695 | if (flags & DMA_PREP_INTERRUPT) |
696 | /* Trigger interrupt after last block */ |
697 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
698 | |
699 | prev->lli.llp = 0; |
700 | dma_sync_single_for_device(chan2parent(chan), |
701 | prev->txd.phys, sizeof(prev->lli), |
702 | DMA_TO_DEVICE); |
703 | |
704 | first->txd.flags = flags; |
705 | first->len = len; |
706 | |
707 | return &first->txd; |
708 | |
709 | err_desc_get: |
710 | dwc_desc_put(dwc, first); |
711 | return NULL; |
712 | } |
713 | |
714 | static struct dma_async_tx_descriptor * |
715 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
716 | unsigned int sg_len, enum dma_transfer_direction direction, |
717 | unsigned long flags, void *context) |
718 | { |
719 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
720 | struct dw_dma_slave *dws = chan->private; |
721 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
722 | struct dw_desc *prev; |
723 | struct dw_desc *first; |
724 | u32 ctllo; |
725 | dma_addr_t reg; |
726 | unsigned int reg_width; |
727 | unsigned int mem_width; |
728 | unsigned int i; |
729 | struct scatterlist *sg; |
730 | size_t total_len = 0; |
731 | |
732 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
733 | |
734 | if (unlikely(!dws || !sg_len)) |
735 | return NULL; |
736 | |
737 | prev = first = NULL; |
738 | |
739 | switch (direction) { |
740 | case DMA_MEM_TO_DEV: |
741 | reg_width = __fls(sconfig->dst_addr_width); |
742 | reg = sconfig->dst_addr; |
743 | ctllo = (DWC_DEFAULT_CTLLO(chan) |
744 | | DWC_CTLL_DST_WIDTH(reg_width) |
745 | | DWC_CTLL_DST_FIX |
746 | | DWC_CTLL_SRC_INC); |
747 | |
748 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
749 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
750 | |
751 | for_each_sg(sgl, sg, sg_len, i) { |
752 | struct dw_desc *desc; |
753 | u32 len, dlen, mem; |
754 | |
755 | mem = sg_dma_address(sg); |
756 | len = sg_dma_len(sg); |
757 | |
758 | mem_width = dwc_fast_fls(mem | len); |
759 | |
760 | slave_sg_todev_fill_desc: |
761 | desc = dwc_desc_get(dwc); |
762 | if (!desc) { |
763 | dev_err(chan2dev(chan), |
764 | "not enough descriptors available\n"); |
765 | goto err_desc_get; |
766 | } |
767 | |
768 | desc->lli.sar = mem; |
769 | desc->lli.dar = reg; |
770 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); |
771 | if ((len >> mem_width) > DWC_MAX_COUNT) { |
772 | dlen = DWC_MAX_COUNT << mem_width; |
773 | mem += dlen; |
774 | len -= dlen; |
775 | } else { |
776 | dlen = len; |
777 | len = 0; |
778 | } |
779 | |
780 | desc->lli.ctlhi = dlen >> mem_width; |
781 | |
782 | if (!first) { |
783 | first = desc; |
784 | } else { |
785 | prev->lli.llp = desc->txd.phys; |
786 | dma_sync_single_for_device(chan2parent(chan), |
787 | prev->txd.phys, |
788 | sizeof(prev->lli), |
789 | DMA_TO_DEVICE); |
790 | list_add_tail(&desc->desc_node, |
791 | &first->tx_list); |
792 | } |
793 | prev = desc; |
794 | total_len += dlen; |
795 | |
796 | if (len) |
797 | goto slave_sg_todev_fill_desc; |
798 | } |
799 | break; |
800 | case DMA_DEV_TO_MEM: |
801 | reg_width = __fls(sconfig->src_addr_width); |
802 | reg = sconfig->src_addr; |
803 | ctllo = (DWC_DEFAULT_CTLLO(chan) |
804 | | DWC_CTLL_SRC_WIDTH(reg_width) |
805 | | DWC_CTLL_DST_INC |
806 | | DWC_CTLL_SRC_FIX); |
807 | |
808 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
809 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
810 | |
811 | for_each_sg(sgl, sg, sg_len, i) { |
812 | struct dw_desc *desc; |
813 | u32 len, dlen, mem; |
814 | |
815 | mem = sg_dma_address(sg); |
816 | len = sg_dma_len(sg); |
817 | |
818 | mem_width = dwc_fast_fls(mem | len); |
819 | |
820 | slave_sg_fromdev_fill_desc: |
821 | desc = dwc_desc_get(dwc); |
822 | if (!desc) { |
823 | dev_err(chan2dev(chan), |
824 | "not enough descriptors available\n"); |
825 | goto err_desc_get; |
826 | } |
827 | |
828 | desc->lli.sar = reg; |
829 | desc->lli.dar = mem; |
830 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); |
831 | if ((len >> reg_width) > DWC_MAX_COUNT) { |
832 | dlen = DWC_MAX_COUNT << reg_width; |
833 | mem += dlen; |
834 | len -= dlen; |
835 | } else { |
836 | dlen = len; |
837 | len = 0; |
838 | } |
839 | desc->lli.ctlhi = dlen >> reg_width; |
840 | |
841 | if (!first) { |
842 | first = desc; |
843 | } else { |
844 | prev->lli.llp = desc->txd.phys; |
845 | dma_sync_single_for_device(chan2parent(chan), |
846 | prev->txd.phys, |
847 | sizeof(prev->lli), |
848 | DMA_TO_DEVICE); |
849 | list_add_tail(&desc->desc_node, |
850 | &first->tx_list); |
851 | } |
852 | prev = desc; |
853 | total_len += dlen; |
854 | |
855 | if (len) |
856 | goto slave_sg_fromdev_fill_desc; |
857 | } |
858 | break; |
859 | default: |
860 | return NULL; |
861 | } |
862 | |
863 | if (flags & DMA_PREP_INTERRUPT) |
864 | /* Trigger interrupt after last block */ |
865 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
866 | |
867 | prev->lli.llp = 0; |
868 | dma_sync_single_for_device(chan2parent(chan), |
869 | prev->txd.phys, sizeof(prev->lli), |
870 | DMA_TO_DEVICE); |
871 | |
872 | first->len = total_len; |
873 | |
874 | return &first->txd; |
875 | |
876 | err_desc_get: |
877 | dwc_desc_put(dwc, first); |
878 | return NULL; |
879 | } |
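/*
 * Client-side sketch (illustrative; the dmaengine helpers named below are
 * assumed from the generic API of this kernel generation, not defined in
 * this file).  Note that chan->private must point to a struct
 * dw_dma_slave, otherwise the prep callback above returns NULL.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */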
880 | |
881 | /* |
882 | * Fix sconfig's burst size according to dw_dmac. We need to convert them as: |
883 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. |
884 | * |
885 |  * NOTE: burst size 2 is not supported by the controller. |
886 |  * |
887 |  * This is done by taking the most significant set bit: fls(n) - 2. |
888 | */ |
889 | static inline void convert_burst(u32 *maxburst) |
890 | { |
891 | if (*maxburst > 1) |
892 | *maxburst = fls(*maxburst) - 2; |
893 | else |
894 | *maxburst = 0; |
895 | } |
896 | |
897 | static int |
898 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) |
899 | { |
900 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
901 | |
902 | 	/* Check if chan is configured for slave transfers */ |
903 | if (!chan->private) |
904 | return -EINVAL; |
905 | |
906 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); |
907 | |
908 | convert_burst(&dwc->dma_sconfig.src_maxburst); |
909 | convert_burst(&dwc->dma_sconfig.dst_maxburst); |
910 | |
911 | return 0; |
912 | } |
913 | |
914 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
915 | unsigned long arg) |
916 | { |
917 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
918 | struct dw_dma *dw = to_dw_dma(chan->device); |
919 | struct dw_desc *desc, *_desc; |
920 | unsigned long flags; |
921 | u32 cfglo; |
922 | LIST_HEAD(list); |
923 | |
924 | if (cmd == DMA_PAUSE) { |
925 | spin_lock_irqsave(&dwc->lock, flags); |
926 | |
927 | cfglo = channel_readl(dwc, CFG_LO); |
928 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); |
929 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) |
930 | cpu_relax(); |
931 | |
932 | dwc->paused = true; |
933 | spin_unlock_irqrestore(&dwc->lock, flags); |
934 | } else if (cmd == DMA_RESUME) { |
935 | if (!dwc->paused) |
936 | return 0; |
937 | |
938 | spin_lock_irqsave(&dwc->lock, flags); |
939 | |
940 | cfglo = channel_readl(dwc, CFG_LO); |
941 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); |
942 | dwc->paused = false; |
943 | |
944 | spin_unlock_irqrestore(&dwc->lock, flags); |
945 | } else if (cmd == DMA_TERMINATE_ALL) { |
946 | spin_lock_irqsave(&dwc->lock, flags); |
947 | |
948 | dwc_chan_disable(dw, dwc); |
949 | |
950 | dwc->paused = false; |
951 | |
952 | /* active_list entries will end up before queued entries */ |
953 | list_splice_init(&dwc->queue, &list); |
954 | list_splice_init(&dwc->active_list, &list); |
955 | |
956 | spin_unlock_irqrestore(&dwc->lock, flags); |
957 | |
958 | /* Flush all pending and queued descriptors */ |
959 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
960 | dwc_descriptor_complete(dwc, desc, false); |
961 | } else if (cmd == DMA_SLAVE_CONFIG) { |
962 | return set_runtime_config(chan, (struct dma_slave_config *)arg); |
963 | } else { |
964 | return -ENXIO; |
965 | } |
966 | |
967 | return 0; |
968 | } |
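/*
 * Usage note (illustrative): clients normally reach dwc_control() through
 * the generic wrappers of this dmaengine generation rather than calling
 * device_control() directly:
 *
 *	dmaengine_pause(chan);			DMA_PAUSE
 *	dmaengine_resume(chan);			DMA_RESUME
 *	dmaengine_terminate_all(chan);		DMA_TERMINATE_ALL
 *	dmaengine_slave_config(chan, &cfg);	DMA_SLAVE_CONFIG
 */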
969 | |
970 | static enum dma_status |
971 | dwc_tx_status(struct dma_chan *chan, |
972 | dma_cookie_t cookie, |
973 | struct dma_tx_state *txstate) |
974 | { |
975 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
976 | enum dma_status ret; |
977 | |
978 | ret = dma_cookie_status(chan, cookie, txstate); |
979 | if (ret != DMA_SUCCESS) { |
980 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
981 | |
982 | ret = dma_cookie_status(chan, cookie, txstate); |
983 | } |
984 | |
985 | if (ret != DMA_SUCCESS) |
986 | dma_set_residue(txstate, dwc_first_active(dwc)->len); |
987 | |
988 | if (dwc->paused) |
989 | return DMA_PAUSED; |
990 | |
991 | return ret; |
992 | } |
993 | |
994 | static void dwc_issue_pending(struct dma_chan *chan) |
995 | { |
996 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
997 | |
998 | if (!list_empty(&dwc->queue)) |
999 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
1000 | } |
1001 | |
1002 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
1003 | { |
1004 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1005 | struct dw_dma *dw = to_dw_dma(chan->device); |
1006 | struct dw_desc *desc; |
1007 | int i; |
1008 | unsigned long flags; |
1009 | |
1010 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
1011 | |
1012 | /* ASSERT: channel is idle */ |
1013 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
1014 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
1015 | return -EIO; |
1016 | } |
1017 | |
1018 | dma_cookie_init(chan); |
1019 | |
1020 | /* |
1021 | * NOTE: some controllers may have additional features that we |
1022 | * need to initialize here, like "scatter-gather" (which |
1023 | * doesn't mean what you think it means), and status writeback. |
1024 | */ |
1025 | |
1026 | spin_lock_irqsave(&dwc->lock, flags); |
1027 | i = dwc->descs_allocated; |
1028 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { |
1029 | spin_unlock_irqrestore(&dwc->lock, flags); |
1030 | |
1031 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); |
1032 | if (!desc) { |
1033 | dev_info(chan2dev(chan), |
1034 | "only allocated %d descriptors\n", i); |
1035 | spin_lock_irqsave(&dwc->lock, flags); |
1036 | break; |
1037 | } |
1038 | |
1039 | INIT_LIST_HEAD(&desc->tx_list); |
1040 | dma_async_tx_descriptor_init(&desc->txd, chan); |
1041 | desc->txd.tx_submit = dwc_tx_submit; |
1042 | desc->txd.flags = DMA_CTRL_ACK; |
1043 | desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, |
1044 | sizeof(desc->lli), DMA_TO_DEVICE); |
1045 | dwc_desc_put(dwc, desc); |
1046 | |
1047 | spin_lock_irqsave(&dwc->lock, flags); |
1048 | i = ++dwc->descs_allocated; |
1049 | } |
1050 | |
1051 | spin_unlock_irqrestore(&dwc->lock, flags); |
1052 | |
1053 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); |
1054 | |
1055 | return i; |
1056 | } |
1057 | |
1058 | static void dwc_free_chan_resources(struct dma_chan *chan) |
1059 | { |
1060 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1061 | struct dw_dma *dw = to_dw_dma(chan->device); |
1062 | struct dw_desc *desc, *_desc; |
1063 | unsigned long flags; |
1064 | LIST_HEAD(list); |
1065 | |
1066 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, |
1067 | dwc->descs_allocated); |
1068 | |
1069 | /* ASSERT: channel is idle */ |
1070 | BUG_ON(!list_empty(&dwc->active_list)); |
1071 | BUG_ON(!list_empty(&dwc->queue)); |
1072 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); |
1073 | |
1074 | spin_lock_irqsave(&dwc->lock, flags); |
1075 | list_splice_init(&dwc->free_list, &list); |
1076 | dwc->descs_allocated = 0; |
1077 | dwc->initialized = false; |
1078 | |
1079 | /* Disable interrupts */ |
1080 | channel_clear_bit(dw, MASK.XFER, dwc->mask); |
1081 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
1082 | |
1083 | spin_unlock_irqrestore(&dwc->lock, flags); |
1084 | |
1085 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
1086 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
1087 | dma_unmap_single(chan2parent(chan), desc->txd.phys, |
1088 | sizeof(desc->lli), DMA_TO_DEVICE); |
1089 | kfree(desc); |
1090 | } |
1091 | |
1092 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
1093 | } |
1094 | |
1095 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
1096 | |
1097 | /** |
1098 | * dw_dma_cyclic_start - start the cyclic DMA transfer |
1099 | * @chan: the DMA channel to start |
1100 | * |
1101 | * Must be called with soft interrupts disabled. Returns zero on success or |
1102 | * -errno on failure. |
1103 | */ |
1104 | int dw_dma_cyclic_start(struct dma_chan *chan) |
1105 | { |
1106 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1107 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1108 | unsigned long flags; |
1109 | |
1110 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { |
1111 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); |
1112 | return -ENODEV; |
1113 | } |
1114 | |
1115 | spin_lock_irqsave(&dwc->lock, flags); |
1116 | |
1117 | /* assert channel is idle */ |
1118 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
1119 | dev_err(chan2dev(&dwc->chan), |
1120 | "BUG: Attempted to start non-idle channel\n"); |
1121 | dwc_dump_chan_regs(dwc); |
1122 | spin_unlock_irqrestore(&dwc->lock, flags); |
1123 | return -EBUSY; |
1124 | } |
1125 | |
1126 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1127 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1128 | |
1129 | /* setup DMAC channel registers */ |
1130 | channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); |
1131 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
1132 | channel_writel(dwc, CTL_HI, 0); |
1133 | |
1134 | channel_set_bit(dw, CH_EN, dwc->mask); |
1135 | |
1136 | spin_unlock_irqrestore(&dwc->lock, flags); |
1137 | |
1138 | return 0; |
1139 | } |
1140 | EXPORT_SYMBOL(dw_dma_cyclic_start); |
1141 | |
1142 | /** |
1143 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer |
1144 | * @chan: the DMA channel to stop |
1145 | * |
1146 | * Must be called with soft interrupts disabled. |
1147 | */ |
1148 | void dw_dma_cyclic_stop(struct dma_chan *chan) |
1149 | { |
1150 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1151 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1152 | unsigned long flags; |
1153 | |
1154 | spin_lock_irqsave(&dwc->lock, flags); |
1155 | |
1156 | dwc_chan_disable(dw, dwc); |
1157 | |
1158 | spin_unlock_irqrestore(&dwc->lock, flags); |
1159 | } |
1160 | EXPORT_SYMBOL(dw_dma_cyclic_stop); |
1161 | |
1162 | /** |
1163 | * dw_dma_cyclic_prep - prepare the cyclic DMA transfer |
1164 | * @chan: the DMA channel to prepare |
1165 | * @buf_addr: physical DMA address where the buffer starts |
1166 | * @buf_len: total number of bytes for the entire buffer |
1167 | * @period_len: number of bytes for each period |
1168 | * @direction: transfer direction, to or from device |
1169 | * |
1170 | * Must be called before trying to start the transfer. Returns a valid struct |
1171 | * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. |
1172 | */ |
1173 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, |
1174 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, |
1175 | enum dma_transfer_direction direction) |
1176 | { |
1177 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1178 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
1179 | struct dw_cyclic_desc *cdesc; |
1180 | struct dw_cyclic_desc *retval = NULL; |
1181 | struct dw_desc *desc; |
1182 | struct dw_desc *last = NULL; |
1183 | unsigned long was_cyclic; |
1184 | unsigned int reg_width; |
1185 | unsigned int periods; |
1186 | unsigned int i; |
1187 | unsigned long flags; |
1188 | |
1189 | spin_lock_irqsave(&dwc->lock, flags); |
1190 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
1191 | spin_unlock_irqrestore(&dwc->lock, flags); |
1192 | dev_dbg(chan2dev(&dwc->chan), |
1193 | "queue and/or active list are not empty\n"); |
1194 | return ERR_PTR(-EBUSY); |
1195 | } |
1196 | |
1197 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1198 | spin_unlock_irqrestore(&dwc->lock, flags); |
1199 | if (was_cyclic) { |
1200 | dev_dbg(chan2dev(&dwc->chan), |
1201 | "channel already prepared for cyclic DMA\n"); |
1202 | return ERR_PTR(-EBUSY); |
1203 | } |
1204 | |
1205 | retval = ERR_PTR(-EINVAL); |
1206 | |
1207 | if (direction == DMA_MEM_TO_DEV) |
1208 | reg_width = __ffs(sconfig->dst_addr_width); |
1209 | else |
1210 | reg_width = __ffs(sconfig->src_addr_width); |
1211 | |
1212 | periods = buf_len / period_len; |
1213 | |
1214 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ |
1215 | if (period_len > (DWC_MAX_COUNT << reg_width)) |
1216 | goto out_err; |
1217 | if (unlikely(period_len & ((1 << reg_width) - 1))) |
1218 | goto out_err; |
1219 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) |
1220 | goto out_err; |
1221 | if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) |
1222 | goto out_err; |
1223 | |
1224 | retval = ERR_PTR(-ENOMEM); |
1225 | |
1226 | if (periods > NR_DESCS_PER_CHANNEL) |
1227 | goto out_err; |
1228 | |
1229 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); |
1230 | if (!cdesc) |
1231 | goto out_err; |
1232 | |
1233 | cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); |
1234 | if (!cdesc->desc) |
1235 | goto out_err_alloc; |
1236 | |
1237 | for (i = 0; i < periods; i++) { |
1238 | desc = dwc_desc_get(dwc); |
1239 | if (!desc) |
1240 | goto out_err_desc_get; |
1241 | |
1242 | switch (direction) { |
1243 | case DMA_MEM_TO_DEV: |
1244 | desc->lli.dar = sconfig->dst_addr; |
1245 | desc->lli.sar = buf_addr + (period_len * i); |
1246 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) |
1247 | | DWC_CTLL_DST_WIDTH(reg_width) |
1248 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1249 | | DWC_CTLL_DST_FIX |
1250 | | DWC_CTLL_SRC_INC |
1251 | | DWC_CTLL_INT_EN); |
1252 | |
1253 | desc->lli.ctllo |= sconfig->device_fc ? |
1254 | DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
1255 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
1256 | |
1257 | break; |
1258 | case DMA_DEV_TO_MEM: |
1259 | desc->lli.dar = buf_addr + (period_len * i); |
1260 | desc->lli.sar = sconfig->src_addr; |
1261 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) |
1262 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1263 | | DWC_CTLL_DST_WIDTH(reg_width) |
1264 | | DWC_CTLL_DST_INC |
1265 | | DWC_CTLL_SRC_FIX |
1266 | | DWC_CTLL_INT_EN); |
1267 | |
1268 | desc->lli.ctllo |= sconfig->device_fc ? |
1269 | DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
1270 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
1271 | |
1272 | break; |
1273 | default: |
1274 | break; |
1275 | } |
1276 | |
1277 | desc->lli.ctlhi = (period_len >> reg_width); |
1278 | cdesc->desc[i] = desc; |
1279 | |
1280 | if (last) { |
1281 | last->lli.llp = desc->txd.phys; |
1282 | dma_sync_single_for_device(chan2parent(chan), |
1283 | last->txd.phys, sizeof(last->lli), |
1284 | DMA_TO_DEVICE); |
1285 | } |
1286 | |
1287 | last = desc; |
1288 | } |
1289 | |
1290 | 	/* let's make a cyclic list */ |
1291 | last->lli.llp = cdesc->desc[0]->txd.phys; |
1292 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, |
1293 | sizeof(last->lli), DMA_TO_DEVICE); |
1294 | |
1295 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " |
1296 | "period %zu periods %d\n", (unsigned long long)buf_addr, |
1297 | buf_len, period_len, periods); |
1298 | |
1299 | cdesc->periods = periods; |
1300 | dwc->cdesc = cdesc; |
1301 | |
1302 | return cdesc; |
1303 | |
1304 | out_err_desc_get: |
1305 | while (i--) |
1306 | dwc_desc_put(dwc, cdesc->desc[i]); |
1307 | out_err_alloc: |
1308 | kfree(cdesc); |
1309 | out_err: |
1310 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1311 | return (struct dw_cyclic_desc *)retval; |
1312 | } |
1313 | EXPORT_SYMBOL(dw_dma_cyclic_prep); |
1314 | |
1315 | /** |
1316 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer |
1317 | * @chan: the DMA channel to free |
1318 | */ |
1319 | void dw_dma_cyclic_free(struct dma_chan *chan) |
1320 | { |
1321 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1322 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1323 | struct dw_cyclic_desc *cdesc = dwc->cdesc; |
1324 | int i; |
1325 | unsigned long flags; |
1326 | |
1327 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
1328 | |
1329 | if (!cdesc) |
1330 | return; |
1331 | |
1332 | spin_lock_irqsave(&dwc->lock, flags); |
1333 | |
1334 | dwc_chan_disable(dw, dwc); |
1335 | |
1336 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1337 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1338 | |
1339 | spin_unlock_irqrestore(&dwc->lock, flags); |
1340 | |
1341 | for (i = 0; i < cdesc->periods; i++) |
1342 | dwc_desc_put(dwc, cdesc->desc[i]); |
1343 | |
1344 | kfree(cdesc->desc); |
1345 | kfree(cdesc); |
1346 | |
1347 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1348 | } |
1349 | EXPORT_SYMBOL(dw_dma_cyclic_free); |
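/*
 * Lifecycle sketch for the cyclic extension above (illustrative; error
 * handling is trimmed and the buffer/period values are placeholders):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_addr, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_ctx;
 *
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */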
1350 | |
1351 | /*----------------------------------------------------------------------*/ |
1352 | |
1353 | static void dw_dma_off(struct dw_dma *dw) |
1354 | { |
1355 | int i; |
1356 | |
1357 | dma_writel(dw, CFG, 0); |
1358 | |
1359 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
1360 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1361 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); |
1362 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
1363 | |
1364 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) |
1365 | cpu_relax(); |
1366 | |
1367 | for (i = 0; i < dw->dma.chancnt; i++) |
1368 | dw->chan[i].initialized = false; |
1369 | } |
1370 | |
1371 | static int __devinit dw_probe(struct platform_device *pdev) |
1372 | { |
1373 | struct dw_dma_platform_data *pdata; |
1374 | struct resource *io; |
1375 | struct dw_dma *dw; |
1376 | size_t size; |
1377 | int irq; |
1378 | int err; |
1379 | int i; |
1380 | |
1381 | pdata = dev_get_platdata(&pdev->dev); |
1382 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) |
1383 | return -EINVAL; |
1384 | |
1385 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1386 | if (!io) |
1387 | return -EINVAL; |
1388 | |
1389 | irq = platform_get_irq(pdev, 0); |
1390 | if (irq < 0) |
1391 | return irq; |
1392 | |
1393 | size = sizeof(struct dw_dma); |
1394 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); |
1395 | dw = kzalloc(size, GFP_KERNEL); |
1396 | if (!dw) |
1397 | return -ENOMEM; |
1398 | |
1399 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { |
1400 | err = -EBUSY; |
1401 | goto err_kfree; |
1402 | } |
1403 | |
1404 | dw->regs = ioremap(io->start, DW_REGLEN); |
1405 | if (!dw->regs) { |
1406 | err = -ENOMEM; |
1407 | goto err_release_r; |
1408 | } |
1409 | |
1410 | dw->clk = clk_get(&pdev->dev, "hclk"); |
1411 | if (IS_ERR(dw->clk)) { |
1412 | err = PTR_ERR(dw->clk); |
1413 | goto err_clk; |
1414 | } |
1415 | clk_prepare_enable(dw->clk); |
1416 | |
1417 | 	/* Calculate the all-channel mask before DMA setup */ |
1418 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1419 | |
1420 | /* force dma off, just in case */ |
1421 | dw_dma_off(dw); |
1422 | |
1423 | /* disable BLOCK interrupts as well */ |
1424 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
1425 | |
1426 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); |
1427 | if (err) |
1428 | goto err_irq; |
1429 | |
1430 | platform_set_drvdata(pdev, dw); |
1431 | |
1432 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1433 | |
1434 | INIT_LIST_HEAD(&dw->dma.channels); |
1435 | for (i = 0; i < pdata->nr_channels; i++) { |
1436 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1437 | |
1438 | dwc->chan.device = &dw->dma; |
1439 | dma_cookie_init(&dwc->chan); |
1440 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1441 | list_add_tail(&dwc->chan.device_node, |
1442 | &dw->dma.channels); |
1443 | else |
1444 | list_add(&dwc->chan.device_node, &dw->dma.channels); |
1445 | |
1446 | /* 7 is highest priority & 0 is lowest. */ |
1447 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) |
1448 | dwc->priority = pdata->nr_channels - i - 1; |
1449 | else |
1450 | dwc->priority = i; |
1451 | |
1452 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1453 | spin_lock_init(&dwc->lock); |
1454 | dwc->mask = 1 << i; |
1455 | |
1456 | INIT_LIST_HEAD(&dwc->active_list); |
1457 | INIT_LIST_HEAD(&dwc->queue); |
1458 | INIT_LIST_HEAD(&dwc->free_list); |
1459 | |
1460 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1461 | } |
1462 | |
1463 | /* Clear all interrupts on all channels. */ |
1464 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
1465 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
1466 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1467 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); |
1468 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); |
1469 | |
1470 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1471 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1472 | if (pdata->is_private) |
1473 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); |
1474 | dw->dma.dev = &pdev->dev; |
1475 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1476 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; |
1477 | |
1478 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; |
1479 | |
1480 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; |
1481 | dw->dma.device_control = dwc_control; |
1482 | |
1483 | dw->dma.device_tx_status = dwc_tx_status; |
1484 | dw->dma.device_issue_pending = dwc_issue_pending; |
1485 | |
1486 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1487 | |
1488 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", |
1489 | dev_name(&pdev->dev), pdata->nr_channels); |
1490 | |
1491 | dma_async_device_register(&dw->dma); |
1492 | |
1493 | return 0; |
1494 | |
1495 | err_irq: |
1496 | clk_disable_unprepare(dw->clk); |
1497 | clk_put(dw->clk); |
1498 | err_clk: |
1499 | iounmap(dw->regs); |
1500 | dw->regs = NULL; |
1501 | err_release_r: |
1502 | 	release_mem_region(io->start, DW_REGLEN); |
1503 | err_kfree: |
1504 | kfree(dw); |
1505 | return err; |
1506 | } |
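/*
 * Board wiring sketch (illustrative; only fields that dw_probe() actually
 * consumes are shown, and the values are placeholders):
 *
 *	static struct dw_dma_platform_data dw_dmac_pdata = {
 *		.nr_channels		= 8,
 *		.is_private		= true,
 *		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
 *		.chan_priority		= CHAN_PRIORITY_ASCENDING,
 *	};
 *
 * The platform device named "dw_dmac" must also carry one memory resource
 * covering the register block and one IRQ resource.
 */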
1507 | |
1508 | static int __devexit dw_remove(struct platform_device *pdev) |
1509 | { |
1510 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1511 | struct dw_dma_chan *dwc, *_dwc; |
1512 | struct resource *io; |
1513 | |
1514 | dw_dma_off(dw); |
1515 | dma_async_device_unregister(&dw->dma); |
1516 | |
1517 | free_irq(platform_get_irq(pdev, 0), dw); |
1518 | tasklet_kill(&dw->tasklet); |
1519 | |
1520 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, |
1521 | chan.device_node) { |
1522 | list_del(&dwc->chan.device_node); |
1523 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1524 | } |
1525 | |
1526 | clk_disable_unprepare(dw->clk); |
1527 | clk_put(dw->clk); |
1528 | |
1529 | iounmap(dw->regs); |
1530 | dw->regs = NULL; |
1531 | |
1532 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1533 | release_mem_region(io->start, DW_REGLEN); |
1534 | |
1535 | kfree(dw); |
1536 | |
1537 | return 0; |
1538 | } |
1539 | |
1540 | static void dw_shutdown(struct platform_device *pdev) |
1541 | { |
1542 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1543 | |
1544 | 	dw_dma_off(dw); |
1545 | clk_disable_unprepare(dw->clk); |
1546 | } |
1547 | |
1548 | static int dw_suspend_noirq(struct device *dev) |
1549 | { |
1550 | struct platform_device *pdev = to_platform_device(dev); |
1551 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1552 | |
1553 | 	dw_dma_off(dw); |
1554 | clk_disable_unprepare(dw->clk); |
1555 | |
1556 | return 0; |
1557 | } |
1558 | |
1559 | static int dw_resume_noirq(struct device *dev) |
1560 | { |
1561 | struct platform_device *pdev = to_platform_device(dev); |
1562 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1563 | |
1564 | clk_prepare_enable(dw->clk); |
1565 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1566 | return 0; |
1567 | } |
1568 | |
1569 | static const struct dev_pm_ops dw_dev_pm_ops = { |
1570 | .suspend_noirq = dw_suspend_noirq, |
1571 | .resume_noirq = dw_resume_noirq, |
1572 | .freeze_noirq = dw_suspend_noirq, |
1573 | .thaw_noirq = dw_resume_noirq, |
1574 | .restore_noirq = dw_resume_noirq, |
1575 | .poweroff_noirq = dw_suspend_noirq, |
1576 | }; |
1577 | |
1578 | #ifdef CONFIG_OF |
1579 | static const struct of_device_id dw_dma_id_table[] = { |
1580 | { .compatible = "snps,dma-spear1340" }, |
1581 | {} |
1582 | }; |
1583 | MODULE_DEVICE_TABLE(of, dw_dma_id_table); |
1584 | #endif |
1585 | |
1586 | static struct platform_driver dw_driver = { |
1587 | .remove = __devexit_p(dw_remove), |
1588 | .shutdown = dw_shutdown, |
1589 | .driver = { |
1590 | .name = "dw_dmac", |
1591 | .pm = &dw_dev_pm_ops, |
1592 | .of_match_table = of_match_ptr(dw_dma_id_table), |
1593 | }, |
1594 | }; |
1595 | |
1596 | static int __init dw_init(void) |
1597 | { |
1598 | return platform_driver_probe(&dw_driver, dw_probe); |
1599 | } |
1600 | subsys_initcall(dw_init); |
1601 | |
1602 | static void __exit dw_exit(void) |
1603 | { |
1604 | platform_driver_unregister(&dw_driver); |
1605 | } |
1606 | module_exit(dw_exit); |
1607 | |
1608 | MODULE_LICENSE("GPL v2"); |
1609 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); |
1610 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
1611 | MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); |
1612 |