/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

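/*
 * ATC_DEFAULT_CTRLB routes both source and destination through the memory
 * AHB interface, which is what memcpy needs; the slave and cyclic prep
 * functions below set ATC_SIF()/ATC_DIF() explicitly instead, using
 * AT_DMA_PER_IF on the peripheral side.
 */
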
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}
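
/*
 * Minimal sketch of how the prep_* functions below drive atc_desc_chain():
 * the first descriptor anchors the chain, and each later descriptor is
 * linked both into the hardware LLI list (lli.dscr) and into the software
 * tx_list:
 *
 *	struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *	while (more chunks to set up) {
 *		desc = atc_desc_get(atchan);
 *		// ... fill desc->lli ...
 *		atc_desc_chain(&first, &prev, desc);
 *	}
 *	set_desc_eol(prev);	// terminate the hardware list
 */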

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
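
/*
 * Note on the register writes above: only DSCR is loaded with a real value;
 * SADDR/DADDR/CTRLA/CTRLB are cleared so that, once the channel is enabled
 * through CHER, the controller fetches the actual transfer parameters from
 * the first hardware LLI that DSCR points at.
 */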

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non-cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now that it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
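
/*
 * Sketch of the generic dmaengine client flow that ends up here (client
 * code, not part of this driver):
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, dir, flags);
 *	cookie = dmaengine_submit(txd);		// calls atc_tx_submit()
 *	dma_async_issue_pending(chan);		// calls atc_issue_pending()
 */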

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation destination address
 * @src: operation source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
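
/*
 * Worked example for the width selection above, assuming ATC_BTSIZE_MAX is
 * 0xffff as defined in at_hdmac_regs.h: when src, dest and len are all
 * multiples of 4, src_width = dst_width = 2, so each LLI moves up to 0xffff
 * 32-bit words; a 1 MiB copy (262144 words) therefore needs five
 * descriptors.
 */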


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}

/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
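
/*
 * Worked example, again assuming ATC_BTSIZE_MAX is 0xffff: for a 32-bit
 * peripheral (reg_width = 2) a period may span at most 0xffff << 2 bytes,
 * and both period_len and buf_addr must be multiples of 4; anything else
 * is rejected with -EINVAL before any descriptor is built.
 */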

/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =   ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %u (%zu/%zu)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
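
/*
 * Hedged client-side sketch (generic dmaengine calls, not code from this
 * driver; the FIFO address is hypothetical): an audio driver would
 * typically configure the channel and then request a cyclic transfer over
 * its ring buffer:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= ssc_tx_fifo_phys,	// hypothetical
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.dst_maxburst	= 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					DMA_MEM_TO_DEV, 0);
 */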

static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	/* Check that the channel is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}
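
/*
 * convert_burst() (from at_hdmac_regs.h) turns the client's maxburst
 * values, expressed in transfers, into the encoded chunk-size fields that
 * ATC_SCSIZE()/ATC_DCSIZE() place in CTRLA, so the conversion is done once
 * here rather than in every prep function.
 */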


static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}
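
/*
 * Hedged example of the generic polling helper that lands in
 * atc_tx_status() (client code, not part of this driver):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_SUCCESS)
 *		// state.residue bytes are still pending; note this driver
 *		// reports residue at whole-chain granularity (first->len)
 */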

/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
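
/*
 * A matching device tree node would look roughly like the sketch below;
 * only the compatible string comes from the table above, the unit address,
 * reg and interrupt values are made up for illustration:
 *
 *	dma-controller@ffffec00 {
 *		compatible = "atmel,at91sam9g45-dma";
 *		reg = <0xffffec00 0x200>;
 *		interrupts = <21>;
 *	};
 */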

/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* The channel should have been paused by the channel user;
	 * pause it here anyway if that has not been done yet */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");