/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

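/*
 * The controller comes in a 64-bit and a 32-bit register-layout variant.
 * The channel64_ and channel32_ accessors operate on one specific layout;
 * channel_readl()/channel_writel() pick the right one at runtime via
 * is_dmac64().
 */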
#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	 channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

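/*
 * On the 64-bit DMAC the CHAR (chain address) register is 64 bits wide,
 * but the CHAR field of txx9dmac_cregs is only 32 bits (plus a pad word)
 * when the kernel cannot address 64-bit physical memory, so pick the
 * access width that matches the field at compile time.
 */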
static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}

static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}

static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	 dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}

#define TXX9_DMA_MAX_COUNT	0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT	64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->tx_list))
		desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
	return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

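/*
 * Allocate one software descriptor and map its hardware part so the
 * controller can fetch it; the mapping stays in place for the lifetime
 * of the descriptor and is synced with dma_sync_single_for_*() whenever
 * the CPU or the device needs to see up-to-date contents.
 */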
static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
					ddev->descsize, DMA_TO_DEVICE);
	return desc;
}

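/*
 * Grab a descriptor from the free list, skipping entries the client has
 * not yet ACKed; fall back to an atomic allocation if none is available.
 */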
static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}

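/*
 * Hand a descriptor chain back to the CPU before the driver reads or
 * rewrites its hardware fields.
 */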
static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
					child->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
				desc->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}

/*----------------------------------------------------------------------*/

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}

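/*
 * Pulse the channel-reset bit in CCR and clear every channel register
 * so the channel comes back in a known idle state.
 */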
static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
	mmiowb();
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		/* All 64-bit DMACs support SMPCHN */
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a non-zero value to CHAR will assert XFACT */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a non-zero value to CHAR will assert XFACT */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}

/*----------------------------------------------------------------------*/

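/*
 * Retire one descriptor: complete its cookie, move it (and any children)
 * back to the free list, unmap the client buffers for mem-to-mem
 * transfers, and only then run the client callback.
 */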
static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct txx9dmac_slave *ds = dc->chan.private;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dma_cookie_complete(txd);
	callback = txd->callback;
	param = txd->callback_param;

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&desc->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	if (!ds) {
		dma_addr_t dmaaddr;
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			dmaaddr = is_dmac64(dc) ?
				desc->hwdesc.DAR : desc->hwdesc32.DAR;
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(chan2parent(&dc->chan),
					dmaaddr, desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(chan2parent(&dc->chan),
					dmaaddr, desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			dmaaddr = is_dmac64(dc) ?
				desc->hwdesc.SAR : desc->hwdesc32.SAR;
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(chan2parent(&dc->chan),
					dmaaddr, desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(chan2parent(&dc->chan),
					dmaaddr, desc->len, DMA_TO_DEVICE);
		}
	}

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
	dma_run_dependencies(txd);
}

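/*
 * Move descriptors from dc->queue to *list, linking them into one
 * hardware chain by writing each descriptor's bus address into the
 * CHAR field of its predecessor.
 */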
static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
				prev->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}

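/*
 * Called when the channel went idle: restart anything still queued,
 * then retire everything that was on the active list.
 */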
static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}

static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}

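/*
 * Walk the active list and retire every descriptor the controller has
 * already moved past; the CHAR register tells us which descriptor is
 * currently in flight.
 */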
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}

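/*
 * Interrupt handling comes in two flavours: one IRQ and tasklet per
 * channel, or a single IRQ and tasklet shared by the whole controller.
 * The hard handlers only kick the tasklet; the IRQ line stays disabled
 * until the tasklet has scanned the affected channels.
 */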
static void txx9dmac_chan_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = (struct txx9dmac_chan *)data;
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static void txx9dmac_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}

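/*
 * Build a memcpy chain: the transfer is split into blocks of at most
 * TXX9_DMA_MAX_COUNT bytes, each described by one hardware descriptor.
 */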
static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit. If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_MEM_TO_DEV);
	else
		BUG_ON(direction != DMA_DEV_TO_MEM);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		if (direction == DMA_MEM_TO_DEV) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}

static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -EINVAL;

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);

	return 0;
}

static enum dma_status
txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		spin_lock_bh(&dc->lock);
		txx9dmac_scan_descriptors(dc);
		spin_unlock_bh(&dc->lock);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	return ret;
}

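/*
 * Append newly queued descriptors to a chain that is already running
 * (SMPCHN only); if the controller has already fetched the old chain
 * terminator, restart it at the first new descriptor.
 */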
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	mmiowb();
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}

static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}

static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
		dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				 ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
	mmiowb();
}

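/*
 * The hardware is modelled as one "txx9dmac" platform device for the
 * controller plus one "txx9dmac-chan" platform device per channel; the
 * channel probe below registers each channel as its own dmaengine
 * device.
 */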
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_control = txx9dmac_control;
	dc->dma.device_tx_status = txx9dmac_tx_status;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	if (dc->ddev->irq < 0) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
			     (unsigned long)dc);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dma_cookie_init(&dc->chan);

	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}

static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
{
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
	if (dc->irq >= 0)
		tasklet_kill(&dc->tasklet);
	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
	return 0;
}

static int __init txx9dmac_probe(struct platform_device *pdev)
{
	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
	struct resource *io;
	struct txx9dmac_dev *ddev;
	u32 mcr;
	int err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
				     dev_name(&pdev->dev)))
		return -EBUSY;

	ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!ddev->regs)
		return -ENOMEM;
	ddev->have_64bit_regs = pdata->have_64bit_regs;
	if (__is_dmac64(ddev))
		ddev->descsize = sizeof(struct txx9dmac_hwdesc);
	else
		ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

	/* force dma off, just in case */
	txx9dmac_off(ddev);

	ddev->irq = platform_get_irq(pdev, 0);
	if (ddev->irq >= 0) {
		tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
			     (unsigned long)ddev);
		err = devm_request_irq(&pdev->dev, ddev->irq,
			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
		if (err)
			return err;
	}

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static int __exit txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	if (ddev->irq >= 0)
		tasklet_kill(&ddev->tasklet);
	return 0;
}

static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}

static int txx9dmac_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	return 0;
}

static int txx9dmac_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
	struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
	u32 mcr;

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;
}

static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
	.suspend_noirq = txx9dmac_suspend_noirq,
	.resume_noirq = txx9dmac_resume_noirq,
};

static struct platform_driver txx9dmac_chan_driver = {
	.remove		= __exit_p(txx9dmac_chan_remove),
	.driver = {
		.name	= "txx9dmac-chan",
	},
};

static struct platform_driver txx9dmac_driver = {
	.remove		= __exit_p(txx9dmac_remove),
	.shutdown	= txx9dmac_shutdown,
	.driver = {
		.name	= "txx9dmac",
		.pm	= &txx9dmac_dev_pm_ops,
	},
};

static int __init txx9dmac_init(void)
{
	int rc;

	rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
	if (!rc) {
		rc = platform_driver_probe(&txx9dmac_chan_driver,
					   txx9dmac_chan_probe);
		if (rc)
			platform_driver_unregister(&txx9dmac_driver);
	}
	return rc;
}
module_init(txx9dmac_init);

static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_ALIAS("platform:txx9dmac");
MODULE_ALIAS("platform:txx9dmac-chan");