/*
 * Renesas SuperH DMA Engine support
 *
 * Based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA-chain mode.
 * - The maximum size of a single DMA transfer is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include <cpu/dma.h>
#include <asm/dma-sh.h>
#include "shdma.h"

/* DMA descriptor control */
#define DESC_LAST	(-1)
#define DESC_COMP	(1)
#define DESC_NCOMP	(0)

#define NR_DESCS_PER_CHANNEL 32
/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * This driver uses 4-byte burst mode.
 * To change the mode, change the value of RS_DEFAULT
 * (e.g. 1-byte burst mode -> (RS_DUAL & ~TS_32)).
 */
#define RS_DEFAULT	(RS_DUAL)

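/*
 * A sketch (illustration only) of the 1-byte burst variant mentioned
 * above; the TS_32 mask comes from <asm/dma-sh.h> and should be
 * verified against the CPU subtype in use:
 *
 *	#define RS_DEFAULT	(RS_DUAL & ~TS_32)
 */
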
#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg));
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = RS_DEFAULT; /* default is DUAL mode */
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(int id)
{
	unsigned short dmaor = dmaor_read_reg(id);

	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
	dmaor_write_reg(id, dmaor);
}

static int sh_dmae_rst(int id)
{
	unsigned short dmaor;

	sh_dmae_ctl_stop(id);
	dmaor = dmaor_read_reg(id) | DMAOR_INIT;

	dmaor_write_reg(id, dmaor);
	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
		pr_err("dma-sh: Can't initialize DMAOR.\n");
		return -EINVAL;
	}
	return 0;
}

static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_DE) {
		if (!(chcr & CHCR_TE))
			return -EBUSY; /* still transferring */
	}
	return 0; /* idle */
}

static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
	return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT];
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
{
	sh_dmae_writel(sh_chan, hw.sar, SAR);
	sh_dmae_writel(sh_chan, hw.dar, DAR);
	sh_dmae_writel(sh_chan, (hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
}
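
/*
 * Note on units (illustration, not driver code): TCR counts transfer
 * units, not bytes, so the byte length stored in hw.tcr is shifted
 * down by calc_xmit_shift() before being programmed. For example,
 * with the default 4-byte (32-bit) transfer size the shift is 2, and
 * a 64-byte copy is programmed as TCR = 64 >> 2 = 16 transfer units.
 */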

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr |= (CHCR_DE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
	sh_dmae_writel(sh_chan, chcr, CHCR);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	int ret = dmae_is_idle(sh_chan);
	/* CHCR must not be written while the DMA channel is busy */
	if (ret)
		return ret;

	sh_dmae_writel(sh_chan, val, CHCR);
	return 0;
}

#define DMARS1_ADDR	0x04
#define DMARS2_ADDR	0x08
#define DMARS_SHIFT	8
#define DMARS_CHAN_MSK	0x01
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	u32 addr;
	int shift = 0;
	int ret = dmae_is_idle(sh_chan);
	if (ret)
		return ret;

	if (sh_chan->id & DMARS_CHAN_MSK)
		shift = DMARS_SHIFT;

	switch (sh_chan->id) {
	/* DMARS0 */
	case 0:
	case 1:
		addr = SH_DMARS_BASE;
		break;
	/* DMARS1 */
	case 2:
	case 3:
		addr = (SH_DMARS_BASE + DMARS1_ADDR);
		break;
	/* DMARS2 */
	case 4:
	case 5:
		addr = (SH_DMARS_BASE + DMARS2_ADDR);
		break;
	default:
		return -EINVAL;
	}

	/* Preserve the other channel's byte of the shared 16-bit register */
	ctrl_outw((val << shift) |
		  (ctrl_inw(addr) & (shift ? 0x00FF : 0xFF00)),
		  addr);

	return 0;
}
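
/*
 * Layout sketch (illustration only): each 16-bit DMARS register holds
 * the 8-bit resource IDs of two channels - the even channel in the low
 * byte, the odd channel in the high byte. E.g. programming a
 * hypothetical resource ID 0xc1 on channel 3 writes bits 15:8 of the
 * register at SH_DMARS_BASE + DMARS1_ADDR and leaves channel 2's low
 * byte untouched.
 */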

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx);
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&sh_chan->desc_lock);

	cookie = sh_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;

	/*
	 * The first descriptor normally gets the new cookie; in a
	 * single-descriptor transaction it is also the last one and
	 * keeps the -EBUSY mark set by sh_dmae_prep_memcpy().
	 */
	if (desc->async_tx.cookie != -EBUSY)
		desc->async_tx.cookie = cookie;
	sh_chan->common.cookie = desc->async_tx.cookie;

	list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev);

	spin_unlock_bh(&sh_chan->desc_lock);

	return cookie;
}

static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc, *_desc, *ret = NULL;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			ret = desc;
			break;
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return ret;
}

static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc)
{
	if (desc) {
		spin_lock_bh(&sh_chan->desc_lock);

		list_splice_init(&desc->tx_list, &sh_chan->ld_free);
		list_add(&desc->node, &sh_chan->ld_free);

		spin_unlock_bh(&sh_chan->desc_lock);
	}
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;

	spin_lock_bh(&sh_chan->desc_lock);
	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&sh_chan->desc_lock);
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc) {
			spin_lock_bh(&sh_chan->desc_lock);
			break;
		}
		dma_async_tx_descriptor_init(&desc->async_tx, &sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->async_tx.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc->tx_list);
		sh_dmae_put_desc(sh_chan, desc);

		spin_lock_bh(&sh_chan->desc_lock);
		sh_chan->descs_allocated++;
	}
	spin_unlock_bh(&sh_chan->desc_lock);

	return sh_chan->descs_allocated;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&sh_chan->ld_queue));
	spin_lock_bh(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_bh(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct sh_desc *first = NULL, *prev = NULL, *new;
	size_t copy_size;

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	do {
		/* Allocate the link descriptor from the DMA pool */
		new = sh_dmae_get_desc(sh_chan);
		if (!new) {
			dev_err(sh_chan->dev,
				"No free memory for link descriptor\n");
			goto err_get_desc;
		}

		copy_size = min(len, (size_t)SH_DMA_TCR_MAX);

		new->hw.sar = dma_src;
		new->hw.dar = dma_dest;
		new->hw.tcr = copy_size;
		if (!first)
			first = new;

		new->mark = DESC_NCOMP;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy_size;
		dma_src += copy_size;
		dma_dest += copy_size;
		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY; /* Last desc */

	return &first->async_tx;

err_get_desc:
	sh_dmae_put_desc(sh_chan, first);
	return NULL;
}
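
/*
 * Usage sketch (illustration only, not part of the driver): a dmaengine
 * client would typically drive the prep routine above roughly like this,
 * assuming "chan" was obtained from dma_request_channel() and dst/src
 * are already DMA-mapped. Kept under #if 0 so it is never built.
 */
#if 0
static dma_cookie_t example_sh_memcpy(struct dma_chan *chan,
				      dma_addr_t dst, dma_addr_t src,
				      size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Builds the descriptor chain via sh_dmae_prep_memcpy() */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);		  /* sh_dmae_tx_submit() */
	chan->device->device_issue_pending(chan); /* starts the transfer */

	return cookie;
}
#endif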

/*
 * sh_dmae_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc, *_desc;

	spin_lock_bh(&sh_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		/* not yet transferred */
		if (desc->mark == DESC_NCOMP)
			break;

		/* transferred descriptor */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from the ld_queue list */
		list_splice_init(&desc->tx_list, &sh_chan->ld_free);

		dev_dbg(sh_chan->dev, "link descriptor %p will be recycled.\n",
			desc);

		list_move(&desc->node, &sh_chan->ld_free);
		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_bh(&sh_chan->desc_lock);
			dev_dbg(sh_chan->dev, "link descriptor %p callback\n",
				desc);
			callback(callback_param);
			spin_lock_bh(&sh_chan->desc_lock);
		}
	}
	spin_unlock_bh(&sh_chan->desc_lock);
}

static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct list_head *ld_node;
	struct sh_dmae_regs hw;

	/* Leave the channel alone while a transfer is still in flight */
	if (dmae_is_idle(sh_chan))
		return;

	/* Find the first untransferred descriptor */
	for (ld_node = sh_chan->ld_queue.next;
	     (ld_node != &sh_chan->ld_queue) &&
	     (to_sh_desc(ld_node)->mark == DESC_COMP);
	     ld_node = ld_node->next)
		cpu_relax();

	if (ld_node != &sh_chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		hw = to_sh_desc(ld_node)->hw;
		dmae_set_reg(sh_chan, hw);
		dmae_start(sh_chan);
	}
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	sh_chan_xfer_ld_queue(sh_chan);
}

static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   dma_cookie_t *done,
					   dma_cookie_t *used)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	sh_dmae_chan_ld_cleanup(sh_chan);

	last_used = chan->cookie;
	last_complete = sh_chan->completed_cookie;
	if (last_complete == -EBUSY)
		last_complete = last_used;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
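
/*
 * Usage sketch (illustration only): a client normally reaches the
 * routine above through the generic dmaengine wrapper, e.g. by
 * busy-wait polling for a submitted cookie:
 *
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *	       == DMA_IN_PROGRESS)
 *		cpu_relax();
 */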

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	u32 chcr = sh_dmae_readl(sh_chan, CHCR);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	int err = 0;
	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;

	/* IRQ Multi */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		int cnt = 0;

		switch (irq) {
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		case DMTE6_IRQ:
			cnt++;
			/* fall through */
#endif
		case DMTE0_IRQ:
			if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
				disable_irq(irq);
				return IRQ_HANDLED;
			}
			/* fall through */
		default:
			return IRQ_NONE;
		}
	} else {
		/* reset dma controller */
		err = sh_dmae_rst(0);
		if (err)
			return IRQ_NONE;	/* reset failed */
		if (shdev->pdata.mode & SHDMA_DMAOR1) {
			err = sh_dmae_rst(1);
			if (err)
				return IRQ_NONE;
		}
		disable_irq(irq);
		return IRQ_HANDLED;
	}
}
#endif

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc, *_desc, *cur_desc = NULL;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);

	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
			cur_desc = desc;
			break;
		}
	}

	if (cur_desc) {
		switch (cur_desc->async_tx.cookie) {
		case 0: /* intermediate desc */
			break;
		case -EBUSY: /* last desc */
			sh_chan->completed_cookie =
				cur_desc->async_tx.cookie;
			break;
		default: /* first desc (cookie > 0) */
			sh_chan->completed_cookie =
				cur_desc->async_tx.cookie - 1;
			break;
		}
		cur_desc->mark = DESC_COMP;
	}
	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	sh_dmae_chan_ld_cleanup(sh_chan);
}
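
/*
 * Matching note (illustration only, made-up addresses): SAR advances
 * past the source region as a transfer runs, so the descriptor that
 * just finished is the one whose hw.sar + hw.tcr equals the current
 * SAR. E.g. a descriptor with hw.sar = 0x0c001000 and hw.tcr = 0x100
 * bytes is matched once SAR reads 0x0c001100.
 */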

static unsigned int get_dmae_irq(unsigned int id)
{
	unsigned int irq = 0;
	if (id < ARRAY_SIZE(dmte_irq_map))
		irq = dmte_irq_map[id];
	return irq;
}

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
{
	int err;
	unsigned int irq = get_dmae_irq(id);
	unsigned long irqflags = IRQF_DISABLED;
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev, "No free memory for allocating "
			"dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	/* Init the channel */
	dmae_init(new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* copy struct dma_device */
	new_sh_chan->common.device = &shdev->common;

	/* Add the channel to the DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
#if defined(DMTE6_IRQ)
		if (irq >= DMTE6_IRQ)
			irq = DMTE6_IRQ;
		else
#endif
			irq = DMTE0_IRQ;
	}

	snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
		 "sh-dmae%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt,
			  irqflags, new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	/* CHCR register control function */
	new_sh_chan->set_chcr = dmae_set_chcr;
	/* DMARS register control function */
	new_sh_chan->set_dmars = dmae_set_dmars;

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1; i >= 0; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *shchan = shdev->chan[i];

			if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
				free_irq(dmte_irq_map[i], shchan);

			list_del(&shchan->common.device_node);
			kfree(shchan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	int err = 0, cnt, ecnt;
	unsigned long irqflags = IRQF_DISABLED;
#if defined(CONFIG_CPU_SH4)
	int eirq[] = { DMAE0_IRQ,
#if defined(DMAE1_IRQ)
		       DMAE1_IRQ
#endif
	};
#endif
	struct sh_dmae_device *shdev;

	/* get platform data */
	if (!pdev->dev.platform_data)
		return -ENODEV;

	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	/* platform data */
	memcpy(&shdev->pdata, pdev->dev.platform_data,
	       sizeof(struct sh_dmae_pdata));

	/* reset dma controller */
	err = sh_dmae_rst(0);
	if (err)
		goto rst_err;

	/* SH7780/85/23 has DMAOR1 */
	if (shdev->pdata.mode & SHDMA_DMAOR1) {
		err = sh_dmae_rst(1);
		if (err)
			goto rst_err;
	}

	INIT_LIST_HEAD(&shdev->common.channels);

	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
	shdev->common.dev = &pdev->dev;

#if defined(CONFIG_CPU_SH4)
	/* Mix IRQ mode (SH7722/SH7730 etc.): error and transfer-end
	 * interrupts share the same IRQ lines */
	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		irqflags = IRQF_SHARED;
		eirq[0] = DMTE0_IRQ;
#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
		eirq[1] = DMTE6_IRQ;
#endif
	}

	for (ecnt = 0; ecnt < ARRAY_SIZE(eirq); ecnt++) {
		err = request_irq(eirq[ecnt], sh_dmae_err,
				  irqflags, "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev, "DMA device request_irq "
				"error (irq %d) with return %d\n",
				eirq[ecnt], err);
			goto eirq_err;
		}
	}
#endif /* CONFIG_CPU_SH4 */

	/* Create DMA Channel */
	for (cnt = 0; cnt < MAX_DMA_CHANNELS; cnt++) {
		err = sh_dmae_chan_probe(shdev, cnt);
		if (err)
			goto chan_probe_err;
	}

	platform_set_drvdata(pdev, shdev);
	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
	for (ecnt--; ecnt >= 0; ecnt--)
		free_irq(eirq[ecnt], shdev);

rst_err:
	kfree(shdev);

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&shdev->common);

	if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
		free_irq(DMTE0_IRQ, shdev);
#if defined(DMTE6_IRQ)
		free_irq(DMTE6_IRQ, shdev);
#endif
	}

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
		free_irq(DMAE0_IRQ, shdev);
#if defined(DMAE1_IRQ)
		free_irq(DMAE1_IRQ, shdev);
#endif
	}
	kfree(shdev);

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);

	sh_dmae_ctl_stop(0);
	if (shdev->pdata.mode & SHDMA_DMAOR1)
		sh_dmae_ctl_stop(1);
}

static struct platform_driver sh_dmae_driver = {
	.remove		= __exit_p(sh_dmae_remove),
	.shutdown	= sh_dmae_shutdown,
	.driver = {
		.name	= "sh-dma-engine",
	},
};

static int __init sh_dmae_init(void)
{
	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");