Root/
/*
 * drivers/dma/coh901318.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * DMA driver for COH 901 318
 * Author: Per Friden <per.friden@stericsson.com>
 */
9 | |
10 | #include <linux/init.h> |
11 | #include <linux/module.h> |
12 | #include <linux/kernel.h> /* printk() */ |
13 | #include <linux/fs.h> /* everything... */ |
14 | #include <linux/slab.h> /* kmalloc() */ |
15 | #include <linux/dmaengine.h> |
16 | #include <linux/platform_device.h> |
17 | #include <linux/device.h> |
18 | #include <linux/irqreturn.h> |
19 | #include <linux/interrupt.h> |
20 | #include <linux/io.h> |
21 | #include <linux/uaccess.h> |
22 | #include <linux/debugfs.h> |
23 | #include <mach/coh901318.h> |
24 | |
25 | #include "coh901318_lli.h" |
26 | |
/* Resolve the struct device behind a channel, for dev_*() logging */
#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)

/*
 * COH_DBG(x): run x only in verbose-debug builds; the disabled branch
 * still compiles the expression, so debug statements cannot bit-rot.
 */
#ifdef VERBOSE_DEBUG
#define COH_DBG(x) ({ if (1) x; 0; })
#else
#define COH_DBG(x) ({ if (0) x; 0; })
#endif
34 | |
/**
 * struct coh901318_desc - software descriptor for one DMA job
 * @desc: dmaengine transaction descriptor handed back to the client
 * @node: list entry; lives on one of the channel's free/queue/active lists
 * @sg: scatterlist for slave transfers (NULL for memcpy jobs)
 * @sg_len: number of entries in @sg
 * @data: head of the hardware linked-list (lli) chain for this job
 * @dir: transfer direction for slave transfers
 * @flags: dmaengine flags passed in at prepare time
 */
struct coh901318_desc {
        struct dma_async_tx_descriptor desc;
        struct list_head node;
        struct scatterlist *sg;
        unsigned int sg_len;
        struct coh901318_lli *data;
        enum dma_data_direction dir;
        unsigned long flags;
};
44 | |
/**
 * struct coh901318_base - driver-global state for one COH 901 318 block
 * @dev: the platform device
 * @virtbase: ioremapped base of the controller's register window
 * @pool: allocator for hardware lli elements
 * @pm: powersave bookkeeping (bitmask of running channels)
 * @dma_slave: dmaengine device exposing the slave-capable channels
 * @dma_memcpy: dmaengine device exposing the memcpy-capable channels
 * @chans: array of per-channel states, indexed by hardware channel id
 * @platform: platform-supplied channel configuration table
 */
struct coh901318_base {
        struct device *dev;
        void __iomem *virtbase;
        struct coh901318_pool pool;
        struct powersave pm;
        struct dma_device dma_slave;
        struct dma_device dma_memcpy;
        struct coh901318_chan *chans;
        struct coh901318_platform *platform;
};
55 | |
/**
 * struct coh901318_chan - state of one DMA channel
 * @lock: protects the fields below and this channel's registers
 * @allocated: nonzero while a client owns this channel
 * @completed: cookie of the most recently completed transaction
 * @id: hardware channel number
 * @stopped: set by coh901318_stop() when a running job was halted
 * @free_work: work item (its handler is not visible in this chunk)
 * @chan: the embedded dmaengine channel
 * @tasklet: bottom half that completes finished descriptors
 * @active: descriptors submitted to hardware
 * @queue: descriptors waiting to be submitted
 * @free: recycled descriptors available for reuse
 * @nbr_active_done: count of terminal-count IRQs still to be handled
 *                   by the tasklet
 * @busy: nonzero while the channel is actively processing jobs
 * @base: back-pointer to the driver-global state
 */
struct coh901318_chan {
        spinlock_t lock;
        int allocated;
        int completed;
        int id;
        int stopped;

        struct work_struct free_work;
        struct dma_chan chan;

        struct tasklet_struct tasklet;

        struct list_head active;
        struct list_head queue;
        struct list_head free;

        unsigned long nbr_active_done;
        unsigned long busy;

        struct coh901318_base *base;
};
77 | |
78 | static void coh901318_list_print(struct coh901318_chan *cohc, |
79 | struct coh901318_lli *lli) |
80 | { |
81 | struct coh901318_lli *l = lli; |
82 | int i = 0; |
83 | |
84 | while (l) { |
85 | dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" |
86 | ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n", |
87 | i, l, l->control, l->src_addr, l->dst_addr, |
88 | l->link_addr, l->virt_link_addr); |
89 | i++; |
90 | l = l->virt_link_addr; |
91 | } |
92 | } |
93 | |
94 | #ifdef CONFIG_DEBUG_FS |
95 | |
96 | #define COH901318_DEBUGFS_ASSIGN(x, y) (x = y) |
97 | |
98 | static struct coh901318_base *debugfs_dma_base; |
99 | static struct dentry *dma_dentry; |
100 | |
/* debugfs open: stash the inode's private data for the read handler */
static int coh901318_debugfs_open(struct inode *inode, struct file *file)
{

        file->private_data = inode->i_private;
        return 0;
}
107 | |
108 | static int coh901318_debugfs_read(struct file *file, char __user *buf, |
109 | size_t count, loff_t *f_pos) |
110 | { |
111 | u64 started_channels = debugfs_dma_base->pm.started_channels; |
112 | int pool_count = debugfs_dma_base->pool.debugfs_pool_counter; |
113 | int i; |
114 | int ret = 0; |
115 | char *dev_buf; |
116 | char *tmp; |
117 | int dev_size; |
118 | |
119 | dev_buf = kmalloc(4*1024, GFP_KERNEL); |
120 | if (dev_buf == NULL) |
121 | goto err_kmalloc; |
122 | tmp = dev_buf; |
123 | |
124 | tmp += sprintf(tmp, "DMA -- enabled dma channels\n"); |
125 | |
126 | for (i = 0; i < debugfs_dma_base->platform->max_channels; i++) |
127 | if (started_channels & (1 << i)) |
128 | tmp += sprintf(tmp, "channel %d\n", i); |
129 | |
130 | tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count); |
131 | dev_size = tmp - dev_buf; |
132 | |
133 | /* No more to read if offset != 0 */ |
134 | if (*f_pos > dev_size) |
135 | goto out; |
136 | |
137 | if (count > dev_size - *f_pos) |
138 | count = dev_size - *f_pos; |
139 | |
140 | if (copy_to_user(buf, dev_buf + *f_pos, count)) |
141 | ret = -EINVAL; |
142 | ret = count; |
143 | *f_pos += count; |
144 | |
145 | out: |
146 | kfree(dev_buf); |
147 | return ret; |
148 | |
149 | err_kmalloc: |
150 | return 0; |
151 | } |
152 | |
/* File operations for the debugfs "status" node (read-only snapshot) */
static const struct file_operations coh901318_debugfs_status_operations = {
        .owner          = THIS_MODULE,
        .open           = coh901318_debugfs_open,
        .read           = coh901318_debugfs_read,
};
158 | |
159 | |
/*
 * Create /sys/kernel/debug/dma/status. Creation failures are
 * deliberately ignored (debugfs is best-effort) - hence the (void).
 */
static int __init init_coh901318_debugfs(void)
{

        dma_dentry = debugfs_create_dir("dma", NULL);

        (void) debugfs_create_file("status",
                                   S_IFREG | S_IRUGO,
                                   dma_dentry, NULL,
                                   &coh901318_debugfs_status_operations);
        return 0;
}
171 | |
/* Tear down the whole debugfs "dma" directory, including "status" */
static void __exit exit_coh901318_debugfs(void)
{
        debugfs_remove_recursive(dma_dentry);
}
176 | |
177 | module_init(init_coh901318_debugfs); |
178 | module_exit(exit_coh901318_debugfs); |
179 | #else |
180 | |
181 | #define COH901318_DEBUGFS_ASSIGN(x, y) |
182 | |
183 | #endif /* CONFIG_DEBUG_FS */ |
184 | |
/* Map a generic dmaengine channel to its embedding driver state */
static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
{
        return container_of(chan, struct coh901318_chan, chan);
}
189 | |
/* Platform-configured device (peripheral) address for this channel */
static inline dma_addr_t
cohc_dev_addr(struct coh901318_chan *cohc)
{
        return cohc->base->platform->chan_conf[cohc->id].dev_addr;
}
195 | |
/* Platform-configured default register parameters for this channel */
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
        return &cohc->base->platform->chan_conf[cohc->id].param;
}
201 | |
/* Full platform configuration record for this channel */
static inline const struct coh_dma_channel *
cohc_chan_conf(struct coh901318_chan *cohc)
{
        return &cohc->base->platform->chan_conf[cohc->id];
}
207 | |
/*
 * Mark this channel as no longer running. When the last running channel
 * goes idle, tell the platform that the DMA controller no longer needs
 * memory access, allowing deeper power states.
 * (Named from the powersave perspective: called when a channel stops.)
 */
static void enable_powersave(struct coh901318_chan *cohc)
{
        unsigned long flags;
        struct powersave *pm = &cohc->base->pm;

        spin_lock_irqsave(&pm->lock, flags);

        pm->started_channels &= ~(1ULL << cohc->id);

        if (!pm->started_channels) {
                /* DMA no longer intends to access memory */
                cohc->base->platform->access_memory_state(cohc->base->dev,
                                                          false);
        }

        spin_unlock_irqrestore(&pm->lock, flags);
}
/*
 * Mark this channel as running. On the first channel to start, tell the
 * platform that the DMA controller is about to access memory, blocking
 * power-saving states. Counterpart of enable_powersave().
 */
static void disable_powersave(struct coh901318_chan *cohc)
{
        unsigned long flags;
        struct powersave *pm = &cohc->base->pm;

        spin_lock_irqsave(&pm->lock, flags);

        if (!pm->started_channels) {
                /* DMA intends to access memory */
                cohc->base->platform->access_memory_state(cohc->base->dev,
                                                          true);
        }

        pm->started_channels |= (1ULL << cohc->id);

        spin_unlock_irqrestore(&pm->lock, flags);
}
242 | |
243 | static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control) |
244 | { |
245 | int channel = cohc->id; |
246 | void __iomem *virtbase = cohc->base->virtbase; |
247 | |
248 | writel(control, |
249 | virtbase + COH901318_CX_CTRL + |
250 | COH901318_CX_CTRL_SPACING * channel); |
251 | return 0; |
252 | } |
253 | |
254 | static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf) |
255 | { |
256 | int channel = cohc->id; |
257 | void __iomem *virtbase = cohc->base->virtbase; |
258 | |
259 | writel(conf, |
260 | virtbase + COH901318_CX_CFG + |
261 | COH901318_CX_CFG_SPACING*channel); |
262 | return 0; |
263 | } |
264 | |
265 | |
/*
 * Kick off a transfer on this channel: block powersave, then set the
 * channel-enable bit in the configuration register (read-modify-write
 * so the rest of the configuration is preserved). Always returns 0.
 */
static int coh901318_start(struct coh901318_chan *cohc)
{
        u32 val;
        int channel = cohc->id;
        void __iomem *virtbase = cohc->base->virtbase;

        disable_powersave(cohc);

        val = readl(virtbase + COH901318_CX_CFG +
                    COH901318_CX_CFG_SPACING * channel);

        /* Enable channel */
        val |= COH901318_CX_CFG_CH_ENABLE;
        writel(val, virtbase + COH901318_CX_CFG +
               COH901318_CX_CFG_SPACING * channel);

        return 0;
}
284 | |
/*
 * Load the first lli of a chain into the channel's hardware registers
 * (source, destination, link and control). The channel must be idle:
 * programming an active channel would corrupt the running transfer,
 * hence the BUG_ON. Always returns 0.
 */
static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
                                      struct coh901318_lli *data)
{
        int channel = cohc->id;
        void __iomem *virtbase = cohc->base->virtbase;

        BUG_ON(readl(virtbase + COH901318_CX_STAT +
                     COH901318_CX_STAT_SPACING*channel) &
               COH901318_CX_STAT_ACTIVE);

        writel(data->src_addr,
               virtbase + COH901318_CX_SRC_ADDR +
               COH901318_CX_SRC_ADDR_SPACING * channel);

        writel(data->dst_addr, virtbase +
               COH901318_CX_DST_ADDR +
               COH901318_CX_DST_ADDR_SPACING * channel);

        writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
               COH901318_CX_LNK_ADDR_SPACING * channel);

        /* Writing the control register arms the transfer descriptor */
        writel(data->control, virtbase + COH901318_CX_CTRL +
               COH901318_CX_CTRL_SPACING * channel);

        return 0;
}
311 | static dma_cookie_t |
312 | coh901318_assign_cookie(struct coh901318_chan *cohc, |
313 | struct coh901318_desc *cohd) |
314 | { |
315 | dma_cookie_t cookie = cohc->chan.cookie; |
316 | |
317 | if (++cookie < 0) |
318 | cookie = 1; |
319 | |
320 | cohc->chan.cookie = cookie; |
321 | cohd->desc.cookie = cookie; |
322 | |
323 | return cookie; |
324 | } |
325 | |
326 | static struct coh901318_desc * |
327 | coh901318_desc_get(struct coh901318_chan *cohc) |
328 | { |
329 | struct coh901318_desc *desc; |
330 | |
331 | if (list_empty(&cohc->free)) { |
332 | /* alloc new desc because we're out of used ones |
333 | * TODO: alloc a pile of descs instead of just one, |
334 | * avoid many small allocations. |
335 | */ |
336 | desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); |
337 | if (desc == NULL) |
338 | goto out; |
339 | INIT_LIST_HEAD(&desc->node); |
340 | dma_async_tx_descriptor_init(&desc->desc, &cohc->chan); |
341 | } else { |
342 | /* Reuse an old desc. */ |
343 | desc = list_first_entry(&cohc->free, |
344 | struct coh901318_desc, |
345 | node); |
346 | list_del(&desc->node); |
347 | /* Initialize it a bit so it's not insane */ |
348 | desc->sg = NULL; |
349 | desc->sg_len = 0; |
350 | desc->desc.callback = NULL; |
351 | desc->desc.callback_param = NULL; |
352 | } |
353 | |
354 | out: |
355 | return desc; |
356 | } |
357 | |
/* Return a descriptor to the channel's free list for later reuse */
static void
coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
{
        list_add_tail(&cohd->node, &cohc->free);
}
363 | |
/* Move a descriptor onto the active list. Call with irq lock held. */
static void
coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
        list_add_tail(&desc->node, &cohc->active);
}
370 | |
371 | static struct coh901318_desc * |
372 | coh901318_first_active_get(struct coh901318_chan *cohc) |
373 | { |
374 | struct coh901318_desc *d; |
375 | |
376 | if (list_empty(&cohc->active)) |
377 | return NULL; |
378 | |
379 | d = list_first_entry(&cohc->active, |
380 | struct coh901318_desc, |
381 | node); |
382 | return d; |
383 | } |
384 | |
/* Unlink a descriptor from whichever list it currently sits on */
static void
coh901318_desc_remove(struct coh901318_desc *cohd)
{
        list_del(&cohd->node);
}
390 | |
/* Append a descriptor to the channel's pending-work queue */
static void
coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
        list_add_tail(&desc->node, &cohc->queue);
}
396 | |
397 | static struct coh901318_desc * |
398 | coh901318_first_queued(struct coh901318_chan *cohc) |
399 | { |
400 | struct coh901318_desc *d; |
401 | |
402 | if (list_empty(&cohc->queue)) |
403 | return NULL; |
404 | |
405 | d = list_first_entry(&cohc->queue, |
406 | struct coh901318_desc, |
407 | node); |
408 | return d; |
409 | } |
410 | |
/*
 * DMA start/stop controls
 */

/*
 * Read back the remaining transfer count for a channel, under the
 * channel lock.
 * NOTE(review): this returns the TC field of the channel's current
 * control register, i.e. presumably the residue of the lli currently
 * executing rather than of the whole job - confirm against callers.
 */
u32 coh901318_get_bytes_left(struct dma_chan *chan)
{
        unsigned long flags;
        u32 ret;
        struct coh901318_chan *cohc = to_coh901318_chan(chan);

        spin_lock_irqsave(&cohc->lock, flags);

        /* Read transfer count value */
        ret = readl(cohc->base->virtbase +
                    COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
                    cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;

        spin_unlock_irqrestore(&cohc->lock, flags);

        return ret;
}
EXPORT_SYMBOL(coh901318_get_bytes_left);
432 | |
433 | |
/* Stops a transfer without losing data. Enables power save.
   Use this function in conjunction with coh901318_continue(..)
*/
void coh901318_stop(struct dma_chan *chan)
{
        u32 val;
        unsigned long flags;
        struct coh901318_chan *cohc = to_coh901318_chan(chan);
        int channel = cohc->id;
        void __iomem *virtbase = cohc->base->virtbase;

        spin_lock_irqsave(&cohc->lock, flags);

        /* Disable channel in HW */
        val = readl(virtbase + COH901318_CX_CFG +
                    COH901318_CX_CFG_SPACING * channel);

        /* Stopping infinite transfer
         * NOTE(review): 'val' was read from the CFG register but is
         * tested against COH901318_CX_CTRL_TC_ENABLE, a CTRL-register
         * bit mask - verify against the hardware spec that this
         * cross-register test is intentional.
         */
        if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
            (val & COH901318_CX_CFG_CH_ENABLE))
                cohc->stopped = 1;


        val &= ~COH901318_CX_CFG_CH_ENABLE;
        /* Write the disable twice, HW bug work around */
        writel(val, virtbase + COH901318_CX_CFG +
               COH901318_CX_CFG_SPACING * channel);
        writel(val, virtbase + COH901318_CX_CFG +
               COH901318_CX_CFG_SPACING * channel);

        /* Spin-wait for it to actually go inactive */
        while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
                     channel) & COH901318_CX_STAT_ACTIVE)
                cpu_relax();

        /* Check if we stopped an active job */
        if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
                   channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
                cohc->stopped = 1;

        enable_powersave(cohc);

        spin_unlock_irqrestore(&cohc->lock, flags);
}
EXPORT_SYMBOL(coh901318_stop);
479 | |
/* Continues a transfer that has been stopped via coh901318_stop(..).
   Power save is handled.
*/
void coh901318_continue(struct dma_chan *chan)
{
        u32 val;
        unsigned long flags;
        struct coh901318_chan *cohc = to_coh901318_chan(chan);
        int channel = cohc->id;

        spin_lock_irqsave(&cohc->lock, flags);

        /* Block powersave again before re-enabling the channel */
        disable_powersave(cohc);

        if (cohc->stopped) {
                /* Enable channel in HW */
                val = readl(cohc->base->virtbase + COH901318_CX_CFG +
                            COH901318_CX_CFG_SPACING * channel);

                val |= COH901318_CX_CFG_CH_ENABLE;

                writel(val, cohc->base->virtbase + COH901318_CX_CFG +
                       COH901318_CX_CFG_SPACING*channel);

                cohc->stopped = 0;
        }

        spin_unlock_irqrestore(&cohc->lock, flags);
}
EXPORT_SYMBOL(coh901318_continue);
510 | |
511 | bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) |
512 | { |
513 | unsigned int ch_nr = (unsigned int) chan_id; |
514 | |
515 | if (ch_nr == to_coh901318_chan(chan)->id) |
516 | return true; |
517 | |
518 | return false; |
519 | } |
520 | EXPORT_SYMBOL(coh901318_filter_id); |
521 | |
522 | /* |
523 | * DMA channel allocation |
524 | */ |
/*
 * DMA channel allocation
 */

/*
 * (Re)configure a channel: clear any stale bus-error/terminal-count
 * interrupts, then program the configuration and last-lli control
 * registers, either from the caller-supplied @param or from the
 * platform defaults for this channel. Always returns 0.
 */
static int coh901318_config(struct coh901318_chan *cohc,
                            struct coh901318_params *param)
{
        unsigned long flags;
        const struct coh901318_params *p;
        int channel = cohc->id;
        void __iomem *virtbase = cohc->base->virtbase;

        spin_lock_irqsave(&cohc->lock, flags);

        if (param)
                p = param;
        else
                p = &cohc->base->platform->chan_conf[channel].param;

        /* Clear any pending BE or TC interrupt; channels 0-31 and
         * 32+ live in separate interrupt-clear registers */
        if (channel < 32) {
                writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
                writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
        } else {
                writel(1 << (channel - 32), virtbase +
                       COH901318_BE_INT_CLEAR2);
                writel(1 << (channel - 32), virtbase +
                       COH901318_TC_INT_CLEAR2);
        }

        coh901318_set_conf(cohc, p->config);
        coh901318_set_ctrl(cohc, p->ctrl_lli_last);

        spin_unlock_irqrestore(&cohc->lock, flags);

        return 0;
}
558 | |
/* must lock when calling this function
 * start queued jobs, if any
 * TODO: start all queued jobs in one go
 *
 * Returns descriptor if queued job is started otherwise NULL.
 * If the queue is empty NULL is returned.
 */
static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
{
        struct coh901318_desc *cohd_que;

        /* start queued jobs, if any
         * TODO: transmit all queued jobs in one go
         */
        cohd_que = coh901318_first_queued(cohc);

        if (cohd_que != NULL) {
                /* Remove from queue */
                coh901318_desc_remove(cohd_que);
                /* initiate DMA job */
                cohc->busy = 1;

                /* Move the descriptor onto the active list */
                coh901318_desc_submit(cohc, cohd_que);

                /* Program the lli chain into the channel registers */
                coh901318_prep_linked_list(cohc, cohd_que->data);

                /* start dma job */
                coh901318_start(cohc);

        }

        return cohd_que;
}
592 | |
/*
 * This tasklet is called from the interrupt handler to
 * handle each descriptor (DMA job) that is sent to a channel.
 */
static void dma_tasklet(unsigned long data)
{
        struct coh901318_chan *cohc = (struct coh901318_chan *) data;
        struct coh901318_desc *cohd_fin;
        unsigned long flags;
        dma_async_tx_callback callback;
        void *callback_param;

        dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
                 " nbr_active_done %ld\n", __func__,
                 cohc->id, cohc->nbr_active_done);

        spin_lock_irqsave(&cohc->lock, flags);

        /* get first active descriptor entry from list */
        cohd_fin = coh901318_first_active_get(cohc);

        if (cohd_fin == NULL)
                goto err;

        /* locate callback to client; copied out now because the
         * descriptor is recycled before the callback runs */
        callback = cohd_fin->desc.callback;
        callback_param = cohd_fin->desc.callback_param;

        /* sign this job as completed on the channel */
        cohc->completed = cohd_fin->desc.cookie;

        /* release the lli allocation and remove the descriptor */
        coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);

        /* return desc to free-list */
        coh901318_desc_remove(cohd_fin);
        coh901318_desc_free(cohc, cohd_fin);

        /* drop the lock: the client callback must not run under it */
        spin_unlock_irqrestore(&cohc->lock, flags);

        /* Call the callback when we're done */
        if (callback)
                callback(callback_param);

        spin_lock_irqsave(&cohc->lock, flags);

        /*
         * If another interrupt fired while the tasklet was scheduling,
         * we don't get called twice, so we have this number of active
         * counter that keep track of the number of IRQs expected to
         * be handled for this channel. If there happen to be more than
         * one IRQ to be ack:ed, we simply schedule this tasklet again.
         */
        cohc->nbr_active_done--;
        if (cohc->nbr_active_done) {
                dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
                        "came in while we were scheduling this tasklet\n");
                if (cohc_chan_conf(cohc)->priority_high)
                        tasklet_hi_schedule(&cohc->tasklet);
                else
                        tasklet_schedule(&cohc->tasklet);
        }

        spin_unlock_irqrestore(&cohc->lock, flags);

        return;

 err:
        spin_unlock_irqrestore(&cohc->lock, flags);
        dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
}
664 | |
665 | |
/*
 * Terminal-count handling, called from interrupt context with the
 * channel interrupt already acknowledged: account for one more
 * completed job, start the next queued job (if any), and kick the
 * completion tasklet.
 */
static void dma_tc_handle(struct coh901318_chan *cohc)
{
        /* A TC interrupt on an unallocated channel with no work at all
         * indicates driver-state corruption */
        BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
                                    list_empty(&cohc->queue)));

        if (!cohc->allocated)
                return;

        spin_lock(&cohc->lock);

        /* One more IRQ for the tasklet to consume (see dma_tasklet) */
        cohc->nbr_active_done++;

        /* No queued job to start means the channel goes idle */
        if (coh901318_queue_start(cohc) == NULL)
                cohc->busy = 0;

        BUG_ON(list_empty(&cohc->active));

        spin_unlock(&cohc->lock);

        if (cohc_chan_conf(cohc)->priority_high)
                tasklet_hi_schedule(&cohc->tasklet);
        else
                tasklet_schedule(&cohc->tasklet);
}
691 | |
692 | |
/*
 * Top-half interrupt handler for the whole controller. The 64 channels
 * are split across two status registers; for each asserted channel bit
 * we distinguish bus-error (fatal, BUG) from terminal-count (normal
 * completion, dispatched to dma_tc_handle()).
 *
 * NOTE(review): test_bit()/__set_bit() are used directly on __iomem
 * addresses here, and the interrupt-clear __set_bit() placed after
 * BUG_ON(1) is unreachable - both look questionable; confirm intent.
 */
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
        u32 status1;
        u32 status2;
        int i;
        int ch;
        struct coh901318_base *base = dev_id;
        struct coh901318_chan *cohc;
        void __iomem *virtbase = base->virtbase;

        status1 = readl(virtbase + COH901318_INT_STATUS1);
        status2 = readl(virtbase + COH901318_INT_STATUS2);

        if (unlikely(status1 == 0 && status2 == 0)) {
                dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
                return IRQ_HANDLED;
        }

        /* TODO: consider handle IRQ in tasklet here to
         * minimize interrupt latency */

        /* Check the first 32 DMA channels for IRQ */
        while (status1) {
                /* Find first bit set, return as a number. */
                i = ffs(status1) - 1;
                ch = i;

                cohc = &base->chans[ch];
                spin_lock(&cohc->lock);

                /* Mask off this bit */
                status1 &= ~(1 << i);
                /* Check the individual channel bits */
                if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
                        dev_crit(COHC_2_DEV(cohc),
                                 "DMA bus error on channel %d!\n", ch);
                        BUG_ON(1);
                        /* Clear BE interrupt */
                        __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
                } else {
                        /* Caused by TC, really? */
                        if (unlikely(!test_bit(i, virtbase +
                                               COH901318_TC_INT_STATUS1))) {
                                dev_warn(COHC_2_DEV(cohc),
                                         "ignoring interrupt not caused by terminal count on channel %d\n", ch);
                                /* Clear TC interrupt */
                                BUG_ON(1);
                                __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
                        } else {
                                /* Enable powersave if transfer has finished */
                                if (!(readl(virtbase + COH901318_CX_STAT +
                                            COH901318_CX_STAT_SPACING*ch) &
                                      COH901318_CX_STAT_ENABLED)) {
                                        enable_powersave(cohc);
                                }

                                /* Must clear TC interrupt before calling
                                 * dma_tc_handle
                                 * in case tc_handle initate a new dma job
                                 */
                                __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);

                                dma_tc_handle(cohc);
                        }
                }
                spin_unlock(&cohc->lock);
        }

        /* Check the remaining 32 DMA channels for IRQ */
        while (status2) {
                /* Find first bit set, return as a number. */
                i = ffs(status2) - 1;
                ch = i + 32;
                cohc = &base->chans[ch];
                spin_lock(&cohc->lock);

                /* Mask off this bit */
                status2 &= ~(1 << i);
                /* Check the individual channel bits */
                if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
                        dev_crit(COHC_2_DEV(cohc),
                                 "DMA bus error on channel %d!\n", ch);
                        /* Clear BE interrupt */
                        BUG_ON(1);
                        __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
                } else {
                        /* Caused by TC, really? */
                        if (unlikely(!test_bit(i, virtbase +
                                               COH901318_TC_INT_STATUS2))) {
                                dev_warn(COHC_2_DEV(cohc),
                                         "ignoring interrupt not caused by terminal count on channel %d\n", ch);
                                /* Clear TC interrupt */
                                __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
                                BUG_ON(1);
                        } else {
                                /* Enable powersave if transfer has finished */
                                if (!(readl(virtbase + COH901318_CX_STAT +
                                            COH901318_CX_STAT_SPACING*ch) &
                                      COH901318_CX_STAT_ENABLED)) {
                                        enable_powersave(cohc);
                                }
                                /* Must clear TC interrupt before calling
                                 * dma_tc_handle
                                 * in case tc_handle initate a new dma job
                                 */
                                __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);

                                dma_tc_handle(cohc);
                        }
                }
                spin_unlock(&cohc->lock);
        }

        return IRQ_HANDLED;
}
808 | |
/*
 * dmaengine alloc_chan_resources hook: refuse shared use, apply the
 * platform default configuration, and reset cookie accounting.
 * Returns 1 (the number of allocated descriptors) on success.
 */
static int coh901318_alloc_chan_resources(struct dma_chan *chan)
{
        struct coh901318_chan *cohc = to_coh901318_chan(chan);

        dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
                 __func__, cohc->id);

        /* Only a single client per channel is supported */
        if (chan->client_count > 1)
                return -EBUSY;

        coh901318_config(cohc, NULL);

        cohc->allocated = 1;
        cohc->completed = chan->cookie = 1;

        return 1;
}
826 | |
/*
 * dmaengine free_chan_resources hook: disable the channel in hardware,
 * mark it unallocated, then terminate all outstanding descriptors.
 */
static void
coh901318_free_chan_resources(struct dma_chan *chan)
{
        struct coh901318_chan *cohc = to_coh901318_chan(chan);
        int channel = cohc->id;
        unsigned long flags;

        spin_lock_irqsave(&cohc->lock, flags);

        /* Disable HW */
        writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
               COH901318_CX_CFG_SPACING*channel);
        writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
               COH901318_CX_CTRL_SPACING*channel);

        cohc->allocated = 0;

        spin_unlock_irqrestore(&cohc->lock, flags);

        /* Release every active/queued descriptor via terminate_all */
        chan->device->device_terminate_all(chan);
}
848 | |
849 | |
/*
 * tx_submit hook installed on every prepared descriptor: assign a
 * cookie and move the descriptor onto the channel's pending queue
 * (the transfer starts later, from issue_pending or the TC handler).
 */
static dma_cookie_t
coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
                                                   desc);
        struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
        unsigned long flags;

        spin_lock_irqsave(&cohc->lock, flags);

        tx->cookie = coh901318_assign_cookie(cohc, cohd);

        coh901318_desc_queue(cohc, cohd);

        spin_unlock_irqrestore(&cohc->lock, flags);

        return tx->cookie;
}
868 | |
869 | static struct dma_async_tx_descriptor * |
870 | coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
871 | size_t size, unsigned long flags) |
872 | { |
873 | struct coh901318_lli *data; |
874 | struct coh901318_desc *cohd; |
875 | unsigned long flg; |
876 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
877 | int lli_len; |
878 | u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; |
879 | int ret; |
880 | |
881 | spin_lock_irqsave(&cohc->lock, flg); |
882 | |
883 | dev_vdbg(COHC_2_DEV(cohc), |
884 | "[%s] channel %d src 0x%x dest 0x%x size %d\n", |
885 | __func__, cohc->id, src, dest, size); |
886 | |
887 | if (flags & DMA_PREP_INTERRUPT) |
888 | /* Trigger interrupt after last lli */ |
889 | ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; |
890 | |
891 | lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT; |
892 | if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size) |
893 | lli_len++; |
894 | |
895 | data = coh901318_lli_alloc(&cohc->base->pool, lli_len); |
896 | |
897 | if (data == NULL) |
898 | goto err; |
899 | |
900 | ret = coh901318_lli_fill_memcpy( |
901 | &cohc->base->pool, data, src, size, dest, |
902 | cohc_chan_param(cohc)->ctrl_lli_chained, |
903 | ctrl_last); |
904 | if (ret) |
905 | goto err; |
906 | |
907 | COH_DBG(coh901318_list_print(cohc, data)); |
908 | |
909 | /* Pick a descriptor to handle this transfer */ |
910 | cohd = coh901318_desc_get(cohc); |
911 | cohd->data = data; |
912 | cohd->flags = flags; |
913 | cohd->desc.tx_submit = coh901318_tx_submit; |
914 | |
915 | spin_unlock_irqrestore(&cohc->lock, flg); |
916 | |
917 | return &cohd->desc; |
918 | err: |
919 | spin_unlock_irqrestore(&cohc->lock, flg); |
920 | return NULL; |
921 | } |
922 | |
923 | static struct dma_async_tx_descriptor * |
924 | coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
925 | unsigned int sg_len, enum dma_data_direction direction, |
926 | unsigned long flags) |
927 | { |
928 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
929 | struct coh901318_lli *data; |
930 | struct coh901318_desc *cohd; |
931 | const struct coh901318_params *params; |
932 | struct scatterlist *sg; |
933 | int len = 0; |
934 | int size; |
935 | int i; |
936 | u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; |
937 | u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; |
938 | u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; |
939 | u32 config; |
940 | unsigned long flg; |
941 | int ret; |
942 | |
943 | if (!sgl) |
944 | goto out; |
945 | if (sgl->length == 0) |
946 | goto out; |
947 | |
948 | spin_lock_irqsave(&cohc->lock, flg); |
949 | |
950 | dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n", |
951 | __func__, sg_len, direction); |
952 | |
953 | if (flags & DMA_PREP_INTERRUPT) |
954 | /* Trigger interrupt after last lli */ |
955 | ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; |
956 | |
957 | params = cohc_chan_param(cohc); |
958 | config = params->config; |
959 | |
960 | if (direction == DMA_TO_DEVICE) { |
961 | u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | |
962 | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; |
963 | |
964 | config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY; |
965 | ctrl_chained |= tx_flags; |
966 | ctrl_last |= tx_flags; |
967 | ctrl |= tx_flags; |
968 | } else if (direction == DMA_FROM_DEVICE) { |
969 | u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | |
970 | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; |
971 | |
972 | config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY; |
973 | ctrl_chained |= rx_flags; |
974 | ctrl_last |= rx_flags; |
975 | ctrl |= rx_flags; |
976 | } else |
977 | goto err_direction; |
978 | |
979 | coh901318_set_conf(cohc, config); |
980 | |
981 | /* The dma only supports transmitting packages up to |
982 | * MAX_DMA_PACKET_SIZE. Calculate to total number of |
983 | * dma elemts required to send the entire sg list |
984 | */ |
985 | for_each_sg(sgl, sg, sg_len, i) { |
986 | unsigned int factor; |
987 | size = sg_dma_len(sg); |
988 | |
989 | if (size <= MAX_DMA_PACKET_SIZE) { |
990 | len++; |
991 | continue; |
992 | } |
993 | |
994 | factor = size >> MAX_DMA_PACKET_SIZE_SHIFT; |
995 | if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size) |
996 | factor++; |
997 | |
998 | len += factor; |
999 | } |
1000 | |
1001 | pr_debug("Allocate %d lli:s for this transfer\n", len); |
1002 | data = coh901318_lli_alloc(&cohc->base->pool, len); |
1003 | |
1004 | if (data == NULL) |
1005 | goto err_dma_alloc; |
1006 | |
1007 | /* initiate allocated data list */ |
1008 | ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len, |
1009 | cohc_dev_addr(cohc), |
1010 | ctrl_chained, |
1011 | ctrl, |
1012 | ctrl_last, |
1013 | direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); |
1014 | if (ret) |
1015 | goto err_lli_fill; |
1016 | |
1017 | COH_DBG(coh901318_list_print(cohc, data)); |
1018 | |
1019 | /* Pick a descriptor to handle this transfer */ |
1020 | cohd = coh901318_desc_get(cohc); |
1021 | cohd->dir = direction; |
1022 | cohd->flags = flags; |
1023 | cohd->desc.tx_submit = coh901318_tx_submit; |
1024 | cohd->data = data; |
1025 | |
1026 | spin_unlock_irqrestore(&cohc->lock, flg); |
1027 | |
1028 | return &cohd->desc; |
1029 | err_lli_fill: |
1030 | err_dma_alloc: |
1031 | err_direction: |
1032 | spin_unlock_irqrestore(&cohc->lock, flg); |
1033 | out: |
1034 | return NULL; |
1035 | } |
1036 | |
1037 | static enum dma_status |
1038 | coh901318_is_tx_complete(struct dma_chan *chan, |
1039 | dma_cookie_t cookie, dma_cookie_t *done, |
1040 | dma_cookie_t *used) |
1041 | { |
1042 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1043 | dma_cookie_t last_used; |
1044 | dma_cookie_t last_complete; |
1045 | int ret; |
1046 | |
1047 | last_complete = cohc->completed; |
1048 | last_used = chan->cookie; |
1049 | |
1050 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
1051 | |
1052 | if (done) |
1053 | *done = last_complete; |
1054 | if (used) |
1055 | *used = last_used; |
1056 | |
1057 | return ret; |
1058 | } |
1059 | |
1060 | static void |
1061 | coh901318_issue_pending(struct dma_chan *chan) |
1062 | { |
1063 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1064 | unsigned long flags; |
1065 | |
1066 | spin_lock_irqsave(&cohc->lock, flags); |
1067 | |
1068 | /* Busy means that pending jobs are already being processed */ |
1069 | if (!cohc->busy) |
1070 | coh901318_queue_start(cohc); |
1071 | |
1072 | spin_unlock_irqrestore(&cohc->lock, flags); |
1073 | } |
1074 | |
1075 | static void |
1076 | coh901318_terminate_all(struct dma_chan *chan) |
1077 | { |
1078 | unsigned long flags; |
1079 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1080 | struct coh901318_desc *cohd; |
1081 | void __iomem *virtbase = cohc->base->virtbase; |
1082 | |
1083 | coh901318_stop(chan); |
1084 | |
1085 | spin_lock_irqsave(&cohc->lock, flags); |
1086 | |
1087 | /* Clear any pending BE or TC interrupt */ |
1088 | if (cohc->id < 32) { |
1089 | writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1); |
1090 | writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1); |
1091 | } else { |
1092 | writel(1 << (cohc->id - 32), virtbase + |
1093 | COH901318_BE_INT_CLEAR2); |
1094 | writel(1 << (cohc->id - 32), virtbase + |
1095 | COH901318_TC_INT_CLEAR2); |
1096 | } |
1097 | |
1098 | enable_powersave(cohc); |
1099 | |
1100 | while ((cohd = coh901318_first_active_get(cohc))) { |
1101 | /* release the lli allocation*/ |
1102 | coh901318_lli_free(&cohc->base->pool, &cohd->data); |
1103 | |
1104 | /* return desc to free-list */ |
1105 | coh901318_desc_remove(cohd); |
1106 | coh901318_desc_free(cohc, cohd); |
1107 | } |
1108 | |
1109 | while ((cohd = coh901318_first_queued(cohc))) { |
1110 | /* release the lli allocation*/ |
1111 | coh901318_lli_free(&cohc->base->pool, &cohd->data); |
1112 | |
1113 | /* return desc to free-list */ |
1114 | coh901318_desc_remove(cohd); |
1115 | coh901318_desc_free(cohc, cohd); |
1116 | } |
1117 | |
1118 | |
1119 | cohc->nbr_active_done = 0; |
1120 | cohc->busy = 0; |
1121 | |
1122 | spin_unlock_irqrestore(&cohc->lock, flags); |
1123 | } |
1124 | void coh901318_base_init(struct dma_device *dma, const int *pick_chans, |
1125 | struct coh901318_base *base) |
1126 | { |
1127 | int chans_i; |
1128 | int i = 0; |
1129 | struct coh901318_chan *cohc; |
1130 | |
1131 | INIT_LIST_HEAD(&dma->channels); |
1132 | |
1133 | for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { |
1134 | for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { |
1135 | cohc = &base->chans[i]; |
1136 | |
1137 | cohc->base = base; |
1138 | cohc->chan.device = dma; |
1139 | cohc->id = i; |
1140 | |
1141 | /* TODO: do we really need this lock if only one |
1142 | * client is connected to each channel? |
1143 | */ |
1144 | |
1145 | spin_lock_init(&cohc->lock); |
1146 | |
1147 | cohc->nbr_active_done = 0; |
1148 | cohc->busy = 0; |
1149 | INIT_LIST_HEAD(&cohc->free); |
1150 | INIT_LIST_HEAD(&cohc->active); |
1151 | INIT_LIST_HEAD(&cohc->queue); |
1152 | |
1153 | tasklet_init(&cohc->tasklet, dma_tasklet, |
1154 | (unsigned long) cohc); |
1155 | |
1156 | list_add_tail(&cohc->chan.device_node, |
1157 | &dma->channels); |
1158 | } |
1159 | } |
1160 | } |
1161 | |
1162 | static int __init coh901318_probe(struct platform_device *pdev) |
1163 | { |
1164 | int err = 0; |
1165 | struct coh901318_platform *pdata; |
1166 | struct coh901318_base *base; |
1167 | int irq; |
1168 | struct resource *io; |
1169 | |
1170 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1171 | if (!io) |
1172 | goto err_get_resource; |
1173 | |
1174 | /* Map DMA controller registers to virtual memory */ |
1175 | if (request_mem_region(io->start, |
1176 | resource_size(io), |
1177 | pdev->dev.driver->name) == NULL) { |
1178 | err = -EBUSY; |
1179 | goto err_request_mem; |
1180 | } |
1181 | |
1182 | pdata = pdev->dev.platform_data; |
1183 | if (!pdata) |
1184 | goto err_no_platformdata; |
1185 | |
1186 | base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) + |
1187 | pdata->max_channels * |
1188 | sizeof(struct coh901318_chan), |
1189 | GFP_KERNEL); |
1190 | if (!base) |
1191 | goto err_alloc_coh_dma_channels; |
1192 | |
1193 | base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); |
1194 | |
1195 | base->virtbase = ioremap(io->start, resource_size(io)); |
1196 | if (!base->virtbase) { |
1197 | err = -ENOMEM; |
1198 | goto err_no_ioremap; |
1199 | } |
1200 | |
1201 | base->dev = &pdev->dev; |
1202 | base->platform = pdata; |
1203 | spin_lock_init(&base->pm.lock); |
1204 | base->pm.started_channels = 0; |
1205 | |
1206 | COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); |
1207 | |
1208 | platform_set_drvdata(pdev, base); |
1209 | |
1210 | irq = platform_get_irq(pdev, 0); |
1211 | if (irq < 0) |
1212 | goto err_no_irq; |
1213 | |
1214 | err = request_irq(irq, dma_irq_handler, IRQF_DISABLED, |
1215 | "coh901318", base); |
1216 | if (err) { |
1217 | dev_crit(&pdev->dev, |
1218 | "Cannot allocate IRQ for DMA controller!\n"); |
1219 | goto err_request_irq; |
1220 | } |
1221 | |
1222 | err = coh901318_pool_create(&base->pool, &pdev->dev, |
1223 | sizeof(struct coh901318_lli), |
1224 | 32); |
1225 | if (err) |
1226 | goto err_pool_create; |
1227 | |
1228 | /* init channels for device transfers */ |
1229 | coh901318_base_init(&base->dma_slave, base->platform->chans_slave, |
1230 | base); |
1231 | |
1232 | dma_cap_zero(base->dma_slave.cap_mask); |
1233 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); |
1234 | |
1235 | base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; |
1236 | base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; |
1237 | base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; |
1238 | base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete; |
1239 | base->dma_slave.device_issue_pending = coh901318_issue_pending; |
1240 | base->dma_slave.device_terminate_all = coh901318_terminate_all; |
1241 | base->dma_slave.dev = &pdev->dev; |
1242 | |
1243 | err = dma_async_device_register(&base->dma_slave); |
1244 | |
1245 | if (err) |
1246 | goto err_register_slave; |
1247 | |
1248 | /* init channels for memcpy */ |
1249 | coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy, |
1250 | base); |
1251 | |
1252 | dma_cap_zero(base->dma_memcpy.cap_mask); |
1253 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
1254 | |
1255 | base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; |
1256 | base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; |
1257 | base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; |
1258 | base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete; |
1259 | base->dma_memcpy.device_issue_pending = coh901318_issue_pending; |
1260 | base->dma_memcpy.device_terminate_all = coh901318_terminate_all; |
1261 | base->dma_memcpy.dev = &pdev->dev; |
1262 | /* |
1263 | * This controller can only access address at even 32bit boundaries, |
1264 | * i.e. 2^2 |
1265 | */ |
1266 | base->dma_memcpy.copy_align = 2; |
1267 | err = dma_async_device_register(&base->dma_memcpy); |
1268 | |
1269 | if (err) |
1270 | goto err_register_memcpy; |
1271 | |
1272 | dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", |
1273 | (u32) base->virtbase); |
1274 | |
1275 | return err; |
1276 | |
1277 | err_register_memcpy: |
1278 | dma_async_device_unregister(&base->dma_slave); |
1279 | err_register_slave: |
1280 | coh901318_pool_destroy(&base->pool); |
1281 | err_pool_create: |
1282 | free_irq(platform_get_irq(pdev, 0), base); |
1283 | err_request_irq: |
1284 | err_no_irq: |
1285 | iounmap(base->virtbase); |
1286 | err_no_ioremap: |
1287 | kfree(base); |
1288 | err_alloc_coh_dma_channels: |
1289 | err_no_platformdata: |
1290 | release_mem_region(pdev->resource->start, |
1291 | resource_size(pdev->resource)); |
1292 | err_request_mem: |
1293 | err_get_resource: |
1294 | return err; |
1295 | } |
1296 | |
/*
 * coh901318_remove() - tear down the controller
 * @pdev: the platform device being unbound
 *
 * Releases everything coh901318_probe() set up, in reverse order of
 * acquisition: DMA devices first (memcpy, then slave), then the lli
 * pool, the IRQ, the register mapping, the base allocation and
 * finally the memory region. The order matters; do not reshuffle.
 */
static int __exit coh901318_remove(struct platform_device *pdev)
{
	struct coh901318_base *base = platform_get_drvdata(pdev);

	dma_async_device_unregister(&base->dma_memcpy);
	dma_async_device_unregister(&base->dma_slave);
	coh901318_pool_destroy(&base->pool);
	free_irq(platform_get_irq(pdev, 0), base);
	iounmap(base->virtbase);
	kfree(base);
	/* pdev->resource is the MEM region requested in probe() */
	release_mem_region(pdev->resource->start,
			   resource_size(pdev->resource));
	return 0;
}
1311 | |
1312 | |
/*
 * Platform driver glue. There is deliberately no .probe member here:
 * the driver is registered with platform_driver_probe() from
 * coh901318_init(), which lets the probe routine live in __init
 * memory and be discarded after boot.
 */
static struct platform_driver coh901318_driver = {
	.remove = __exit_p(coh901318_remove),
	.driver = {
		.name	= "coh901318",
	},
};
1319 | |
/*
 * Module init: register the driver and probe any matching device
 * immediately. Runs at subsys_initcall time so DMA channels are
 * available before the drivers that consume them initialize.
 */
int __init coh901318_init(void)
{
	return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
subsys_initcall(coh901318_init);
1325 | |
/* Module exit: unregister the platform driver, unbinding any device. */
void __exit coh901318_exit(void)
{
	platform_driver_unregister(&coh901318_driver);
}
module_exit(coh901318_exit);
1331 | |
1332 | MODULE_LICENSE("GPL"); |
1333 | MODULE_AUTHOR("Per Friden"); |
1334 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9