/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
 * transfers are supported (tested using the dmatest module).
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS     64

/* Macro definitions */
#define MPC_DMA_CHANNELS        64
#define MPC_DMA_TCD_OFFSET      0x1000

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG      (1 << 31)
#define MPC_DMA_DMACR_ERGA      (1 << 3)
#define MPC_DMA_DMACR_ERCA      (1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD       (1 << 31)
#define MPC_DMA_DMAES_GPE       (1 << 15)
#define MPC_DMA_DMAES_CPE       (1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
                                (((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE       (1 << 7)
#define MPC_DMA_DMAES_SOE       (1 << 6)
#define MPC_DMA_DMAES_DAE       (1 << 5)
#define MPC_DMA_DMAES_DOE       (1 << 4)
#define MPC_DMA_DMAES_NCE       (1 << 3)
#define MPC_DMA_DMAES_SGE       (1 << 2)
#define MPC_DMA_DMAES_SBE       (1 << 1)
#define MPC_DMA_DMAES_DBE       (1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE    (1 << 6)

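/* Transfer size encoding used in the TCD ssize/dsize fields */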
#define MPC_DMA_TSIZE_1         0x00
#define MPC_DMA_TSIZE_2         0x01
#define MPC_DMA_TSIZE_4         0x02
#define MPC_DMA_TSIZE_16        0x04
#define MPC_DMA_TSIZE_32        0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
        /* 0x00 */
        u32 dmacr;              /* DMA control register */
        u32 dmaes;              /* DMA error status */
        /* 0x08 */
        u32 dmaerqh;            /* DMA enable request high(channels 63~32) */
        u32 dmaerql;            /* DMA enable request low(channels 31~0) */
        u32 dmaeeih;            /* DMA enable error interrupt high(ch63~32) */
        u32 dmaeeil;            /* DMA enable error interrupt low(ch31~0) */
        /* 0x18 */
        u8 dmaserq;             /* DMA set enable request */
        u8 dmacerq;             /* DMA clear enable request */
        u8 dmaseei;             /* DMA set enable error interrupt */
        u8 dmaceei;             /* DMA clear enable error interrupt */
        /* 0x1c */
        u8 dmacint;             /* DMA clear interrupt request */
        u8 dmacerr;             /* DMA clear error */
        u8 dmassrt;             /* DMA set start bit */
        u8 dmacdne;             /* DMA clear DONE status bit */
        /* 0x20 */
        u32 dmainth;            /* DMA interrupt request high(ch63~32) */
        u32 dmaintl;            /* DMA interrupt request low(ch31~0) */
        u32 dmaerrh;            /* DMA error high(ch63~32) */
        u32 dmaerrl;            /* DMA error low(ch31~0) */
        /* 0x30 */
        u32 dmahrsh;            /* DMA hw request status high(ch63~32) */
        u32 dmahrsl;            /* DMA hardware request status low(ch31~0) */
        union {
                u32 dmaihsa;    /* DMA interrupt high select AXE(ch63~32) */
                u32 dmagpor;    /* (General purpose register on MPC8308) */
        };
        u32 dmailsa;            /* DMA interrupt low select AXE(ch31~0) */
        /* 0x40 ~ 0xff */
        u32 reserve0[48];       /* Reserved */
        /* 0x100 */
        u8 dchpri[MPC_DMA_CHANNELS];
                                /* DMA channels(0~63) priority */
};

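/* Transfer Control Descriptor (TCD): describes a single DMA transfer */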
struct __attribute__ ((__packed__)) mpc_dma_tcd {
        /* 0x00 */
        u32 saddr;              /* Source address */

        u32 smod:5;             /* Source address modulo */
        u32 ssize:3;            /* Source data transfer size */
        u32 dmod:5;             /* Destination address modulo */
        u32 dsize:3;            /* Destination data transfer size */
        u32 soff:16;            /* Signed source address offset */

        /* 0x08 */
        u32 nbytes;             /* Inner "minor" byte count */
        u32 slast;              /* Last source address adjustment */
        u32 daddr;              /* Destination address */

        /* 0x14 */
        u32 citer_elink:1;      /* Enable channel-to-channel linking on
                                 * minor loop complete
                                 */
        u32 citer_linkch:6;     /* Link channel for minor loop complete */
        u32 citer:9;            /* Current "major" iteration count */
        u32 doff:16;            /* Signed destination address offset */

        /* 0x18 */
        u32 dlast_sga;          /* Last Destination address adjustment/scatter
                                 * gather address
                                 */

        /* 0x1c */
        u32 biter_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 biter_linkch:6;     /* Link channel for major loop complete */
        u32 biter:9;            /* Beginning "major" iteration count */
        u32 bwc:2;              /* Bandwidth control */
        u32 major_linkch:6;     /* Link channel number */
        u32 done:1;             /* Channel done */
        u32 active:1;           /* Channel active */
        u32 major_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 e_sg:1;             /* Enable scatter/gather processing */
        u32 d_req:1;            /* Disable request */
        u32 int_half:1;         /* Enable an interrupt when major counter is
                                 * half complete
                                 */
        u32 int_maj:1;          /* Enable an interrupt when major iteration
                                 * count completes
                                 */
        u32 start:1;            /* Channel start */
};

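/* Software transfer descriptor: a dmaengine descriptor plus its TCD */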
struct mpc_dma_desc {
        struct dma_async_tx_descriptor desc;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        int error;
        struct list_head node;
};

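/*
 * Per-channel state. Descriptors move between the lists as a transfer
 * progresses: free -> prepared -> queued -> active -> completed -> free.
 */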
struct mpc_dma_chan {
        struct dma_chan chan;
        struct list_head free;
        struct list_head prepared;
        struct list_head queued;
        struct list_head active;
        struct list_head completed;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;

        /* Lock for this structure */
        spinlock_t lock;
};

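/* Per-controller state */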
struct mpc_dma {
        struct dma_device dma;
        struct tasklet_struct tasklet;
        struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
        struct mpc_dma_regs __iomem *regs;
        struct mpc_dma_tcd __iomem *tcd;
        int irq;
        int irq2;
        uint error_status;
        int is_mpc8308;

        /* Lock for error_status field in this structure */
        spinlock_t error_status_lock;
};

#define DRV_NAME "mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

        return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 * a) mchan->lock is acquired,
 * b) mchan->active list is empty,
 * c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
        struct mpc_dma_desc *first = NULL;
        struct mpc_dma_desc *prev = NULL;
        struct mpc_dma_desc *mdesc;
        int cid = mchan->chan.chan_id;

        /* Move all queued descriptors to active list */
        list_splice_tail_init(&mchan->queued, &mchan->active);

        /* Chain descriptors into one transaction */
        list_for_each_entry(mdesc, &mchan->active, node) {
                if (!first)
                        first = mdesc;

                if (!prev) {
                        prev = mdesc;
                        continue;
                }

                prev->tcd->dlast_sga = mdesc->tcd_paddr;
                prev->tcd->e_sg = 1;
                mdesc->tcd->start = 1;

                prev = mdesc;
        }

        /* Interrupt when the last descriptor in the chain completes */
        prev->tcd->int_maj = 1;

        /* Send first descriptor in chain into hardware */
        memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

        if (first != prev)
                mdma->tcd[cid].e_sg = 1;

        out_8(&mdma->regs->dmassrt, cid);
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        u32 status = is | es;
        int ch;

        while ((ch = fls(status) - 1) >= 0) {
                status &= ~(1 << ch);
                mchan = &mdma->channels[ch + off];

                spin_lock(&mchan->lock);

                out_8(&mdma->regs->dmacint, ch + off);
                out_8(&mdma->regs->dmacerr, ch + off);

                /* Check error status */
                if (es & (1 << ch))
                        list_for_each_entry(mdesc, &mchan->active, node)
                                mdesc->error = -EIO;

                /* Execute queued descriptors */
                list_splice_tail_init(&mchan->active, &mchan->completed);
                if (!list_empty(&mchan->queued))
                        mpc_dma_execute(mchan);

                spin_unlock(&mchan->lock);
        }
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
        struct mpc_dma *mdma = data;
        uint es;

        /* Save error status register */
        es = in_be32(&mdma->regs->dmaes);
        spin_lock(&mdma->error_status_lock);
        if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
                mdma->error_status = es;
        spin_unlock(&mdma->error_status_lock);

        /* Handle interrupt on each channel */
        if (mdma->dma.chancnt > 32) {
                mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
                                        in_be32(&mdma->regs->dmaerrh), 32);
        }
        mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
                                        in_be32(&mdma->regs->dmaerrl), 0);

        /* Schedule tasklet */
        tasklet_schedule(&mdma->tasklet);

        return IRQ_HANDLED;
}

/* process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
        dma_cookie_t last_cookie = 0;
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < mdma->dma.chancnt; i++) {
                mchan = &mdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                if (!list_empty(&mchan->completed))
                        list_splice_tail_init(&mchan->completed, &list);
                spin_unlock_irqrestore(&mchan->lock, flags);

                if (list_empty(&list))
                        continue;

                /* Execute callbacks and run dependencies */
                list_for_each_entry(mdesc, &list, node) {
                        desc = &mdesc->desc;

                        if (desc->callback)
                                desc->callback(desc->callback_param);

                        last_cookie = desc->cookie;
                        dma_run_dependencies(desc);
                }

                /* Free descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                list_splice_tail_init(&list, &mchan->free);
                mchan->chan.completed_cookie = last_cookie;
                spin_unlock_irqrestore(&mchan->lock, flags);
        }
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
        struct mpc_dma *mdma = (void *)data;
        unsigned long flags;
        uint es;

        spin_lock_irqsave(&mdma->error_status_lock, flags);
        es = mdma->error_status;
        mdma->error_status = 0;
        spin_unlock_irqrestore(&mdma->error_status_lock, flags);

        /* Print nice error report */
        if (es) {
                dev_err(mdma->dma.dev,
                        "Hardware reported the following error(s) on channel %u:\n",
                        MPC_DMA_DMAES_ERRCHN(es));

                if (es & MPC_DMA_DMAES_GPE)
                        dev_err(mdma->dma.dev, "- Group Priority Error\n");
                if (es & MPC_DMA_DMAES_CPE)
                        dev_err(mdma->dma.dev, "- Channel Priority Error\n");
                if (es & MPC_DMA_DMAES_SAE)
                        dev_err(mdma->dma.dev, "- Source Address Error\n");
                if (es & MPC_DMA_DMAES_SOE)
                        dev_err(mdma->dma.dev,
                                "- Source Offset Configuration Error\n");
                if (es & MPC_DMA_DMAES_DAE)
                        dev_err(mdma->dma.dev,
                                "- Destination Address Error\n");
                if (es & MPC_DMA_DMAES_DOE)
                        dev_err(mdma->dma.dev,
                                "- Destination Offset Configuration Error\n");
                if (es & MPC_DMA_DMAES_NCE)
                        dev_err(mdma->dma.dev,
                                "- NBytes/Citer Configuration Error\n");
                if (es & MPC_DMA_DMAES_SGE)
                        dev_err(mdma->dma.dev,
                                "- Scatter/Gather Configuration Error\n");
                if (es & MPC_DMA_DMAES_SBE)
                        dev_err(mdma->dma.dev, "- Source Bus Error\n");
                if (es & MPC_DMA_DMAES_DBE)
                        dev_err(mdma->dma.dev, "- Destination Bus Error\n");
        }

        mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
        struct mpc_dma_desc *mdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        mdesc = container_of(txd, struct mpc_dma_desc, desc);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&mdesc->node, &mchan->queued);

        /* If channel is idle, execute all queued descriptors */
        if (list_empty(&mchan->active))
                mpc_dma_execute(mchan);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);
        spin_unlock_irqrestore(&mchan->lock, flags);

        return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        /* Alloc DMA memory for Transfer Control Descriptors */
        tcd = dma_alloc_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        &tcd_paddr, GFP_KERNEL);
        if (!tcd)
                return -ENOMEM;

        /* Alloc descriptors for this channel */
        for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
                mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
                if (!mdesc) {
                        dev_notice(mdma->dma.dev,
                                "Memory allocation error. Allocated only %u descriptors\n",
                                i);
                        break;
                }

                dma_async_tx_descriptor_init(&mdesc->desc, chan);
                mdesc->desc.flags = DMA_CTRL_ACK;
                mdesc->desc.tx_submit = mpc_dma_tx_submit;

                mdesc->tcd = &tcd[i];
                mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

                list_add_tail(&mdesc->node, &descs);
        }

        /* Return error only if no descriptors were allocated */
        if (i == 0) {
                dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        tcd, tcd_paddr);
                return -ENOMEM;
        }

        spin_lock_irqsave(&mchan->lock, flags);
        mchan->tcd = tcd;
        mchan->tcd_paddr = tcd_paddr;
        list_splice_tail_init(&descs, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Enable Error Interrupt */
        out_8(&mdma->regs->dmaseei, chan->chan_id);

        return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc, *tmp;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&mchan->prepared));
        BUG_ON(!list_empty(&mchan->queued));
        BUG_ON(!list_empty(&mchan->active));
        BUG_ON(!list_empty(&mchan->completed));

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);
        tcd = mchan->tcd;
        tcd_paddr = mchan->tcd_paddr;

        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Free DMA memory used by descriptors */
        dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                        tcd, tcd_paddr);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node)
                kfree(mdesc);

        /* Disable Error Interrupt */
        out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
        /*
         * We are posting descriptors to the hardware as soon as
         * they are ready, so this function does nothing.
         */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                  struct dma_tx_state *txstate)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        enum dma_status ret;
        unsigned long flags;

        spin_lock_irqsave(&mchan->lock, flags);
        ret = dma_cookie_status(chan, cookie, txstate);
        spin_unlock_irqrestore(&mchan->lock, flags);

        return ret;
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                    size_t len, unsigned long flags)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, iflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
                                         node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, iflags);

        if (!mdesc) {
                /* try to free completed descriptors */
                mpc_dma_process_completed(mdma);
                return NULL;
        }

        mdesc->error = 0;
        tcd = mdesc->tcd;

        /* Prepare Transfer Control Descriptor for this transaction */
        memset(tcd, 0, sizeof(struct mpc_dma_tcd));

        /* Pick the widest transfer size that src, dst and len are aligned to */
        if (IS_ALIGNED(src | dst | len, 32)) {
                tcd->ssize = MPC_DMA_TSIZE_32;
                tcd->dsize = MPC_DMA_TSIZE_32;
                tcd->soff = 32;
                tcd->doff = 32;
        } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
                /* MPC8308 doesn't support 16 byte transfers */
                tcd->ssize = MPC_DMA_TSIZE_16;
                tcd->dsize = MPC_DMA_TSIZE_16;
                tcd->soff = 16;
                tcd->doff = 16;
        } else if (IS_ALIGNED(src | dst | len, 4)) {
                tcd->ssize = MPC_DMA_TSIZE_4;
                tcd->dsize = MPC_DMA_TSIZE_4;
                tcd->soff = 4;
                tcd->doff = 4;
        } else if (IS_ALIGNED(src | dst | len, 2)) {
                tcd->ssize = MPC_DMA_TSIZE_2;
                tcd->dsize = MPC_DMA_TSIZE_2;
                tcd->soff = 2;
                tcd->doff = 2;
        } else {
                tcd->ssize = MPC_DMA_TSIZE_1;
                tcd->dsize = MPC_DMA_TSIZE_1;
                tcd->soff = 1;
                tcd->doff = 1;
        }

        tcd->saddr = src;
        tcd->daddr = dst;
        tcd->nbytes = len;
        tcd->biter = 1;
        tcd->citer = 1;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return &mdesc->desc;
}

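/*
 * Probe: map the controller registers, request the IRQ(s), initialize the
 * channels and register the controller with the dmaengine core.
 */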
static int __devinit mpc_dma_probe(struct platform_device *op)
{
        struct device_node *dn = op->dev.of_node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct mpc_dma *mdma;
        struct mpc_dma_chan *mchan;
        struct resource res;
        ulong regs_start, regs_size;
        int retval, i;

        mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
        if (!mdma) {
                dev_err(dev, "Memory exhausted!\n");
                return -ENOMEM;
        }

        mdma->irq = irq_of_parse_and_map(dn, 0);
        if (mdma->irq == NO_IRQ) {
                dev_err(dev, "Error mapping IRQ!\n");
                return -EINVAL;
        }

        if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
                mdma->is_mpc8308 = 1;
                mdma->irq2 = irq_of_parse_and_map(dn, 1);
                if (mdma->irq2 == NO_IRQ) {
                        dev_err(dev, "Error mapping IRQ!\n");
                        return -EINVAL;
                }
        }

        retval = of_address_to_resource(dn, 0, &res);
        if (retval) {
                dev_err(dev, "Error parsing memory region!\n");
                return retval;
        }

        regs_start = res.start;
        regs_size = resource_size(&res);

        if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
                dev_err(dev, "Error requesting memory region!\n");
                return -EBUSY;
        }

        mdma->regs = devm_ioremap(dev, regs_start, regs_size);
        if (!mdma->regs) {
                dev_err(dev, "Error mapping memory region!\n");
                return -ENOMEM;
        }

        mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
                                                        + MPC_DMA_TCD_OFFSET);

        retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
                                  mdma);
        if (retval) {
                dev_err(dev, "Error requesting IRQ!\n");
                return -EINVAL;
        }

        if (mdma->is_mpc8308) {
                retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
                                          DRV_NAME, mdma);
                if (retval) {
                        dev_err(dev, "Error requesting IRQ2!\n");
                        return -EINVAL;
                }
        }

        spin_lock_init(&mdma->error_status_lock);

        dma = &mdma->dma;
        dma->dev = dev;
        if (!mdma->is_mpc8308)
                dma->chancnt = MPC_DMA_CHANNELS;
        else
                dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
        dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = mpc_dma_free_chan_resources;
        dma->device_issue_pending = mpc_dma_issue_pending;
        dma->device_tx_status = mpc_dma_tx_status;
        dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);

        for (i = 0; i < dma->chancnt; i++) {
                mchan = &mdma->channels[i];

                mchan->chan.device = dma;
                dma_cookie_init(&mchan->chan);

                INIT_LIST_HEAD(&mchan->free);
                INIT_LIST_HEAD(&mchan->prepared);
                INIT_LIST_HEAD(&mchan->queued);
                INIT_LIST_HEAD(&mchan->active);
                INIT_LIST_HEAD(&mchan->completed);

                spin_lock_init(&mchan->lock);
                list_add_tail(&mchan->chan.device_node, &dma->channels);
        }

        tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

        /*
         * Configure DMA Engine:
         * - Dynamic clock,
         * - Round-robin group arbitration,
         * - Round-robin channel arbitration.
         */
        if (!mdma->is_mpc8308) {
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
                                MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

                /* Disable hardware DMA requests */
                out_be32(&mdma->regs->dmaerqh, 0);
                out_be32(&mdma->regs->dmaerql, 0);

                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeih, 0);
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

                /* Route interrupts to IPIC */
                out_be32(&mdma->regs->dmaihsa, 0);
                out_be32(&mdma->regs->dmailsa, 0);
        } else {
                /* MPC8308 has 16 channels and lacks some registers */
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

                /* enable snooping */
                out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmaintl, 0xFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFF);
        }

        /* Register DMA engine */
        dev_set_drvdata(dev, mdma);
        retval = dma_async_device_register(dma);
        if (retval) {
                devm_free_irq(dev, mdma->irq, mdma);
                irq_dispose_mapping(mdma->irq);
        }

        return retval;
}

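/* Remove: unregister from the dmaengine core and release the IRQ mapping */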
static int __devexit mpc_dma_remove(struct platform_device *op)
{
        struct device *dev = &op->dev;
        struct mpc_dma *mdma = dev_get_drvdata(dev);

        dma_async_device_unregister(&mdma->dma);
        devm_free_irq(dev, mdma->irq, mdma);
        irq_dispose_mapping(mdma->irq);

        return 0;
}

static struct of_device_id mpc_dma_match[] = {
        { .compatible = "fsl,mpc5121-dma", },
        {},
};

static struct platform_driver mpc_dma_driver = {
        .probe = mpc_dma_probe,
        .remove = __devexit_p(mpc_dma_remove),
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .of_match_table = mpc_dma_match,
        },
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");