/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

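/*
 * Each physical channel is double-buffered: DBSA/DBTA and DBSB/DBTB hold
 * the address and length of buffers A and B.  DMA_MAX_SIZE bounds the
 * length programmed into a single buffer, and addresses and chunk
 * lengths are kept aligned by masking with DMA_ALIGN.
 */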
#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20

#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)

struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[0];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->vc.lock */
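	/* txd_load/sg_load track the descriptor currently being programmed
	 * into the hardware buffers; txd_done/sg_done track the descriptor
	 * whose buffers are completing. */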
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
#ifdef CONFIG_PM_SLEEP
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
#endif
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}

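/*
 * A hardware buffer has completed: account for it against the current
 * descriptor, complete or cycle the descriptor as appropriate, and try
 * to load the next chunk into the now-free buffer.
 */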
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
			(DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

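/*
 * The tasklet tidies up after completed work: it releases physical
 * channels that have run out of descriptors, and hands free physical
 * channels to virtual channels waiting on the pending list.
 */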
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}


static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}

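/*
 * Read back the address the channel is currently transferring.  The BIU
 * and STRTA/STRTB bits identify which of the two buffer address
 * registers describes the in-progress buffer.
 */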
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_SUCCESS)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

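	/*
	 * Work out how many hardware sg entries are needed: entries longer
	 * than DMA_MAX_SIZE are split into several chunks, and buffer
	 * alignment is validated while scanning the list.
	 */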
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
				&c->vc, addr);
			return NULL;
		}
	}

	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

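	/*
	 * Split each period into chunks no larger than the hardware can
	 * handle in one buffer; sglen is the total number of chunks for
	 * the whole cyclic buffer.
	 */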
	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
		&c->vc, addr, width, maxburst);

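	/* Fold the device address into the DDAR device address field */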
	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}

static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
		/* Clear the tx descriptor lists */
		spin_lock_irqsave(&c->vc.lock, flags);
		vchan_get_all_descriptors(&c->vc, &head);

		p = c->phy;
		if (p) {
			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
			/* vchan is assigned to a pchan - stop the channel */
			writel(DCSR_RUN | DCSR_IE |
			       DCSR_STRTA | DCSR_DONEA |
			       DCSR_STRTB | DCSR_DONEB,
			       p->base + DMA_DCSR_C);

			if (p->txd_load) {
				if (p->txd_load != p->txd_done)
					list_add_tail(&p->txd_load->vd.node, &head);
				p->txd_load = NULL;
			}
			if (p->txd_done) {
				list_add_tail(&p->txd_done->vd.node, &head);
				p->txd_done = NULL;
			}
			c->phy = NULL;
			spin_lock(&d->lock);
			p->vchan = NULL;
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		vchan_dma_desc_free_list(&c->vc, &head);
		ret = 0;
		break;

	case DMA_PAUSE:
		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->status == DMA_IN_PROGRESS) {
			c->status = DMA_PAUSED;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			} else {
				spin_lock(&d->lock);
				list_del_init(&c->node);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		ret = 0;
		break;

	case DMA_RESUME:
		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->status == DMA_PAUSED) {
			c->status = DMA_IN_PROGRESS;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
			} else if (!list_empty(&c->vc.desc_issued)) {
				spin_lock(&d->lock);
				list_add_tail(&c->node, &d->chan_pending);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		ret = 0;
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};

static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	dmadev->chancnt = ARRAY_SIZE(chan_desc);
	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_control = sa11x0_dma_control;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < dmadev->chancnt; i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);
	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
err_ioremap:
	kfree(d);
err_alloc:
	return ret;
}

static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
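		/*
		 * Save the buffer registers so that the transfer the
		 * hardware was working on is restored as buffer A on
		 * resume, with the start bits adjusted to match.
		 */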
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
#endif

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name = "sa11x0-dma",
		.owner = THIS_MODULE,
		.pm = &sa11x0_dma_pm_ops,
	},
	.probe = sa11x0_dma_probe,
	.remove = __devexit_p(sa11x0_dma_remove),
};

bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");