/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * Refer to drivers/dma/imx-sdma.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/fsl/mxs-dma.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/irq.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means the
 * PIO mode of the mxs apbh-dma and apbx-dma controllers. In this mode,
 * the DMA engine can program the control registers of peripheral
 * devices by itself.
 */

#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
/*
 * The offset of the NXTCMDAR register differs with both DMA type and
 * version, while the stride between channels is always 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
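
/*
 * For example, on the i.MX28 APBH (not an i.MX23, so not "old"),
 * channel 4's NXTCMDAR register lives at 0x110 + 4 * 0x70 = 0x2d0.
 */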

/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)
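/*
 * For example, BF_CCW(MXS_DMA_CMD_READ, COMMAND) expands to
 * ((2 << 0) & (3 << 0)) == 0x2, and BF_CCW(5, PIO_NUM) to
 * ((5 << 12) & (0xf << 12)) == 0x5000.
 */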

#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */

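/*
 * Hardware command-chain word (CCW) as consumed by the APBH/APBX
 * engines: "next" holds the bus address of the following CCW, and the
 * PIO words are programmed into the peripheral's registers when the
 * command runs in PIO mode.
 */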
struct mxs_dma_ccw {
	u32		next;
	u16		bits;
	u16		xfer_bytes;
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];
};

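/*
 * One page of CCWs is allocated per channel; with 4 KiB pages and the
 * 76-byte CCW above, this yields 53 descriptors per channel.
 */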
#define NUM_CCW	(int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))

struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;
	int				chan_irq;
	struct mxs_dma_ccw		*ccw;
	dma_addr_t			ccw_phys;
	int				desc_count;
	enum dma_status			status;
	unsigned int			flags;
#define MXS_DMA_SG_LOOP			(1 << 0)
};

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff

enum mxs_dma_devtype {
	MXS_DMA_APBH,
	MXS_DMA_APBX,
};

enum mxs_dma_id {
	IMX23_DMA,
	IMX28_DMA,
};

struct mxs_dma_engine {
	enum mxs_dma_id			dev_id;
	enum mxs_dma_devtype		type;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct device_dma_parameters	dma_parms;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
};

struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};

static struct platform_device_id mxs_dma_ids[] = {
	{
		.name = "imx23-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
	}, {
		.name = "imx23-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
	}, {
		.name = "imx28-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
	}, {
		.name = "imx28-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
	}, {
		/* end of list */
	}
};

static const struct of_device_id mxs_dma_dt_ids[] = {
	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}

int mxs_dma_is_apbh(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	return dma_is_apbh(mxs_dma);
}
EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);

int mxs_dma_is_apbx(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	return !dma_is_apbh(mxs_dma);
}
EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);

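/*
 * The STMP-style register blocks on these SoCs provide SET/CLR/TGL
 * shadow registers at fixed offsets from each control register, so
 * individual bits can be changed without a read-modify-write cycle
 * (see <linux/stmp_device.h>).
 */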
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}

static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
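	/*
	 * Only the final CCW of a prepared chain carries CCW_DEC_SEM
	 * (cyclic chains carry none), so this single semaphore increment
	 * is enough to run the whole chain.
	 */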
	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}

static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
	mxs_chan->status = DMA_SUCCESS;
}

static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
}

static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(unsigned long data)
{
	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;

	if (mxs_chan->desc.callback)
		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	u32 stat1, stat2;

	/* completion status */
	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
	stat1 &= MXS_DMA_CHANNELS_MASK;
	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

	/* error status */
	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

	/*
	 * When both the completion and the error-of-termination bits are
	 * set at the same time, we do not treat it as an error. In other
	 * words, it only becomes an error we need to handle here if it is
	 * either (1) a bus error or (2) a termination error with no
	 * completion.
	 */
	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
		(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */

	/* combine error and completion status for checking */
	stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
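	/*
	 * Errors now occupy bits 16..31 and completions bits 0..15, e.g.
	 * an error on channel 2 sets bit 18 while a completion on channel
	 * 5 sets bit 5, so the fls() below serves errors first.
	 */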
	while (stat1) {
		int channel = fls(stat1) - 1;
		struct mxs_dma_chan *mxs_chan =
			&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];

		if (channel >= MXS_DMA_CHANNELS) {
			dev_dbg(mxs_dma->dma_device.dev,
				"%s: error in channel %d\n", __func__,
				channel - MXS_DMA_CHANNELS);
			mxs_chan->status = DMA_ERROR;
			mxs_dma_reset_chan(mxs_chan);
		} else {
			if (mxs_chan->flags & MXS_DMA_SG_LOOP)
				mxs_chan->status = DMA_IN_PROGRESS;
			else
				mxs_chan->status = DMA_SUCCESS;
		}

		stat1 &= ~(1 << channel);

		if (mxs_chan->status == DMA_SUCCESS)
			dma_cookie_complete(&mxs_chan->desc);

		/* schedule tasklet on this channel */
		tasklet_schedule(&mxs_chan->tasklet);
	}

	return IRQ_HANDLED;
}

static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_data *data = chan->private;
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	if (!data)
		return -EINVAL;

	mxs_chan->chan_irq = data->chan_irq;

	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
				&mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	memset(mxs_chan->ccw, 0, PAGE_SIZE);

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(mxs_chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(mxs_chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}

/*
 * How to use the flags for ->device_prep_slave_sg() :
 * [1] If there is only one DMA command in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(DMA_CTRL_ACK);
 *            ......
 * [2] If there are two DMA commands in the DMA chain, the code should be:
 *            ......
 *            ->device_prep_slave_sg(0);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *            ......
 * [3] If there are more than two DMA commands in the DMA chain, the code
 *     should be:
 *            ......
 *            ->device_prep_slave_sg(0);                                // First
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
 *            ......
 *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
 *            ......
 */
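/*
 * A minimal sketch of case [2] as seen from a client driver; the names
 * pio_words, pio_count, sgl and sg_len are illustrative, not part of
 * this file. A PIO command is passed as a fake scatterlist with
 * direction DMA_TRANS_NONE (see the handling below):
 *
 *	desc = dmaengine_prep_slave_sg(chan,
 *			(struct scatterlist *) pio_words, pio_count,
 *			DMA_TRANS_NONE, 0);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */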
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	int i, j;
	u32 *pio;
	bool append = flags & DMA_PREP_INTERRUPT;
	int idx = append ? mxs_chan->desc_count : 0;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum number of sg exceeded: %d > %d\n",
			sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg list is prepared with the append flag set, it gets
	 * appended to the last prepared sg list.
	 */
	if (append) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len; j++)
			ccw->pio_words[j] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & DMA_CTRL_ACK)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg_dma_len(sg), MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg_dma_len(sg);

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & DMA_CTRL_ACK)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int num_periods = buf_len / period_len;
	int i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum number of sg exceeded: %d > %d\n",
			num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
			"maximum period size exceeded: %zu > %d\n",
			period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}
	mxs_chan->desc_count = i;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}
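
/*
 * A minimal sketch of cyclic use from a client (e.g. audio); buf_phys,
 * buf_len and period_len are illustrative names. The last CCW above
 * links back to the first, so the transfer loops and raises an IRQ per
 * period until the channel is terminated:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *			period_len, DMA_MEM_TO_DEV);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */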

static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	int ret = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mxs_dma_reset_chan(mxs_chan);
		mxs_dma_disable_chan(mxs_chan);
		break;
	case DMA_PAUSE:
		mxs_dma_pause_chan(mxs_chan);
		break;
	case DMA_RESUME:
		mxs_dma_resume_chan(mxs_chan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	dma_cookie_t last_used;

	last_used = chan->cookie;
	dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);

	return mxs_chan->status;
}

static void mxs_dma_issue_pending(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_dma_enable_chan(mxs_chan);
}

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* enable apbh burst */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/* enable irq for all the channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

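	/*
	 * On success we fall through with ret == 0; the clock is dropped
	 * here and re-enabled per channel in mxs_dma_alloc_chan_resources().
	 */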
662 | clk_disable_unprepare(mxs_dma->clk); |
663 | return ret; |
664 | } |
665 | |
666 | static int __init mxs_dma_probe(struct platform_device *pdev) |
667 | { |
668 | const struct platform_device_id *id_entry; |
669 | const struct of_device_id *of_id; |
670 | const struct mxs_dma_type *dma_type; |
671 | struct mxs_dma_engine *mxs_dma; |
672 | struct resource *iores; |
673 | int ret, i; |
674 | |
675 | mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL); |
676 | if (!mxs_dma) |
677 | return -ENOMEM; |
678 | |
679 | of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev); |
680 | if (of_id) |
681 | id_entry = of_id->data; |
682 | else |
683 | id_entry = platform_get_device_id(pdev); |
684 | |
685 | dma_type = (struct mxs_dma_type *)id_entry->driver_data; |
686 | mxs_dma->type = dma_type->type; |
687 | mxs_dma->dev_id = dma_type->id; |
688 | |
	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores) {
		ret = -ENODEV;
		goto err_request_region;
	}

	if (!request_mem_region(iores->start, resource_size(iores),
				pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	mxs_dma->base = ioremap(iores->start, resource_size(iores));
	if (!mxs_dma->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	mxs_dma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk)) {
		ret = PTR_ERR(mxs_dma->clk);
		goto err_clk;
	}

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;
		dma_cookie_init(&mxs_chan->chan);

		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
			     (unsigned long) mxs_chan);

		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			&mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		goto err_init;

	mxs_dma->dma_device.dev = &pdev->dev;

	/* mxs_dma takes at most MAX_XFER_BYTES (0xff00 = 65280) bytes per sg segment */
	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_control = mxs_dma_control;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;

	ret = dma_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		goto err_init;
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;

err_init:
	clk_put(mxs_dma->clk);
err_clk:
	iounmap(mxs_dma->base);
err_ioremap:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
	kfree(mxs_dma);
	return ret;
}

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
		.of_match_table = mxs_dma_dt_ids,
	},
	.id_table	= mxs_dma_ids,
};

static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);