Root/
Source at commit 651ef2fc7a6fbe42fe9391104c95bb17ca6ee227, created 9 years 8 months ago by Apelete Seketeli: "mmc: jz4740: prepare next dma transfer in parallel with current transfer".
---|---|
1 | /* |
2 | * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> |
3 | * JZ4740 SD/MMC controller driver |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License as published by the |
7 | * Free Software Foundation; either version 2 of the License, or (at your |
8 | * option) any later version. |
9 | * |
10 | * You should have received a copy of the GNU General Public License along |
11 | * with this program; if not, write to the Free Software Foundation, Inc., |
12 | * 675 Mass Ave, Cambridge, MA 02139, USA. |
13 | * |
14 | */ |
15 | |
16 | #include <linux/mmc/host.h> |
17 | #include <linux/mmc/slot-gpio.h> |
18 | #include <linux/err.h> |
19 | #include <linux/io.h> |
20 | #include <linux/irq.h> |
21 | #include <linux/interrupt.h> |
22 | #include <linux/module.h> |
23 | #include <linux/platform_device.h> |
24 | #include <linux/delay.h> |
25 | #include <linux/scatterlist.h> |
26 | #include <linux/clk.h> |
27 | #include <linux/cpufreq.h> |
28 | |
29 | #include <linux/bitops.h> |
30 | #include <linux/gpio.h> |
31 | #include <asm/mach-jz4740/gpio.h> |
32 | #include <asm/cacheflush.h> |
33 | #include <linux/dma-mapping.h> |
34 | #include <linux/dmaengine.h> |
35 | |
36 | #include <asm/mach-jz4740/dma.h> |
37 | #include <asm/mach-jz4740/jz4740_mmc.h> |
38 | |
/* MSC register offsets (relative to the controller base address) */
#define JZ_REG_MMC_STRPCL	0x00
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08
#define JZ_REG_MMC_CMDAT	0x0C
#define JZ_REG_MMC_RESTO	0x10
#define JZ_REG_MMC_RDTO		0x14
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C
#define JZ_REG_MMC_SNOB		0x20
#define JZ_REG_MMC_IMASK	0x24
#define JZ_REG_MMC_IREG		0x28
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C

/* STRPCL: start/stop clock and controller operations */
#define JZ_MMC_STRPCL_EXIT_MULTIPLE	BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER	BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT	BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT	BIT(4)
#define JZ_MMC_STRPCL_RESET		BIT(3)
#define JZ_MMC_STRPCL_START_OP		BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL	(BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP	BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START	BIT(1)


/* STATUS register bits */
#define JZ_MMC_STATUS_IS_RESETTING	BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE	BIT(14)
#define JZ_MMC_STATUS_PRG_DONE		BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE	BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES	BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL	BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT	BIT(9)
#define JZ_MMC_STATUS_CLK_EN		BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL	BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY	BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR	BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR	BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE	BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR	BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES	BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ	BIT(0)

/* Combined error masks: CRC error + timeout, per direction */
#define JZ_MMC_STATUS_READ_ERROR_MASK	(BIT(4) | BIT(0))
#define JZ_MMC_STATUS_WRITE_ERROR_MASK	(BIT(3) | BIT(2))


/* CMDAT: command/data transfer control bits */
#define JZ_MMC_CMDAT_IO_ABORT		BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT	BIT(10)
#define JZ_MMC_CMDAT_DMA_EN		BIT(8)
#define JZ_MMC_CMDAT_INIT		BIT(7)
#define JZ_MMC_CMDAT_BUSY		BIT(6)
#define JZ_MMC_CMDAT_STREAM		BIT(5)
#define JZ_MMC_CMDAT_WRITE		BIT(4)
#define JZ_MMC_CMDAT_DATA_EN		BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT	(BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1		1
#define JZ_MMC_CMDAT_RSP_R2		2
#define JZ_MMC_CMDAT_RSP_R3		3

/* IMASK/IREG interrupt bits */
#define JZ_MMC_IRQ_SDIO			BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ	BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ	BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES		BIT(2)
#define JZ_MMC_IRQ_PRG_DONE		BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE	BIT(0)


/* Rate requested from the "mmc" clock; divided down via CLKRT */
#define JZ_MMC_CLK_RATE			24000000
110 | |
/* Stages of the request state machine driven by jz_mmc_irq_worker() */
enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};
117 | |
/* Bookkeeping for a transfer mapped ahead of time by the .pre_req hook */
struct jz4740_mmc_host_next {
	int sg_len;	/* entries returned by dma_map_sg; 0 = nothing prepared */
	s32 cookie;	/* matched against mmc_data.host_cookie */
};
122 | |
struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct jz4740_mmc_platform_data *pdata;
	struct clk *clk;

	int irq;
	int card_detect_irq;

	void __iomem *base;		/* ioremapped MSC register window */
	struct resource *mem_res;	/* physical register resource (DMA FIFO addresses) */
	struct mmc_request *req;	/* request currently being processed */
	struct mmc_command *cmd;	/* command currently being processed */

	/* bit 0 arbitrates between the IRQ handler and timeout_timer */
	unsigned long waiting;

	uint32_t cmdat;			/* sticky CMDAT bits (bus width, INIT) */

	uint16_t irq_mask;		/* cached copy of the IMASK register */

	spinlock_t lock;		/* protects irq_mask updates */

	struct timer_list timeout_timer;	/* 5 s software watchdog */
	struct sg_mapping_iter miter;	/* PIO position within the request */
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	struct jz4740_mmc_host_next next_data;	/* mapping prepared by .pre_req */
	bool use_dma;
	int sg_len;		/* mapped sg entries for the current transfer */

	/* The DMA trigger level is 8 words, that is to say, the DMA read
	 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
	 * trigger is when data words in MSC_TXFIFO is < 8.
	 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};
162 | |
163 | /*----------------------------------------------------------------------------*/ |
164 | /* DMA infrastructure */ |
165 | |
166 | static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host) |
167 | { |
168 | if (!host->use_dma) |
169 | return; |
170 | |
171 | dma_release_channel(host->dma_tx); |
172 | dma_release_channel(host->dma_rx); |
173 | } |
174 | |
175 | static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host) |
176 | { |
177 | dma_cap_mask_t mask; |
178 | |
179 | dma_cap_zero(mask); |
180 | dma_cap_set(DMA_SLAVE, mask); |
181 | |
182 | host->dma_tx = dma_request_channel(mask, NULL, host); |
183 | if (!host->dma_tx) { |
184 | dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n"); |
185 | return -ENODEV; |
186 | } |
187 | |
188 | host->dma_rx = dma_request_channel(mask, NULL, host); |
189 | if (!host->dma_rx) { |
190 | dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n"); |
191 | goto free_master_write; |
192 | } |
193 | |
194 | /* Initialize DMA pre request cookie */ |
195 | host->next_data.cookie = 1; |
196 | |
197 | return 0; |
198 | |
199 | free_master_write: |
200 | dma_release_channel(host->dma_tx); |
201 | return -ENODEV; |
202 | } |
203 | |
204 | static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data) |
205 | { |
206 | return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
207 | } |
208 | |
209 | static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host, |
210 | struct mmc_data *data) |
211 | { |
212 | return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx; |
213 | } |
214 | |
215 | static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host, |
216 | struct mmc_data *data) |
217 | { |
218 | struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); |
219 | enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data); |
220 | |
221 | dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); |
222 | } |
223 | |
/* Prepares DMA data for current/next transfer, returns non-zero on failure */
static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
				       struct mmc_data *data,
				       struct jz4740_mmc_host_next *next,
				       struct dma_chan *chan)
{
	struct jz4740_mmc_host_next *next_data = &host->next_data;
	enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
	int sg_len;

	/*
	 * Called for the current transfer (next == NULL): a non-zero
	 * host_cookie that does not match the one handed out by the last
	 * pre_req means the prepared mapping cannot be trusted; warn and
	 * clear the cookie so the scatterlist is mapped again below.
	 */
	if (!next && data->host_cookie &&
	    data->host_cookie != host->next_data.cookie) {
		dev_warn(mmc_dev(host->mmc),
			 "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n",
			 __func__,
			 data->host_cookie,
			 host->next_data.cookie);
		data->host_cookie = 0;
	}

	/* Check if next job is already prepared */
	if (next || data->host_cookie != host->next_data.cookie) {
		sg_len = dma_map_sg(chan->device->dev,
				    data->sg,
				    data->sg_len,
				    dir);

	} else {
		/* Reuse the mapping created by a previous pre_req call. */
		sg_len = next_data->sg_len;
		next_data->sg_len = 0;
	}

	if (sg_len <= 0) {
		dev_err(mmc_dev(host->mmc),
			"Failed to map scatterlist for DMA operation\n");
		return -EINVAL;
	}

	if (next) {
		/* Hand out a new positive cookie identifying this mapping. */
		next->sg_len = sg_len;
		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
	} else
		host->sg_len = sg_len;

	return 0;
}
270 | |
/*
 * Configure the slave channel for the transfer direction, map the data
 * (unless already mapped by pre_req) and submit the descriptor.
 * Returns 0 on success or a negative error code.
 */
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	int ret;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	/* Burst size matches the controller's 8-word FIFO trigger level. */
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
		chan = host->dma_tx;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
		chan = host->dma_rx;
	}

	ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan);
	if (ret)
		return ret;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan,
				       data->sg,
				       host->sg_len,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	/* Descriptor allocation failed: release the mapping made above. */
	jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}
322 | |
/*
 * mmc_host_ops .pre_req: map the scatterlist of the next request while the
 * current transfer is still running, hiding the DMA setup cost.
 */
static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq,
				   bool is_first_req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct jz4740_mmc_host_next *next_data = &host->next_data;

	/* A request entering pre_req must not carry a stale cookie.
	 * NOTE(review): BUG_ON is harsh for a recoverable condition —
	 * consider warning and clearing the cookie instead. */
	BUG_ON(data->host_cookie);

	if (host->use_dma) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		/* On failure a zero cookie makes start_dma_transfer() map
		 * the scatterlist itself later. */
		if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan))
			data->host_cookie = 0;
	}
}
340 | |
/*
 * mmc_host_ops .post_req: undo the pre_req mapping once the request has
 * completed, and stop the channel if the request errored out.
 */
static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq,
				    int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (host->use_dma && data->host_cookie) {
		jz4740_mmc_dma_unmap(host, data);
		data->host_cookie = 0;
	}

	if (err) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}
359 | |
360 | /*----------------------------------------------------------------------------*/ |
361 | |
362 | static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host, |
363 | unsigned int irq, bool enabled) |
364 | { |
365 | unsigned long flags; |
366 | |
367 | spin_lock_irqsave(&host->lock, flags); |
368 | if (enabled) |
369 | host->irq_mask &= ~irq; |
370 | else |
371 | host->irq_mask |= irq; |
372 | spin_unlock_irqrestore(&host->lock, flags); |
373 | |
374 | writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK); |
375 | } |
376 | |
377 | static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host, |
378 | bool start_transfer) |
379 | { |
380 | uint16_t val = JZ_MMC_STRPCL_CLOCK_START; |
381 | |
382 | if (start_transfer) |
383 | val |= JZ_MMC_STRPCL_START_OP; |
384 | |
385 | writew(val, host->base + JZ_REG_MMC_STRPCL); |
386 | } |
387 | |
388 | static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host) |
389 | { |
390 | uint32_t status; |
391 | unsigned int timeout = 1000; |
392 | |
393 | writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL); |
394 | do { |
395 | status = readl(host->base + JZ_REG_MMC_STATUS); |
396 | } while (status & JZ_MMC_STATUS_CLK_EN && --timeout); |
397 | } |
398 | |
399 | static void jz4740_mmc_reset(struct jz4740_mmc_host *host) |
400 | { |
401 | uint32_t status; |
402 | unsigned int timeout = 1000; |
403 | |
404 | writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL); |
405 | udelay(10); |
406 | do { |
407 | status = readl(host->base + JZ_REG_MMC_STATUS); |
408 | } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout); |
409 | } |
410 | |
411 | static void jz4740_mmc_request_done(struct jz4740_mmc_host *host) |
412 | { |
413 | struct mmc_request *req; |
414 | |
415 | req = host->req; |
416 | host->req = NULL; |
417 | |
418 | mmc_request_done(host->mmc, req); |
419 | } |
420 | |
/*
 * Busy-wait for @irq to assert in IREG.
 *
 * Returns false if the interrupt fired within the poll budget.  Returns true
 * on timeout; in that case the 5 s software watchdog is armed and @irq is
 * unmasked so the transfer can be finished from the interrupt handler.
 * NOTE(review): declared unsigned int but used as a boolean by all callers.
 */
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
	unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint16_t status;

	do {
		status = readw(host->base + JZ_REG_MMC_IREG);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer, jiffies + 5*HZ);
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}
440 | |
441 | static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host, |
442 | struct mmc_data *data) |
443 | { |
444 | int status; |
445 | |
446 | status = readl(host->base + JZ_REG_MMC_STATUS); |
447 | if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) { |
448 | if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) { |
449 | host->req->cmd->error = -ETIMEDOUT; |
450 | data->error = -ETIMEDOUT; |
451 | } else { |
452 | host->req->cmd->error = -EIO; |
453 | data->error = -EIO; |
454 | } |
455 | } else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) { |
456 | if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) { |
457 | host->req->cmd->error = -ETIMEDOUT; |
458 | data->error = -ETIMEDOUT; |
459 | } else { |
460 | host->req->cmd->error = -EIO; |
461 | data->error = -EIO; |
462 | } |
463 | } |
464 | } |
465 | |
/*
 * PIO write of the request's data into the TX FIFO.
 *
 * Data is pushed in bursts of eight 32-bit words (the FIFO trigger level),
 * waiting for TXFIFO_WR_REQ before each burst; leftover words follow after
 * one more poll.  Returns false when all data was written, true when the
 * poll timed out — then the bytes consumed so far are recorded so the
 * transfer can be resumed from the interrupt handler.
 */
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
	struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length / 4;	/* segment length in 32-bit words */
		j = i / 8;		/* number of full 8-word bursts */
		i = i & 0x7;		/* leftover words after the bursts */
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	/* Record progress so the transfer can be resumed where it stopped. */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}
520 | |
521 | static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host, |
522 | struct mmc_data *data) |
523 | { |
524 | struct sg_mapping_iter *miter = &host->miter; |
525 | void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO; |
526 | uint32_t *buf; |
527 | uint32_t d; |
528 | uint16_t status; |
529 | size_t i, j; |
530 | unsigned int timeout; |
531 | |
532 | while (sg_miter_next(miter)) { |
533 | buf = miter->addr; |
534 | i = miter->length; |
535 | j = i / 32; |
536 | i = i & 0x1f; |
537 | while (j) { |
538 | timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); |
539 | if (unlikely(timeout)) |
540 | goto poll_timeout; |
541 | |
542 | buf[0] = readl(fifo_addr); |
543 | buf[1] = readl(fifo_addr); |
544 | buf[2] = readl(fifo_addr); |
545 | buf[3] = readl(fifo_addr); |
546 | buf[4] = readl(fifo_addr); |
547 | buf[5] = readl(fifo_addr); |
548 | buf[6] = readl(fifo_addr); |
549 | buf[7] = readl(fifo_addr); |
550 | |
551 | buf += 8; |
552 | --j; |
553 | } |
554 | |
555 | if (unlikely(i)) { |
556 | timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); |
557 | if (unlikely(timeout)) |
558 | goto poll_timeout; |
559 | |
560 | while (i >= 4) { |
561 | *buf++ = readl(fifo_addr); |
562 | i -= 4; |
563 | } |
564 | if (unlikely(i > 0)) { |
565 | d = readl(fifo_addr); |
566 | memcpy(buf, &d, i); |
567 | } |
568 | } |
569 | data->bytes_xfered += miter->length; |
570 | |
571 | /* This can go away once MIPS implements |
572 | * flush_kernel_dcache_page */ |
573 | flush_dcache_page(miter->page); |
574 | } |
575 | sg_miter_stop(miter); |
576 | |
577 | /* For whatever reason there is sometime one word more in the fifo then |
578 | * requested */ |
579 | timeout = 1000; |
580 | status = readl(host->base + JZ_REG_MMC_STATUS); |
581 | while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) { |
582 | d = readl(fifo_addr); |
583 | status = readl(host->base + JZ_REG_MMC_STATUS); |
584 | } |
585 | |
586 | return false; |
587 | |
588 | poll_timeout: |
589 | miter->consumed = (void *)buf - miter->addr; |
590 | data->bytes_xfered += miter->consumed; |
591 | sg_miter_stop(miter); |
592 | |
593 | return true; |
594 | } |
595 | |
/*
 * Software watchdog (timeout_timer), armed when a command is issued or a
 * poll gives up.  If the interrupt handler has not claimed the 'waiting'
 * bit by the time it fires, fail the request with -ETIMEDOUT.
 */
static void jz4740_mmc_timeout(unsigned long data)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;

	/* The IRQ handler got there first — nothing to do. */
	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}
608 | |
/*
 * Read the command response from the 16-bit response FIFO and repack it
 * into the 32-bit cmd->resp[] words expected by the MMC core.
 */
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
	struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		/* 136-bit response: each resp[i] combines the half-word
		 * carried over from the previous iteration with two fresh
		 * FIFO reads. */
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		/* Short response: 32 payload bits spread over three reads. */
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}
631 | |
/*
 * Program CMD/ARG/CMDAT (and BLKLEN/NOB for data commands) and start the
 * command.  The card clock is stopped while registers are written and
 * restarted with START_OP afterwards.
 */
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
	struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	/* CMDAT_INIT is only sent with the first command after power-up. */
	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	/* Translate the core's response type into the controller encoding. */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (cmd->data->flags & MMC_DATA_STREAM)
			cmdat |= JZ_MMC_CMDAT_STREAM;
		if (host->use_dma)
			cmdat |= JZ_MMC_CMDAT_DMA_EN;

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}
679 | |
680 | static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host) |
681 | { |
682 | struct mmc_command *cmd = host->req->cmd; |
683 | struct mmc_data *data = cmd->data; |
684 | int direction; |
685 | |
686 | if (data->flags & MMC_DATA_READ) |
687 | direction = SG_MITER_TO_SG; |
688 | else |
689 | direction = SG_MITER_FROM_SG; |
690 | |
691 | sg_miter_start(&host->miter, data->sg, data->sg_len, direction); |
692 | } |
693 | |
694 | |
/*
 * Threaded half of the IRQ handler: advances the request state machine.
 * Each switch case deliberately falls through to the next state; a state
 * is saved and the function exits (via break with timeout set) whenever a
 * poll times out, so the machine resumes here on the next wakeup.
 */
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		/* fall through */

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in jz4740_prepare_dma_data().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			/* Resume the data phase on the next wakeup. */
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		writew(JZ_MMC_IRQ_DATA_TRAN_DONE, host->base + JZ_REG_MMC_IREG);
		/* fall through */

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		/* fall through */

	case JZ4740_MMC_STATE_DONE:
		break;
	}

	/* Only complete the request if no poll armed the watchdog/IRQ path. */
	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}
771 | |
/*
 * Hard IRQ handler.  Acks and dispatches SDIO card interrupts directly; for
 * command/data interrupts it records any error from STATUS on the current
 * command and wakes the threaded handler (jz_mmc_irq_worker) to advance the
 * request state machine.
 */
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint16_t irq_reg, status, tmp;

	irq_reg = readw(host->base + JZ_REG_MMC_IREG);

	tmp = irq_reg;
	/* Only consider interrupts that are currently unmasked. */
	irq_reg &= ~host->irq_mask;

	/* Ack everything except the bits the worker/PIO paths ack
	 * themselves. */
	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		writew(tmp & ~irq_reg, host->base + JZ_REG_MMC_IREG);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		writew(JZ_MMC_IRQ_SDIO, host->base + JZ_REG_MMC_IREG);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		/* First of IRQ handler vs. timeout_timer wins the request. */
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			status = readl(host->base + JZ_REG_MMC_STATUS);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			writew(irq_reg, host->base + JZ_REG_MMC_IREG);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}
821 | |
822 | static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate) |
823 | { |
824 | int div = 0; |
825 | int real_rate; |
826 | |
827 | jz4740_mmc_clock_disable(host); |
828 | clk_set_rate(host->clk, JZ_MMC_CLK_RATE); |
829 | |
830 | real_rate = clk_get_rate(host->clk); |
831 | |
832 | while (real_rate > rate && div < 7) { |
833 | ++div; |
834 | real_rate >>= 1; |
835 | } |
836 | |
837 | writew(div, host->base + JZ_REG_MMC_CLKRT); |
838 | return real_rate; |
839 | } |
840 | |
/* mmc_host_ops .request: issue a new request and arm the 5 s watchdog. */
static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	/* Clear all pending interrupt flags before issuing the command. */
	writew(0xffff, host->base + JZ_REG_MMC_IREG);

	writew(JZ_MMC_IRQ_END_CMD_RES, host->base + JZ_REG_MMC_IREG);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer, jiffies + 5*HZ);
	jz4740_mmc_send_command(host, req->cmd);
}
857 | |
/* mmc_host_ops .set_ios: apply clock, power and bus-width settings. */
static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		/* Reset the controller; the INIT flag is sent with the next
		 * command (send_command clears it afterwards). */
		jz4740_mmc_reset(host);
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		break;
	default:
		/* Power off: drop the supply and gate the clock. */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		clk_disable_unprepare(host->clk);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	default:
		break;
	}
}
890 | |
891 | static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) |
892 | { |
893 | struct jz4740_mmc_host *host = mmc_priv(mmc); |
894 | jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable); |
895 | } |
896 | |
#ifdef CONFIG_CPU_FREQ

/* Single host handled by the cpufreq notifier (set at registration). */
static struct jz4740_mmc_host *cpufreq_host;

/*
 * Hold the host and gate its clock across a CPU frequency transition, then
 * reprogram the divider and re-enable the clock once the change is done.
 */
static int jz4740_mmc_cpufreq_transition(struct notifier_block *nb,
	unsigned long val, void *data)
{
	/* TODO: We only have to take action when the PLL freq changes:
	   the main dividers have no influence on the MSC device clock. */

	if (val == CPUFREQ_PRECHANGE) {
		mmc_claim_host(cpufreq_host->mmc);
		clk_disable_unprepare(cpufreq_host->clk);
	} else if (val == CPUFREQ_POSTCHANGE) {
		struct mmc_ios *ios = &cpufreq_host->mmc->ios;
		if (ios->clock)
			jz4740_mmc_set_clock_rate(cpufreq_host, ios->clock);
		if (ios->power_mode != MMC_POWER_OFF)
			clk_prepare_enable(cpufreq_host->clk);
		mmc_release_host(cpufreq_host->mmc);
	}
	return 0;
}

static struct notifier_block jz4740_mmc_cpufreq_nb = {
	.notifier_call = jz4740_mmc_cpufreq_transition,
};

static inline int jz4740_mmc_cpufreq_register(struct jz4740_mmc_host *host)
{
	cpufreq_host = host;
	return cpufreq_register_notifier(&jz4740_mmc_cpufreq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static inline void jz4740_mmc_cpufreq_unregister(void)
{
	cpufreq_unregister_notifier(&jz4740_mmc_cpufreq_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);
}

#else

/* Stubs when CONFIG_CPU_FREQ is disabled. */
static inline int jz4740_mmc_cpufreq_register(struct jz4740_mmc_host *host)
{
	return 0;
}

static inline void jz4740_mmc_cpufreq_unregister(void)
{
}

#endif
950 | |
/* Host controller callbacks; card-detect and write-protect go through the
 * generic slot-GPIO helpers. */
static const struct mmc_host_ops jz4740_mmc_ops = {
	.request = jz4740_mmc_request,
	.pre_req = jz4740_mmc_pre_request,
	.post_req = jz4740_mmc_post_request,
	.set_ios = jz4740_mmc_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};
960 | |
/* GPIO pins claimed for the MSC interface; the three upper data lines are
 * dropped in 1-bit mode (see jz4740_mmc_num_pins). */
static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = {
	JZ_GPIO_BULK_PIN(MSC_CMD),
	JZ_GPIO_BULK_PIN(MSC_CLK),
	JZ_GPIO_BULK_PIN(MSC_DATA0),
	JZ_GPIO_BULK_PIN(MSC_DATA1),
	JZ_GPIO_BULK_PIN(MSC_DATA2),
	JZ_GPIO_BULK_PIN(MSC_DATA3),
};
969 | |
970 | static int jz4740_mmc_request_gpios(struct mmc_host *mmc, |
971 | struct platform_device *pdev) |
972 | { |
973 | struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; |
974 | int ret = 0; |
975 | |
976 | if (!pdata) |
977 | return 0; |
978 | |
979 | if (!pdata->card_detect_active_low) |
980 | mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; |
981 | if (!pdata->read_only_active_low) |
982 | mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; |
983 | |
984 | if (gpio_is_valid(pdata->gpio_card_detect)) { |
985 | ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0); |
986 | if (ret) |
987 | return ret; |
988 | } |
989 | |
990 | if (gpio_is_valid(pdata->gpio_read_only)) |
991 | ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only); |
992 | |
993 | return ret; |
994 | } |
995 | |
996 | static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host) |
997 | { |
998 | size_t num_pins = ARRAY_SIZE(jz4740_mmc_pins); |
999 | if (host->pdata && host->pdata->data_1bit) |
1000 | num_pins -= 3; |
1001 | |
1002 | return num_pins; |
1003 | } |
1004 | |
/*
 * Probe the JZ4740 MSC controller: allocate the mmc_host, claim the
 * IRQ, clock, MMIO region, pins and (optionally) DMA channels, then
 * register with the MMC core. Resources are released in reverse
 * order through the goto-cleanup chain on failure.
 */
static int jz4740_mmc_probe(struct platform_device* pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct jz4740_mmc_host *host;
	struct jz4740_mmc_platform_data *pdata;

	pdata = pdev->dev.platform_data;

	/* Allocate the mmc_host with our private state appended to it. */
	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);
	host->pdata = pdata;

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
		goto err_free_host;
	}

	/* Managed clock handle: released automatically with the device. */
	host->clk = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(&pdev->dev, "Failed to get mmc clock\n");
		goto err_free_host;
	}

	ret = jz4740_mmc_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to register cpufreq transition notifier\n");
		goto err_free_host;
	}

	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
		goto err_cpufreq_unreg;
	}

	ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
	if (ret) {
		dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret);
		goto err_cpufreq_unreg;
	}

	/* Board-file systems describe GPIOs via pdata; DT systems via OF. */
	if (pdata) {
		ret = jz4740_mmc_request_gpios(mmc, pdev);
		if (ret)
			goto err_gpio_bulk_free;
	} else {
		/* NOTE(review): return value ignored — mmc_of_parse() can
		 * fail (e.g. -EPROBE_DEFER); consider propagating it. */
		mmc_of_parse(mmc);
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto err_gpio_bulk_free;

	mmc->ops = &jz4740_mmc_ops;
	mmc->f_min = JZ_MMC_CLK_RATE / 128;
	mmc->f_max = JZ_MMC_CLK_RATE;
	mmc->caps = (pdata && pdata->data_1bit) ? 0 : MMC_CAP_4_BIT_DATA;
	mmc->caps |= MMC_CAP_SDIO_IRQ;

	/* Hardware limits: 10-bit block length, 15-bit block count. */
	mmc->max_blk_size = (1 << 10) - 1;
	mmc->max_blk_count = (1 << 15) - 1;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	mmc->max_segs = 128;
	mmc->max_seg_size = mmc->max_req_size;

	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);
	/* Start with every controller interrupt masked. */
	host->irq_mask = 0xffff;

	/* Hard-IRQ handler plus threaded bottom half for request completion. */
	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
			dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_gpio_bulk_free;
	}

	jz4740_mmc_reset(host);
	jz4740_mmc_clock_disable(host);
	setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
			(unsigned long)host);
	/* It is not important when it times out, it just needs to timeout. */
	set_timer_slack(&host->timeout_timer, HZ);

	/* Prefer DMA, but fall back to PIO if channels cannot be acquired. */
	host->use_dma = true;
	if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
		host->use_dma = false;

	platform_set_drvdata(pdev, host);
	ret = mmc_add_host(mmc);

	if (ret) {
		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
		goto err_free_irq;
	}
	dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");

	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;

err_free_irq:
	free_irq(host->irq, host);
err_gpio_bulk_free:
	/* DMA channels were only acquired if use_dma is still set. */
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);
	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
err_cpufreq_unreg:
	jz4740_mmc_cpufreq_unregister();
err_free_host:
	mmc_free_host(mmc);

	return ret;
}
1134 | |
/*
 * Tear-down mirror of jz4740_mmc_probe(): quiesce the controller
 * first (stop the timeout timer, mask all interrupts, reset the
 * block), then unregister from the MMC core and release every
 * resource acquired in probe.
 */
static int jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	/* Make sure the timeout timer cannot fire during teardown. */
	del_timer_sync(&host->timeout_timer);
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	/* DMA channels exist only if probe managed to acquire them. */
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));

	jz4740_mmc_cpufreq_unregister();
	mmc_free_host(host->mmc);

	return 0;
}
1157 | |
#ifdef CONFIG_PM_SLEEP

/*
 * System-sleep suspend hook: put the bulk-requested MSC pins into
 * their low-power state via the SoC GPIO helper. No controller
 * register state is saved here.
 */
static int jz4740_mmc_suspend(struct device *dev)
{
	struct jz4740_mmc_host *host = dev_get_drvdata(dev);

	jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));

	return 0;
}

/*
 * System-sleep resume hook: restore the MSC pins to their active
 * (function) configuration.
 */
static int jz4740_mmc_resume(struct device *dev)
{
	struct jz4740_mmc_host *host = dev_get_drvdata(dev);

	jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host));

	return 0;
}

static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
	jz4740_mmc_resume);
#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
#else
/* No PM-sleep support configured: register no dev_pm_ops. */
#define JZ4740_MMC_PM_OPS NULL
#endif
1184 | |
/* Devicetree match table: binds this driver to the JZ4740 MSC node. */
static const struct of_device_id jz4740_mmc_of_match[] = {
	{ .compatible = "ingenic,jz4740-msc" },
	{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
1190 | |
/*
 * Platform driver glue: matches either by platform-device name
 * ("jz4740-mmc", board-file systems) or by the OF table above
 * (devicetree systems).
 */
static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		.owner = THIS_MODULE,
		.pm = JZ4740_MMC_PM_OPS, /* NULL when CONFIG_PM_SLEEP is off */
		.of_match_table = jz4740_mmc_of_match,
	},
};

module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
1207 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9