Date:2014-01-18 22:53:52 (6 years 6 months ago)
Author:Apelete Seketeli
Commit:f0df554d02bd84f4154be6599cbc8db3b0fb33cc
Message:mmc: jz4740: add dma infrastructure for data transfers

Until now the MMC driver for JZ4740 SoC was relying on PIO mode only
for data transfers.
This adds a DMA infrastructure for data transfers in addition to PIO
mode by relying on DMA Engine.

DMA transfer performance might be further improved by taking advantage
of the asynchronous request capability of the MMC framework.

Signed-off-by: Apelete Seketeli <apelete@seketeli.net>
Files: drivers/mmc/host/jz4740_mmc.c (11 diffs)

Change Details

drivers/mmc/host/jz4740_mmc.c
3131#include <asm/mach-jz4740/gpio.h>
3232#include <asm/cacheflush.h>
3333#include <linux/dma-mapping.h>
34#include <linux/dmaengine.h>
3435
36#include <asm/mach-jz4740/dma.h>
3537#include <asm/mach-jz4740/jz4740_mmc.h>
3638
3739#define JZ_REG_MMC_STRPCL 0x00
...... 
123125    int card_detect_irq;
124126
125127    void __iomem *base;
128    struct resource *mem_res;
126129    struct mmc_request *req;
127130    struct mmc_command *cmd;
128131
...... 
137140    struct timer_list timeout_timer;
138141    struct sg_mapping_iter miter;
139142    enum jz4740_mmc_state state;
143
144    /* DMA support */
145    struct dma_chan *dma_rx;
146    struct dma_chan *dma_tx;
147    bool use_dma;
148    int sg_len;
149
150/* The DMA trigger level is 8 words, that is to say, the DMA read
151 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
152 * trigger is when data words in MSC_TXFIFO is < 8.
153 */
154#define JZ4740_MMC_FIFO_HALF_SIZE 8
140155};
141156
157/*----------------------------------------------------------------------------*/
158/* DMA infrastructure */
159
160static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
161{
162    if (!host->use_dma)
163        return;
164
165    dma_release_channel(host->dma_tx);
166    dma_release_channel(host->dma_rx);
167}
168
169static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
170{
171    dma_cap_mask_t mask;
172
173    dma_cap_zero(mask);
174    dma_cap_set(DMA_SLAVE, mask);
175
176    host->dma_tx = dma_request_channel(mask, NULL, host);
177    if (!host->dma_tx) {
178        dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
179        return -ENODEV;
180    }
181
182    host->dma_rx = dma_request_channel(mask, NULL, host);
183    if (!host->dma_rx) {
184        dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
185        goto free_master_write;
186    }
187
188    return 0;
189
190free_master_write:
191    dma_release_channel(host->dma_tx);
192    return -ENODEV;
193}
194
195static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
196{
197    return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
198}
199
200static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
201                 struct mmc_data *data)
202{
203    struct dma_chan *chan;
204    enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
205
206    if (dir == DMA_TO_DEVICE)
207        chan = host->dma_tx;
208    else
209        chan = host->dma_rx;
210
211    dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
212}
213
214static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
215                     struct mmc_data *data)
216{
217    struct dma_chan *chan;
218    struct dma_async_tx_descriptor *desc;
219    struct dma_slave_config conf = {
220        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
221        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
222        .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
223        .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
224    };
225    enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
226
227    if (dir == DMA_TO_DEVICE) {
228        conf.direction = DMA_MEM_TO_DEV;
229        conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
230        conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
231        chan = host->dma_tx;
232    } else {
233        conf.direction = DMA_DEV_TO_MEM;
234        conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
235        conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
236        chan = host->dma_rx;
237    }
238
239    host->sg_len = dma_map_sg(chan->device->dev,
240                  data->sg,
241                  data->sg_len,
242                  dir);
243
244    if (host->sg_len == 0) {
245        dev_err(mmc_dev(host->mmc),
246            "Failed to map scatterlist for DMA operation\n");
247        return -EINVAL;
248    }
249
250    dmaengine_slave_config(chan, &conf);
251    desc = dmaengine_prep_slave_sg(chan,
252                       data->sg,
253                       host->sg_len,
254                       conf.direction,
255                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
256    if (!desc) {
257        dev_err(mmc_dev(host->mmc),
258            "Failed to allocate DMA %s descriptor",
259            conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
260        goto dma_unmap;
261    }
262
263    dmaengine_submit(desc);
264    dma_async_issue_pending(chan);
265
266    return 0;
267
268dma_unmap:
269    jz4740_mmc_dma_unmap(host, data);
270    return -ENOMEM;
271}
272
273/*----------------------------------------------------------------------------*/
274
142275static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
143276    unsigned int irq, bool enabled)
144277{
...... 
443576            cmdat |= JZ_MMC_CMDAT_WRITE;
444577        if (cmd->data->flags & MMC_DATA_STREAM)
445578            cmdat |= JZ_MMC_CMDAT_STREAM;
579        if (host->use_dma)
580            cmdat |= JZ_MMC_CMDAT_DMA_EN;
446581
447582        writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
448583        writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
...... 
475610    struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
476611    struct mmc_command *cmd = host->req->cmd;
477612    struct mmc_request *req = host->req;
613    struct mmc_data *data = cmd->data;
478614    bool timeout = false;
479615
480616    if (cmd->error)
...... 
485621        if (cmd->flags & MMC_RSP_PRESENT)
486622            jz4740_mmc_read_response(host, cmd);
487623
488        if (!cmd->data)
624        if (!data)
489625            break;
490626
491627        jz_mmc_prepare_data_transfer(host);
492628
493629    case JZ4740_MMC_STATE_TRANSFER_DATA:
494        if (cmd->data->flags & MMC_DATA_READ)
495            timeout = jz4740_mmc_read_data(host, cmd->data);
630        if (host->use_dma) {
631            /* Use DMA if enabled, data transfer direction was
632             * defined before in jz_mmc_prepare_data_transfer().
633             */
634            timeout = jz4740_mmc_start_dma_transfer(host, data);
635            data->bytes_xfered = data->blocks * data->blksz;
636        } else if (data->flags & MMC_DATA_READ)
637            /* If DMA is not enabled, rely on data flags
638             * to establish data transfer direction.
639             */
640            timeout = jz4740_mmc_read_data(host, data);
496641        else
497            timeout = jz4740_mmc_write_data(host, cmd->data);
642            timeout = jz4740_mmc_write_data(host, data);
498643
499644        if (unlikely(timeout)) {
500645            host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
501646            break;
502647        }
503648
504        jz4740_mmc_transfer_check_state(host, cmd->data);
649        jz4740_mmc_transfer_check_state(host, data);
505650
506651        timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
507652        if (unlikely(timeout)) {
...... 
771916    struct mmc_host *mmc;
772917    struct jz4740_mmc_host *host;
773918    struct jz4740_mmc_platform_data *pdata;
774    struct resource *res;
775919
776920    pdata = pdev->dev.platform_data;
777921
...... 
805949        goto err_free_host;
806950    }
807951
808    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
809    host->base = devm_ioremap_resource(&pdev->dev, res);
952    host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
953    host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
810954    if (IS_ERR(host->base)) {
811955        ret = PTR_ERR(host->base);
812956        dev_err(&pdev->dev, "Failed to ioremap base memory\n");
...... 
8631007    /* It is not important when it times out, it just needs to timeout. */
8641008    set_timer_slack(&host->timeout_timer, HZ);
8651009
1010    host->use_dma = true;
1011    if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
1012        host->use_dma = false;
1013
8661014    platform_set_drvdata(pdev, host);
8671015    ret = mmc_add_host(mmc);
8681016
...... 
8721020    }
8731021    dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
8741022
1023    dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
1024         host->use_dma ? "DMA" : "PIO",
1025         (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
1026
8751027    return 0;
8761028
8771029err_free_irq:
8781030    free_irq(host->irq, host);
8791031err_gpio_bulk_free:
1032    if (host->use_dma)
1033        jz4740_mmc_release_dma_channels(host);
8801034    jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
8811035err_cpufreq_unreg:
8821036    jz4740_mmc_cpufreq_unregister();
...... 
8981052
8991053    free_irq(host->irq, host);
9001054
1055    if (host->use_dma)
1056        jz4740_mmc_release_dma_channels(host);
1057
9011058    jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
9021059
9031060    jz4740_mmc_cpufreq_unregister();

Archive Download the corresponding diff file



interactive