mm/dmapool.c

/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
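
/*
 * A minimal sketch of the free-list layout (hypothetical numbers): with
 * a block size of 256 and a 4096-byte allocation, pool_initialise_page()
 * chains the free blocks by storing each block's successor offset in the
 * block's first bytes:
 *
 *	0 -> 256 -> 512 -> ... -> 3840 -> 4096
 *
 * An offset equal to 'allocation' (4096 here) ends the chain.
 * dma_pool_alloc() pops the block at page->offset off the head;
 * dma_pool_free() pushes the freed block back as the new head.
 */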

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
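
/*
 * A minimal sketch of the resulting sysfs output (hypothetical pool
 * names and counts), as read from the device's "pools" attribute:
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   32 2048  2
 *	buffer-512          1   56  512  1
 *
 * Columns: pool name, blocks in use, total blocks, block size, pages.
 */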

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
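
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * allocator): a driver typically creates its pool at probe time.  The
 * names "example_pool"/"example_probe" and the 64/16/4096 geometry are
 * assumptions for illustration only.
 */
#if 0
static struct dma_pool *example_pool;

static int example_probe(struct device *dev)
{
	/* 64-byte blocks, 16-byte aligned, never crossing a 4 KiB boundary */
	example_pool = dma_pool_create("example-desc", dev, 64, 16, 4096);
	if (!example_pool)
		return -ENOMEM;
	return 0;
}
#endif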

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	/*
	 * Chain the free blocks: each free block holds the offset of its
	 * successor in its first bytes.  A candidate block is skipped to
	 * the next boundary only if it would actually cross it; a block
	 * ending exactly on the boundary is still usable, hence '>'
	 * rather than '>=' below (the latter would waste that block).
	 */
	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) > next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			/*
			 * No free block and no memory to grow the pool:
			 * sleep until dma_pool_free() wakes us (or the
			 * timeout expires), then rescan the page list.
			 */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
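
/*
 * A minimal usage sketch (hypothetical, using the example_pool above):
 * the returned virtual address is for CPU use; the address written
 * through @handle is what gets programmed into the device.
 */
#if 0
static void *example_get_block(dma_addr_t *dma)
{
	/* GFP_KERNEL may sleep for a free block; use GFP_ATOMIC in atomic context */
	return dma_pool_alloc(example_pool, GFP_KERNEL, dma);
}
#endif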

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		/* walk the free-block chain to catch a double free */
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
					pool->name, (unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
				       pool->name, (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
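
/*
 * A minimal usage sketch (hypothetical, matching the alloc example
 * above): both addresses returned by dma_pool_alloc() must be handed
 * back unchanged.
 */
#if 0
static void example_put_block(void *vaddr, dma_addr_t dma)
{
	dma_pool_free(example_pool, vaddr, dma);
}
#endif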

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
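
/*
 * A minimal usage sketch (hypothetical driver code): with the managed
 * variant, devres destroys the pool when the driver detaches, so probe
 * error paths and the remove path need no explicit dma_pool_destroy().
 */
#if 0
static int example_managed_probe(struct device *dev)
{
	struct dma_pool *pool;

	pool = dmam_pool_create("example-desc", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;
	/* use the pool; it is released automatically when dev is unbound */
	return 0;
}
#endif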

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);
