/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list within the page.  Used blocks aren't tracked, but we keep a count of
 * how many are currently allocated from each page.
 */
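
/*
 * Layout sketch (an editor's illustration of the description above, not
 * part of the original file):
 *
 *	struct dma_pool --> page_list: struct dma_page <-> struct dma_page ...
 *
 * Each dma_page maps 'allocation' bytes of coherent memory; its 'offset'
 * field is the head of a free list threaded through the first 4 bytes of
 * every free block in that page.
 */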

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
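
/*
 * Hypothetical output of 'cat /sys/devices/.../pools' given the format
 * string above (pool name, blocks in use, total blocks, block size, pages);
 * the pool name and numbers are invented for illustration:
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   64 2048 32
 */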

/**
 * dma_pool_create - Creates a pool of consistent memory blocks for DMA.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4 KiB.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
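
/*
 * Worked example (illustrative, not from the original source): a call of
 * dma_pool_create("foo", dev, 24, 16, 0) rounds size up to ALIGN(24, 16)
 * = 32, sets allocation to max_t(size_t, 32, PAGE_SIZE) = PAGE_SIZE, and
 * defaults boundary to that full allocation size since 0 was passed.
 */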

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
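
/*
 * Illustrative trace with hypothetical numbers: for size = 96,
 * boundary = 4096 and allocation = 4096, the chain built above runs
 * 0 -> 96 -> 192 -> ... -> 3936.  At offset 3936 the next block (4032)
 * would cross the 4 KiB boundary, so 4096 is stored there instead; once
 * page->offset reaches that value, dma_pool_alloc() treats the page as
 * full because it only picks pages whose offset is < allocation.
 */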

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags);
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
				       pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
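
/*
 * Worked example (hypothetical numbers): if the chosen block lives at
 * offset 192 within a page whose page->dma is 0x80000000, the caller gets
 * retval = page->vaddr + 192 and *handle = 0x800000c0 (0x80000000 + 0xc0).
 */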

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_free %s, dma %Lx already free\n",
					pool->name, (unsigned long long)dma);
			else
				printk(KERN_ERR
				       "dma_pool_free %s, dma %Lx already free\n",
				       pool->name, (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
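
/*
 * Illustrative usage sketch (an editor's example, not part of the original
 * file): how a driver typically drives the API above.  The device pointer,
 * pool name and sizes are hypothetical.
 */
static __maybe_unused void dma_pool_example(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* 64-byte blocks, 16-byte aligned, never crossing a 4 KiB boundary */
	pool = dma_pool_create("example", dev, 64, 16, 4096);
	if (!pool)
		return;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (buf) {
		/* program 'dma' into the device, touch 'buf' from the CPU */
		dma_pool_free(pool, buf, dma);
	}

	/* every block must be freed before the pool itself is destroyed */
	dma_pool_destroy(pool);
}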

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
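
/*
 * Illustrative sketch (an editor's example with a hypothetical probe
 * function): with the managed variant there is no explicit destroy call;
 * devres runs dmam_pool_release() when the driver detaches.
 */
static __maybe_unused int example_probe(struct device *dev)
{
	struct dma_pool *pool;

	pool = dmam_pool_create("example", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	/* allocate/free with dma_pool_alloc()/dma_pool_free() as usual */
	return 0;
}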

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);