/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	phys_addr_t	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;	/* don't leave a dangling pointer behind on failure */
out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
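
/*
 * Example (illustrative only, not part of the original file): a hedged
 * sketch of how a driver setup path might hand a block of device-local
 * SRAM to the DMA layer with dma_declare_coherent_memory().  The
 * MY_SRAM_* constants and my_setup_coherent_pool() are hypothetical;
 * here the bus address and the device-visible address are assumed to
 * be the same.
 */
#define MY_SRAM_BUS_ADDR	0x10000000	/* example bus address of the SRAM */
#define MY_SRAM_SIZE		0x10000		/* 64 KiB */

static int my_setup_coherent_pool(struct device *dev)
{
	int rc;

	rc = dma_declare_coherent_memory(dev, MY_SRAM_BUS_ADDR,
					 MY_SRAM_BUS_ADDR, MY_SRAM_SIZE,
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
	/* A return of 0 means the declaration failed. */
	return rc == 0 ? -ENOMEM : 0;
}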

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address of the
 *       allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);

	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
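
/*
 * Example (illustrative only, not part of the original file): a hedged
 * sketch of the calling convention an architecture's dma_alloc_coherent()
 * implementation is expected to follow.  The arch_dma_alloc() name and
 * the page-allocator fallback are hypothetical; a real implementation
 * also has to honour the device's DMA mask and cache attributes.
 */
static void *arch_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* A non-zero return means the per-device pool answered the request. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* Otherwise fall back to a generic allocation (simplified). */
	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}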

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_free_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
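
/*
 * Example (illustrative only, not part of the original file): the
 * matching release path.  A hypothetical arch_dma_free() first offers
 * the buffer back to the per-device pool and only frees it generically
 * when the pool declines; the free_pages() fallback mirrors the
 * allocation sketch above.
 */
static void arch_dma_free(struct device *dev, size_t size, void *vaddr)
{
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	/* The buffer came from the generic allocator (simplified). */
	free_pages((unsigned long)vaddr, order);
}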

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_coherent
 * @size: size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller
 * should proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
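
/*
 * Example (illustrative only, not part of the original file): a hedged
 * sketch of an architecture or driver mmap hook that tries the
 * per-device pool first.  The arch_dma_mmap() name is hypothetical, and
 * the generic fallback below assumes the buffer came from the kernel's
 * linear mapping, which is a simplification.
 */
static int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			 void *cpu_addr, size_t size)
{
	int ret;

	/* A non-zero return means the per-device pool handled the mapping. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Generic fallback (simplified): map the pages backing cpu_addr. */
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}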