Source at commit 694c7fbe86b8a9c91392e505afcb9fcfc91deccc, by Maarten ter Huurne ("MIPS: JZ4740: Add cpufreq support").

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
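
/*
 * Usage note (annotation, not part of the original file): these flags are
 * selected from the kernel command line at early boot, e.g.
 *
 *	console=ttyS0,115200 nocoherentio
 *
 * Neither parameter takes an argument, which is why both handlers ignore
 * their 'str' argument.
 */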

static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
	dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
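
/*
 * Worked example (annotation, not part of the original file), assuming
 * both CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 are enabled and CONFIG_ISA
 * is not: a device advertising a 30-bit coherent mask (DMA_BIT_MASK(30))
 * gets __GFP_DMA; a full 32-bit mask (DMA_BIT_MASK(32) == 2^32 - 1) fails
 * the first test but is below DMA_BIT_MASK(64), so it gets __GFP_DMA32;
 * an unrestricted DMA_BIT_MASK(64) falls through to dma_flag = 0, i.e.
 * no zone restriction at all.
 */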

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);
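
/*
 * Example (illustrative sketch, not part of this file): a driver that can
 * tolerate non-snooped memory might pair this allocator with
 * dma_cache_sync() further below.  'pdev' and 'BUF_LEN' are hypothetical:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_noncoherent(&pdev->dev, BUF_LEN, &handle,
 *					  GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */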

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			if (!hw_coherentio)
				ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}
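
/*
 * Illustrative call path (annotation, not part of the original file):
 * drivers reach the allocator above through the generic DMA API, which
 * dispatches via the dma_map_ops table registered at the bottom of this
 * file.  'dev' is a hypothetical struct device:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *	// on a non-coherent platform without hw_coherentio, 'cpu' is the
 *	// uncached alias produced by UNCAC_ADDR() above
 */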

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
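
/*
 * Worked example (annotation, not part of the original file): with
 * PAGE_SIZE == 4096, a highmem sync of size 6000 starting at offset 5000
 * is first renormalized by the loop above:
 *
 *	page += 5000 >> PAGE_SHIFT;	// advance one page
 *	offset = 5000 & ~PAGE_MASK;	// 904
 *	len = 4096 - 904;		// 3192: stop at the page boundary
 *
 * leaving left = 6000 - 3192 = 2808 for the next iteration, which starts
 * at offset 0 on the following page.
 */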

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}
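
/*
 * Example (illustrative sketch, not part of this file): typical streaming
 * use through the generic dma_map_sg()/dma_unmap_sg() wrappers, which land
 * in mips_dma_map_sg() above and mips_dma_unmap_sg() below via the ops
 * table.  'dev' and 'buf' are hypothetical:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf, PAGE_SIZE);
 *	sg_set_buf(&sg[1], buf + PAGE_SIZE, PAGE_SIZE);
 *	if (dma_map_sg(dev, sg, 2, DMA_TO_DEVICE) == 0)
 *		return -EIO;
 *	// ... hand sg[i].dma_address to the device, then:
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */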

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}
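
/*
 * Example (illustrative sketch, not part of this file): dma_map_single()
 * reaches mips_dma_map_page() above through ops->map_page, using
 * virt_to_page() and the in-page offset of the buffer.  'dev', 'data' and
 * 'len' are hypothetical:
 *
 *	dma_addr_t bus = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -EIO;
 *	// ... device reads 'len' bytes at 'bus', then:
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */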

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	if (cpu_needs_post_dma_flush(dev))
		for (i = 0; i < nelems; i++, sg++)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	if (!plat_device_is_coherent(dev))
		for (i = 0; i < nelems; i++, sg++)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);
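
/*
 * Usage note (annotation, not part of the original file): dma_cache_sync()
 * pairs with dma_alloc_noncoherent() above; the driver brackets each device
 * access with an explicit writeback or invalidate.  Continuing the
 * hypothetical 'buf'/'BUF_LEN' sketch from dma_alloc_noncoherent():
 *
 *	memcpy(buf, data, BUF_LEN);
 *	dma_cache_sync(&pdev->dev, buf, BUF_LEN, DMA_TO_DEVICE);
 *	// ... start the device, wait for completion, then:
 *	dma_cache_sync(&pdev->dev, buf, BUF_LEN, DMA_FROM_DEVICE);
 */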

static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};
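
/*
 * Annotation (not part of the original file): a platform with special DMA
 * requirements can substitute its own ops table at boot by reassigning the
 * exported pointer below, in the spirit of (sketch; the ops structure name
 * is hypothetical):
 *
 *	mips_dma_map_ops = &my_platform_dma_map_ops;
 *
 * after which the generic dma_* helpers dispatch to the replacement table.
 */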

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);