/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>

static inline int is_dma_buf_file(struct file *);

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	dmabuf->ops->release(dmabuf);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf and associates an anon file
 * with this buffer, so it can be exported.
 * It also connects the allocator-specific data and ops to the buffer.
 *
 * @priv:	[in]	Attach private data of allocator to this buffer
 * @ops:	[in]	Attach allocator-defined dma buf ops to the new buffer.
 * @size:	[in]	Size of the buffer
 * @flags:	[in]	mode flags for the file.
 *
 * Returns, on success, a newly created dma_buf object that wraps the
 * supplied private data and operations for dma_buf_ops. On missing ops or
 * failure to allocate the struct dma_buf, returns a negative error wrapped
 * in ERR_PTR().
 */
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
				size_t size, int flags)
{
	struct dma_buf *dmabuf;
	struct file *file;

	if (WARN_ON(!priv || !ops
			  || !ops->map_dma_buf
			  || !ops->unmap_dma_buf
			  || !ops->release
			  || !ops->kmap_atomic
			  || !ops->kmap
			  || !ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
	if (dmabuf == NULL)
		return ERR_PTR(-ENOMEM);

	dmabuf->priv = priv;
	dmabuf->ops = ops;
	dmabuf->size = size;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
	if (IS_ERR(file)) {
		/* don't leak the dma_buf if the anon file could not be created */
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export);
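
/*
 * Illustrative sketch (not part of the original file): an exporter would
 * typically call dma_buf_export() from its own "export" entry point. The
 * names my_buffer, my_buffer->size and my_dmabuf_ops below are hypothetical
 * exporter-side names, assumed only for this example.
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = dma_buf_export(my_buffer, &my_dmabuf_ops,
 *				my_buffer->size, O_RDWR);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */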


/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
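
/*
 * Illustrative sketch (not part of the original file): an exporter's ioctl
 * handler can hand the buffer to userspace by turning the dma_buf into a
 * file descriptor; O_CLOEXEC is a typical choice for the fd flags. The fd
 * can then be passed on to importing drivers.
 *
 *	int fd;
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 */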

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);
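
/*
 * Illustrative sketch (not part of the original file): an importing driver
 * that receives a dma-buf fd from userspace looks up the dma_buf with
 * dma_buf_get() and drops its reference with dma_buf_put() once it is done
 * with the buffer.
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	... use the buffer (attach, map, ...) ...
 *
 *	dma_buf_put(dmabuf);
 */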

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; may return negative
 * error codes.
 *
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
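
/*
 * Illustrative sketch (not part of the original file): an importing device
 * driver attaches its struct device before asking for a mapping, and detaches
 * once it no longer needs the buffer. "dev" is the importer's device here.
 *
 *	struct dma_buf_attachment *attach;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *
 *	... map the buffer, do DMA, unmap ...
 *
 *	dma_buf_detach(dmabuf, attach);
 */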

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns the sg_table containing the scatterlist for the buffer; may return
 * NULL or an ERR_PTR on failure.
 *
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
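
/*
 * Illustrative sketch (not part of the original file): after attaching, an
 * importer obtains a device-address scatterlist, programs its DMA engine from
 * it, and unmaps when the transfer is finished. The helper
 * my_program_dma_engine() is hypothetical.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR_OR_NULL(sgt))
 *		return sgt ? PTR_ERR(sgt) : -ENOMEM;
 *
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		my_program_dma_engine(sg_dma_address(sg), sg_dma_len(sg));
 *
 *	... perform the DMA transfer ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 */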

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
						direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);


/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of access for cpu.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
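
/*
 * Illustrative sketch (not part of the original file): kernel-context CPU
 * access is bracketed by dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(),
 * with the actual pages mapped through dma_buf_kmap()/dma_buf_kunmap(). This
 * reads the first page of the buffer; my_consume_data() is hypothetical.
 *
 *	void *vaddr;
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = dma_buf_kmap(dmabuf, 0);
 *	my_consume_data(vaddr, PAGE_SIZE);
 *	dma_buf_kunmap(dmabuf, 0, vaddr);
 *
 *	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 */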

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of access for cpu.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
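
/*
 * Illustrative sketch (not part of the original file): the atomic variants
 * follow the usual kmap_atomic rules, so the code between map and unmap must
 * not sleep. Here page "i" of the buffer is copied out into a hypothetical
 * "out" buffer.
 *
 *	void *vaddr;
 *
 *	vaddr = dma_buf_kmap_atomic(dmabuf, i);
 *	memcpy(out, vaddr, PAGE_SIZE);
 *	dma_buf_kunmap_atomic(dmabuf, i, vaddr);
 */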

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);


/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
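
/*
 * Illustrative sketch (not part of the original file): a driver that exposes
 * a dma-buf backed object through its own device node can forward its mmap
 * file operation to the exporter via dma_buf_mmap(). The struct my_obj and
 * its dmabuf member are hypothetical.
 *
 *	static int my_obj_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */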

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is mapping
 * frequently used objects linearly into kernel space.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (IS_ERR_OR_NULL(ptr))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
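
/*
 * Illustrative sketch (not part of the original file): dma_buf_vmap() and
 * dma_buf_vunmap() pair up to give a linear kernel mapping of the whole
 * buffer when the exporter supports it; a NULL or error return means the
 * caller has to fall back to page-wise kmap access.
 *
 *	void *vaddr;
 *
 *	vaddr = dma_buf_vmap(dmabuf);
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return vaddr ? PTR_ERR(vaddr) : -ENOMEM;
 *
 *	... access the buffer linearly through vaddr ...
 *
 *	dma_buf_vunmap(dmabuf, vaddr);
 */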