/*
 * drivers/uio/uio.c
 *
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Userspace IO
 *
 * Base Functions
 *
 * Licensed under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>

#define UIO_MAX_DEVICES		(1U << MINORBITS)

struct uio_device {
	struct module		*owner;
	struct device		*dev;
	int			minor;
	atomic_t		event;
	struct fasync_struct	*async_queue;
	wait_queue_head_t	wait;
	int			vma_count;
	struct uio_info		*info;
	struct kobject		*map_dir;
	struct kobject		*portio_dir;
};

static int uio_major;
static struct cdev *uio_cdev;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;

/* Protect idr accesses */
static DEFINE_MUTEX(minor_lock);

/*
 * attributes
 */

struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)

static ssize_t map_name_show(struct uio_mem *mem, char *buf)
{
	if (unlikely(!mem->name))
		mem->name = "";

	return sprintf(buf, "%s\n", mem->name);
}

static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr);
}

static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%lx\n", mem->size);
}

static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr & ~PAGE_MASK);
}

struct map_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_mem *, char *);
	ssize_t (*store)(struct uio_mem *, const char *, size_t);
};

static struct map_sysfs_entry name_attribute =
	__ATTR(name, S_IRUGO, map_name_show, NULL);
static struct map_sysfs_entry addr_attribute =
	__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct map_sysfs_entry size_attribute =
	__ATTR(size, S_IRUGO, map_size_show, NULL);
static struct map_sysfs_entry offset_attribute =
	__ATTR(offset, S_IRUGO, map_offset_show, NULL);

static struct attribute *attrs[] = {
	&name_attribute.attr,
	&addr_attribute.attr,
	&size_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};
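
/*
 * Note: each of the attributes above appears in sysfs as
 * /sys/class/uio/uioX/maps/mapY/{name,addr,size,offset}, where Y is the
 * index into info->mem[] (see uio_dev_add_attributes() below).
 */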

static void map_release(struct kobject *kobj)
{
	struct uio_map *map = to_map(kobj);
	kfree(map);
}

static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct uio_map *map = to_map(kobj);
	struct uio_mem *mem = map->mem;
	struct map_sysfs_entry *entry;

	entry = container_of(attr, struct map_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(mem, buf);
}

static const struct sysfs_ops map_sysfs_ops = {
	.show = map_type_show,
};

static struct kobj_type map_attr_type = {
	.release	= map_release,
	.sysfs_ops	= &map_sysfs_ops,
	.default_attrs	= attrs,
};

struct uio_portio {
	struct kobject kobj;
	struct uio_port *port;
};
#define to_portio(portio) container_of(portio, struct uio_portio, kobj)

static ssize_t portio_name_show(struct uio_port *port, char *buf)
{
	if (unlikely(!port->name))
		port->name = "";

	return sprintf(buf, "%s\n", port->name);
}

static ssize_t portio_start_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->start);
}

static ssize_t portio_size_show(struct uio_port *port, char *buf)
{
	return sprintf(buf, "0x%lx\n", port->size);
}

static ssize_t portio_porttype_show(struct uio_port *port, char *buf)
{
	const char *porttypes[] = {"none", "x86", "gpio", "other"};

	if ((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER))
		return -EINVAL;

	return sprintf(buf, "port_%s\n", porttypes[port->porttype]);
}

struct portio_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_port *, char *);
	ssize_t (*store)(struct uio_port *, const char *, size_t);
};

static struct portio_sysfs_entry portio_name_attribute =
	__ATTR(name, S_IRUGO, portio_name_show, NULL);
static struct portio_sysfs_entry portio_start_attribute =
	__ATTR(start, S_IRUGO, portio_start_show, NULL);
static struct portio_sysfs_entry portio_size_attribute =
	__ATTR(size, S_IRUGO, portio_size_show, NULL);
static struct portio_sysfs_entry portio_porttype_attribute =
	__ATTR(porttype, S_IRUGO, portio_porttype_show, NULL);

static struct attribute *portio_attrs[] = {
	&portio_name_attribute.attr,
	&portio_start_attribute.attr,
	&portio_size_attribute.attr,
	&portio_porttype_attribute.attr,
	NULL,
};
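
/*
 * Note: these appear per port region as
 * /sys/class/uio/uioX/portio/portY/{name,start,size,porttype}.
 */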

static void portio_release(struct kobject *kobj)
{
	struct uio_portio *portio = to_portio(kobj);
	kfree(portio);
}

static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct uio_portio *portio = to_portio(kobj);
	struct uio_port *port = portio->port;
	struct portio_sysfs_entry *entry;

	entry = container_of(attr, struct portio_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(port, buf);
}

static const struct sysfs_ops portio_sysfs_ops = {
	.show = portio_type_show,
};

static struct kobj_type portio_attr_type = {
	.release	= portio_release,
	.sysfs_ops	= &portio_sysfs_ops,
	.default_attrs	= portio_attrs,
};

static ssize_t show_name(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", idev->info->name);
}

static ssize_t show_version(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	return sprintf(buf, "%s\n", idev->info->version);
}

static ssize_t show_event(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
}

static struct device_attribute uio_class_attributes[] = {
	__ATTR(name, S_IRUGO, show_name, NULL),
	__ATTR(version, S_IRUGO, show_version, NULL),
	__ATTR(event, S_IRUGO, show_event, NULL),
	{}
};
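
/*
 * Note: as class-wide device attributes, these show up for every UIO
 * device as /sys/class/uio/uioX/{name,version,event}.
 */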

/* UIO class infrastructure */
static struct class uio_class = {
	.name		= "uio",
	.dev_attrs	= uio_class_attributes,
};

/*
 * device functions
 */
static int uio_dev_add_attributes(struct uio_device *idev)
{
	int ret;
	int mi, pi;
	int map_found = 0;
	int portio_found = 0;
	struct uio_mem *mem;
	struct uio_map *map;
	struct uio_port *port;
	struct uio_portio *portio;

	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)
			break;
		if (!map_found) {
			map_found = 1;
			idev->map_dir = kobject_create_and_add("maps",
							&idev->dev->kobj);
			if (!idev->map_dir) {
				ret = -ENOMEM;
				goto err_map;
			}
		}
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto err_map;
		}
		kobject_init(&map->kobj, &map_attr_type);
		map->mem = mem;
		mem->map = map;
		ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
		if (ret)
			goto err_map;
		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
		if (ret)
			goto err_map;
	}

	for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) {
		port = &idev->info->port[pi];
		if (port->size == 0)
			break;
		if (!portio_found) {
			portio_found = 1;
			idev->portio_dir = kobject_create_and_add("portio",
							&idev->dev->kobj);
			if (!idev->portio_dir) {
				ret = -ENOMEM;
				goto err_portio;
			}
		}
		portio = kzalloc(sizeof(*portio), GFP_KERNEL);
		if (!portio) {
			ret = -ENOMEM;
			goto err_portio;
		}
		kobject_init(&portio->kobj, &portio_attr_type);
		portio->port = port;
		port->portio = portio;
		ret = kobject_add(&portio->kobj, idev->portio_dir,
							"port%d", pi);
		if (ret)
			goto err_portio;
		ret = kobject_uevent(&portio->kobj, KOBJ_ADD);
		if (ret)
			goto err_portio;
	}

	return 0;

err_portio:
	/* unwind the port regions registered so far */
	for (pi--; pi >= 0; pi--) {
		port = &idev->info->port[pi];
		portio = port->portio;
		kobject_put(&portio->kobj);
	}
	kobject_put(idev->portio_dir);
err_map:
	/* unwind the memory mappings registered so far */
	for (mi--; mi >= 0; mi--) {
		mem = &idev->info->mem[mi];
		map = mem->map;
		kobject_put(&map->kobj);
	}
	kobject_put(idev->map_dir);
	dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
	return ret;
}

static void uio_dev_del_attributes(struct uio_device *idev)
{
	int i;
	struct uio_mem *mem;
	struct uio_port *port;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		mem = &idev->info->mem[i];
		if (mem->size == 0)
			break;
		kobject_put(&mem->map->kobj);
	}
	kobject_put(idev->map_dir);

	for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) {
		port = &idev->info->port[i];
		if (port->size == 0)
			break;
		kobject_put(&port->portio->kobj);
	}
	kobject_put(idev->portio_dir);
}

static int uio_get_minor(struct uio_device *idev)
{
	int retval = -ENOMEM;
	int id;

	mutex_lock(&minor_lock);
	if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
		goto exit;

	retval = idr_get_new(&uio_idr, idev, &id);
	if (retval < 0) {
		if (retval == -EAGAIN)
			retval = -ENOMEM;
		goto exit;
	}
	if (id < UIO_MAX_DEVICES) {
		idev->minor = id;
	} else {
		dev_err(idev->dev, "too many uio devices\n");
		retval = -EINVAL;
		idr_remove(&uio_idr, id);
	}
exit:
	mutex_unlock(&minor_lock);
	return retval;
}

static void uio_free_minor(struct uio_device *idev)
{
	mutex_lock(&minor_lock);
	idr_remove(&uio_idr, idev->minor);
	mutex_unlock(&minor_lock);
}

/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
	struct uio_device *idev = info->uio_dev;

	atomic_inc(&idev->event);
	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);
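
/*
 * Illustrative sketch, not part of this file: a driver whose hardware
 * cannot raise a real IRQ can still push events to userspace by calling
 * uio_event_notify() itself, e.g. from a timer. All my_* names below are
 * hypothetical.
 *
 *	static void my_poll_timer_fn(unsigned long data)
 *	{
 *		struct uio_info *info = (struct uio_info *)data;
 *
 *		if (my_hw_has_new_data())
 *			uio_event_notify(info);
 *		mod_timer(&my_timer, jiffies + HZ / 100);
 *	}
 */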

/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
	struct uio_device *idev = (struct uio_device *)dev_id;
	irqreturn_t ret = idev->info->handler(irq, idev->info);

	if (ret == IRQ_HANDLED)
		uio_event_notify(idev->info);

	return ret;
}

struct uio_listener {
	struct uio_device *dev;
	s32 event_count;
};

static int uio_open(struct inode *inode, struct file *filep)
{
	struct uio_device *idev;
	struct uio_listener *listener;
	int ret = 0;

	mutex_lock(&minor_lock);
	idev = idr_find(&uio_idr, iminor(inode));
	mutex_unlock(&minor_lock);
	if (!idev) {
		ret = -ENODEV;
		goto out;
	}

	if (!try_module_get(idev->owner)) {
		ret = -ENODEV;
		goto out;
	}

	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener) {
		ret = -ENOMEM;
		goto err_alloc_listener;
	}

	listener->dev = idev;
	listener->event_count = atomic_read(&idev->event);
	filep->private_data = listener;

	if (idev->info->open) {
		ret = idev->info->open(idev->info, inode);
		if (ret)
			goto err_infoopen;
	}
	return 0;

err_infoopen:
	kfree(listener);

err_alloc_listener:
	module_put(idev->owner);

out:
	return ret;
}

static int uio_fasync(int fd, struct file *filep, int on)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	return fasync_helper(fd, filep, on, &idev->async_queue);
}

static int uio_release(struct inode *inode, struct file *filep)
{
	int ret = 0;
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (idev->info->release)
		ret = idev->info->release(idev->info, inode);

	module_put(idev->owner);
	kfree(listener);
	return ret;
}

static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (!idev->info->irq)
		return -EIO;

	poll_wait(filep, &idev->wait, wait);
	if (listener->event_count != atomic_read(&idev->event))
		return POLLIN | POLLRDNORM;
	return 0;
}

static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval;
	s32 event_count;

	if (!idev->info->irq)
		return -EIO;

	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read(&idev->event);
		if (event_count != listener->event_count) {
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}
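
/*
 * Illustrative userspace sketch, not part of this file: each successful
 * read() returns the current event counter as a 32-bit integer, so a
 * blocking read serves as "wait for interrupt". The device path and
 * handle_event() are assumptions.
 *
 *	int fd = open("/dev/uio0", O_RDONLY);
 *	int32_t count;
 *
 *	while (read(fd, &count, sizeof(count)) == sizeof(count))
 *		handle_event(count);
 *
 * Note that count holds the total number of events so far, not a delta.
 */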

static ssize_t uio_write(struct file *filep, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	ssize_t retval;
	s32 irq_on;

	if (!idev->info->irq)
		return -EIO;

	if (count != sizeof(s32))
		return -EINVAL;

	if (!idev->info->irqcontrol)
		return -ENOSYS;

	if (copy_from_user(&irq_on, buf, count))
		return -EFAULT;

	retval = idev->info->irqcontrol(idev->info, irq_on);

	return retval ? retval : sizeof(s32);
}
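
/*
 * Illustrative userspace sketch, not part of this file: a write() of a
 * 32-bit value is passed to the driver's irqcontrol() hook; by convention
 * drivers interpret 1/0 as unmask/mask the interrupt. fd is assumed to be
 * an open /dev/uioX descriptor.
 *
 *	int32_t irq_on = 1;
 *
 *	if (write(fd, &irq_on, sizeof(irq_on)) != sizeof(irq_on))
 *		perror("uio irqcontrol");
 */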

static int uio_find_mem_index(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (idev->info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static void uio_vma_open(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	idev->vma_count++;
}

static void uio_vma_close(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	idev->vma_count--;
}

static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct uio_device *idev = vma->vm_private_data;
	struct page *page;
	unsigned long offset;

	int mi = uio_find_mem_index(vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(idev->info->mem[mi].addr + offset);
	else
		page = vmalloc_to_page((void *)(unsigned long)
				idev->info->mem[mi].addr + offset);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct uio_vm_ops = {
	.open = uio_vma_open,
	.close = uio_vma_close,
	.fault = uio_vma_fault,
};

static int uio_mmap_physical(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	int mi = uio_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma,
			       vma->vm_start,
			       idev->info->mem[mi].addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int uio_mmap_logical(struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &uio_vm_ops;
	uio_vma_open(vma);
	return 0;
}

static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	int mi;
	unsigned long requested_pages, actual_pages;
	int ret = 0;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	vma->vm_private_data = idev;

	mi = uio_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
			+ idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (requested_pages > actual_pages)
		return -EINVAL;

	if (idev->info->mmap) {
		ret = idev->info->mmap(idev->info, vma);
		return ret;
	}

	switch (idev->info->mem[mi].memtype) {
	case UIO_MEM_PHYS:
		return uio_mmap_physical(vma);
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
		return uio_mmap_logical(vma);
	default:
		return -EINVAL;
	}
}
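
/*
 * Illustrative userspace sketch, not part of this file: the mmap offset
 * selects the mapping, i.e. offset = N * page_size maps mem[N].
 * Hypothetical snippet for mapping 1:
 *
 *	long pg = sysconf(_SC_PAGESIZE);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 1 * pg);
 *
 * len must not exceed the page-rounded size of mem[1] (see the
 * requested_pages check above).
 */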

static const struct file_operations uio_fops = {
	.owner		= THIS_MODULE,
	.open		= uio_open,
	.release	= uio_release,
	.read		= uio_read,
	.write		= uio_write,
	.mmap		= uio_mmap,
	.poll		= uio_poll,
	.fasync		= uio_fasync,
	.llseek		= noop_llseek,
};

static int uio_major_init(void)
{
	static const char name[] = "uio";
	struct cdev *cdev = NULL;
	dev_t uio_dev = 0;
	int result;

	result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name);
	if (result)
		goto out;

	result = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev)
		goto out_unregister;

	cdev->owner = THIS_MODULE;
	cdev->ops = &uio_fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES);
	if (result)
		goto out_put;

	uio_major = MAJOR(uio_dev);
	uio_cdev = cdev;
	return 0;
out_put:
	kobject_put(&cdev->kobj);
out_unregister:
	unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES);
out:
	return result;
}

static void uio_major_cleanup(void)
{
	unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES);
	cdev_del(uio_cdev);
}

static int init_uio_class(void)
{
	int ret;

	/* This is the first time in here, set everything up properly */
	ret = uio_major_init();
	if (ret)
		goto exit;

	ret = class_register(&uio_class);
	if (ret) {
		printk(KERN_ERR "class_register failed for uio\n");
		goto err_class_register;
	}
	return 0;

err_class_register:
	uio_major_cleanup();
exit:
	return ret;
}

static void release_uio_class(void)
{
	class_unregister(&uio_class);
	uio_major_cleanup();
}

/**
 * uio_register_device - register a new userspace IO device
 * @owner: module that creates the new device
 * @parent: parent device
 * @info: UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
			  struct device *parent,
			  struct uio_info *info)
{
	struct uio_device *idev;
	int ret = 0;

	if (!parent || !info || !info->name || !info->version)
		return -EINVAL;

	info->uio_dev = NULL;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	idev->owner = owner;
	idev->info = info;
	init_waitqueue_head(&idev->wait);
	atomic_set(&idev->event, 0);

	ret = uio_get_minor(idev);
	if (ret)
		goto err_get_minor;

	idev->dev = device_create(&uio_class, parent,
				  MKDEV(uio_major, idev->minor), idev,
				  "uio%d", idev->minor);
	if (IS_ERR(idev->dev)) {
		printk(KERN_ERR "UIO: device register failed\n");
		ret = PTR_ERR(idev->dev);
		goto err_device_create;
	}

	ret = uio_dev_add_attributes(idev);
	if (ret)
		goto err_uio_dev_add_attributes;

	info->uio_dev = idev;

	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
		ret = request_irq(info->irq, uio_interrupt,
				  info->irq_flags, info->name, idev);
		if (ret)
			goto err_request_irq;
	}

	return 0;

err_request_irq:
	uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
	device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
err_device_create:
	uio_free_minor(idev);
err_get_minor:
	kfree(idev);
err_kzalloc:
	return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);
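
/*
 * Illustrative sketch, not part of this file: minimal registration from a
 * hypothetical platform driver's probe(). uio_register_device() is the
 * convenience wrapper from <linux/uio_driver.h> that passes THIS_MODULE
 * as the owner. All my_* names are assumptions.
 *
 *	static struct uio_info my_info = {
 *		.name		= "my_uio",
 *		.version	= "0.1",
 *		.irq		= UIO_IRQ_NONE,
 *	};
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct resource *r =
 *			platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *
 *		my_info.mem[0].memtype = UIO_MEM_PHYS;
 *		my_info.mem[0].addr = r->start;
 *		my_info.mem[0].size = resource_size(r);
 *		return uio_register_device(&pdev->dev, &my_info);
 *	}
 */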

/**
 * uio_unregister_device - unregister a userspace IO device
 * @info: UIO device capabilities
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;

	uio_free_minor(idev);

	if (info->irq && (info->irq != UIO_IRQ_CUSTOM))
		free_irq(info->irq, idev);

	uio_dev_del_attributes(idev);

	device_destroy(&uio_class, MKDEV(uio_major, idev->minor));
	kfree(idev);
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

static int __init uio_init(void)
{
	return init_uio_class();
}

static void __exit uio_exit(void)
{
	release_uio_class();
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");