/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */

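/*
 * Illustrative sketch of how userspace drives the control interface
 * described above (not part of this driver): bind /dev/raw/raw1 to a
 * block device with RAW_SETBIND on the control node.  The device paths
 * and the major/minor numbers are assumptions for the example; error
 * handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raw.h>
 *
 *	struct raw_config_request rq = {
 *		.raw_minor   = 1,
 *		.block_major = 8,	(e.g. /dev/sda)
 *		.block_minor = 0,
 *	};
 *	int fd = open("/dev/raw/rawctl", O_RDWR);
 *	ioctl(fd, RAW_SETBIND, &rq);
 *	close(fd);
 *
 * After the bind, reads and writes on /dev/raw/raw1 behave as O_DIRECT
 * block I/O on the bound device.
 */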
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
static struct raw_device_data *raw_devices;
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

static int max_raw_minors = MAX_RAW_MINORS;

module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
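/*
 * Example (illustrative): the limit above can be raised when the module
 * is loaded, e.g. "modprobe raw max_raw_minors=8192".
 */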

/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	mutex_lock(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
	if (err)
		goto out;
	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
	if (err)
		goto out1;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		file_inode(filp)->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	return 0;

out1:
	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
out:
	mutex_unlock(&raw_mutex);
	return err;
}

/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0) {
		/* Here inode->i_mapping == bdev->bd_inode->i_mapping */
		inode->i_mapping = &inode->i_data;
		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
	}
	mutex_unlock(&raw_mutex);

	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
	return 0;
}

/*
 * Forward ioctls to the underlying block device.
 */
static long
raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
{
	struct block_device *bdev = filp->private_data;
	return blkdev_ioctl(bdev, 0, command, arg);
}

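/*
 * Bind raw device 'number' to the block device identified by (major, minor).
 * A major/minor of 0/0 unbinds the raw device.  Called from the control
 * device's RAW_SETBIND ioctl.
 */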
static int bind_set(int number, u64 major, u64 minor)
{
	dev_t dev = MKDEV(major, minor);
	struct raw_device_data *rawdev;
	int err = 0;

	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;

	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -EINVAL;

	rawdev = &raw_devices[number];

	/*
	 * This is like making block devices, so demand the
	 * same capability
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * For now, we don't need to check that the underlying
	 * block device is present or not: we can do that when
	 * the raw device is opened.  Just check that the
	 * major/minor numbers make sense.
	 */

	if (MAJOR(dev) == 0 && dev != 0)
		return -EINVAL;

	mutex_lock(&raw_mutex);
	if (rawdev->inuse) {
		mutex_unlock(&raw_mutex);
		return -EBUSY;
	}
	if (rawdev->binding) {
		bdput(rawdev->binding);
		module_put(THIS_MODULE);
	}
	if (!dev) {
		/* unbind */
		rawdev->binding = NULL;
		device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
	} else {
		rawdev->binding = bdget(dev);
		if (rawdev->binding == NULL) {
			err = -ENOMEM;
		} else {
			dev_t raw = MKDEV(RAW_MAJOR, number);
			__module_get(THIS_MODULE);
			device_destroy(raw_class, raw);
			device_create(raw_class, NULL, raw, NULL,
				      "raw%d", number);
		}
	}
	mutex_unlock(&raw_mutex);
	return err;
}

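/*
 * Report the block device currently bound to raw device 'number';
 * *dev is set to 0 if the raw device is unbound.  Called from the
 * control device's RAW_GETBIND ioctl.
 */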
static int bind_get(int number, dev_t *dev)
{
	struct raw_device_data *rawdev;
	struct block_device *bdev;

	/* raw_devices[] only has max_raw_minors entries, so bound against
	 * the runtime limit, not the compile-time MAX_RAW_MINORS. */
	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;

	rawdev = &raw_devices[number];

	mutex_lock(&raw_mutex);
	bdev = rawdev->binding;
	*dev = bdev ? bdev->bd_dev : 0;
	mutex_unlock(&raw_mutex);
	return 0;
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static long raw_ctl_ioctl(struct file *filp, unsigned int command,
			  unsigned long arg)
{
	struct raw_config_request rq;
	dev_t dev;
	int err;

	switch (command) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}

#ifdef CONFIG_COMPAT
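/*
 * 32-bit userspace lays out struct raw_config_request differently
 * (compat_int_t/compat_u64), so the control ioctls get a separate
 * compat handler using this layout.
 */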
struct raw32_config_request {
	compat_int_t	raw_minor;
	compat_u64	block_major;
	compat_u64	block_minor;
};

static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct raw32_config_request __user *user_req = compat_ptr(arg);
	struct raw32_config_request rq;
	dev_t dev;
	int err = 0;

	switch (cmd) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user(user_req, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}
#endif

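/*
 * File operations for the bound raw devices.  Because raw_open() points
 * f_mapping at the block device's address_space and forces O_DIRECT,
 * the generic read/write paths below perform direct block I/O.
 */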
static const struct file_operations raw_fops = {
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= blkdev_aio_write,
	.fsync		= blkdev_fsync,
	.open		= raw_open,
	.release	= raw_release,
	.unlocked_ioctl = raw_ioctl,
	.llseek		= default_llseek,
	.owner		= THIS_MODULE,
};

static const struct file_operations raw_ctl_fops = {
	.unlocked_ioctl = raw_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= raw_ctl_compat_ioctl,
#endif
	.open		= raw_open,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct cdev raw_cdev;

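/* Name the class devices under /dev/raw/ (raw1..rawN and rawctl). */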
static char *raw_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}

static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;

	if (max_raw_minors < 1 || max_raw_minors > 65536) {
		printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
			" between 1 and 65536), using %d\n", MAX_RAW_MINORS);
		max_raw_minors = MAX_RAW_MINORS;
	}

	raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors);
	if (!raw_devices) {
		printk(KERN_ERR "Not enough memory for raw device structures\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = register_chrdev_region(dev, max_raw_minors, "raw");
	if (ret)
		goto error;

	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
	if (ret)
		goto error_region;

	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}
	raw_class->devnode = raw_devnode;
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");

	return 0;

error_region:
	unregister_chrdev_region(dev, max_raw_minors);
error:
	vfree(raw_devices);
	return ret;
}

static void __exit raw_exit(void)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
	class_destroy(raw_class);
	cdev_del(&raw_cdev);
	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
}

module_init(raw_init);
module_exit(raw_exit);
MODULE_LICENSE("GPL");