/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au> Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>

extern int max_threads;

static struct workqueue_struct *khelper_wq;

/*
 * kmod_thread_locker is used for deadlock avoidance. There is no explicit
 * locking to protect this global - it is private to the singleton khelper
 * thread and should only ever be modified by that thread.
 */
static const struct task_struct *kmod_thread_locker;

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *module_name, int wait)
{
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	return call_usermodehelper_fns(modprobe_path, argv, envp,
		wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code on failure. Note that a
 * successful module load does not mean the module did not then unload
 * and exit on an error of its own. Callers must check that the service
 * they requested is now available rather than blindly invoking it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	unsigned int max_modprobes;
	int ret;
	static atomic_t kmod_concurrent = ATOMIC_INIT(0);
#define MAX_KMOD_CONCURRENT 50	/* Completely arbitrary value - KAO */
	static int kmod_loop_msg;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* If modprobe needs a service that is in a module, we get a recursive
	 * loop. Limit the number of running kmod threads to max_threads/2 or
	 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
	 * would be to run the parents of this process, counting how many times
	 * kmod was invoked. That would mean accessing the internals of the
	 * process tables to get the command line, proc_pid_cmdline is static
	 * and it is not worth changing the proc code just to handle this case.
	 * KAO.
	 *
	 * "trace the ppid" is simple, but will fail if someone's
	 * parent exits. I think this is as good as it gets. --RR
	 */
	max_modprobes = min(max_threads/2, MAX_KMOD_CONCURRENT);
	atomic_inc(&kmod_concurrent);
	if (atomic_read(&kmod_concurrent) > max_modprobes) {
		/* We may be blaming an innocent here, but unlikely */
		if (kmod_loop_msg < 5) {
			printk(KERN_ERR
			       "request_module: runaway loop modprobe %s\n",
			       module_name);
			kmod_loop_msg++;
		}
		atomic_dec(&kmod_concurrent);
		return -ENOMEM;
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_dec(&kmod_concurrent);
	return ret;
}
EXPORT_SYMBOL(__request_module);
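
/*
 * Illustrative usage (a sketch, not part of the original file): most callers
 * go through the request_module() wrapper from <linux/kmod.h>, which expands
 * to __request_module(true, ...). A caller must then re-check that the
 * requested facility actually registered, e.g.:
 *
 *	if (request_module("fs-%s", name) != 0)
 *		return ERR_PTR(-ENODEV);
 *	// even on success, re-scan the filesystem list before relying on it
 */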
#endif /* CONFIG_MODULES */

/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto fail;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto fail;
		}
	}

	commit_creds(new);

	retval = kernel_execve(sub_info->path,
			       (const char *const *)sub_info->argv,
			       (const char *const *)sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	return 0;
}

static int call_helper(void *data)
{
	/* Worker thread started blocking khelper thread. */
	kmod_thread_locker = current;
	return ____call_usermodehelper(data);
}

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
	spin_unlock_irq(&current->sighand->siglock);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	umh_complete(sub_info);
	return 0;
}

/* This is run by khelper thread  */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully. We need the data structures to stay around
	 * until that is done. */
	if (wait == UMH_WAIT_PROC)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else {
		pid = kernel_thread(call_helper, sub_info,
				    CLONE_VFORK | SIGCHLD);
		/* Worker thread stopped blocking khelper thread. */
		kmod_thread_locker = NULL;
	}

	switch (wait) {
	case UMH_NO_WAIT:
		call_usermodehelper_freeinfo(sub_info);
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		/* FALLTHROUGH */
	case UMH_WAIT_EXEC:
		if (pid < 0)
			sub_info->retval = pid;
		umh_complete(sub_info);
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
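
/*
 * Usage sketch (illustrative, not from the original file): code that may race
 * with system suspend, such as firmware loading, typically brackets its helper
 * invocation with the read lock so it either runs before helpers are disabled
 * or backs off cleanly:
 *
 *	ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;	// helpers disabled, e.g. during suspend
 *	...start the usermode helper...
 *	usermodehelper_read_unlock();
 */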

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
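
/*
 * Usage sketch (illustrative, assuming the usermodehelper_disable() and
 * usermodehelper_enable() wrappers from <linux/kmod.h>): the suspend and
 * hibernation paths disable helpers before freezing userspace and re-enable
 * them on resume, roughly:
 *
 *	error = usermodehelper_disable();	// __usermodehelper_disable(UMH_DISABLED)
 *	if (error)
 *		goto Finish;
 *	...freeze processes, enter the sleep state...
 *	usermodehelper_enable();		// __usermodehelper_set_disable_depth(UMH_ENABLED)
 */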

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic_inc();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure. This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 */
static
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
						  char **envp, gfp_t gfp_mask)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, __call_usermodehelper);
	sub_info->path = path;
	sub_info->argv = argv;
	sub_info->envp = envp;
out:
	return sub_info;
}

/**
 * call_usermodehelper_setfns - set a cleanup/init function
 * @info: a subprocess_info returned by call_usermodehelper_setup
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * The init function is used to customize the helper process prior to
 * exec. A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is about
 * to be freed. This can be used for freeing the argv and envp. The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
static
void call_usermodehelper_setfns(struct subprocess_info *info,
		    int (*init)(struct subprocess_info *info, struct cred *new),
		    void (*cleanup)(struct subprocess_info *info),
		    void *data)
{
	info->cleanup = cleanup;
	info->init = init;
	info->data = data;
}

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when -1 don't wait at all, but you get no useful error back when
 *        the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application. The application is started
 * asynchronously if wait is not set, and runs as a child of keventd.
 * (i.e. it runs with full root capabilities).
 */
static
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	helper_lock();
	if (sub_info->path[0] == '\0')
		goto out;

	if (!khelper_wq || usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}
	/*
	 * The worker thread must not wait for the khelper thread in the
	 * wait_for_completion() below if it was created with the CLONE_VFORK
	 * flag, because the khelper thread is already waiting for this thread
	 * at wait_for_completion() in do_fork().
	 */
	if (wait != UMH_NO_WAIT && current == kmod_thread_locker) {
		retval = -EBUSY;
		goto out;
	}

	sub_info->complete = &done;
	sub_info->wait = wait;

	queue_work(khelper_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}

/*
 * call_usermodehelper_fns() will not run the caller-provided cleanup function
 * if a memory allocation failure is experienced. So the caller might need to
 * check the call_usermodehelper_fns() return value: if it is -ENOMEM, perform
 * the necessary cleanup within the caller.
 */
int call_usermodehelper_fns(
	char *path, char **argv, char **envp, int wait,
	int (*init)(struct subprocess_info *info, struct cred *new),
	void (*cleanup)(struct subprocess_info *), void *data)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);

	if (info == NULL)
		return -ENOMEM;

	call_usermodehelper_setfns(info, init, cleanup, data);

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper_fns);
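
/*
 * Usage sketch (illustrative only; the helper path and argv below are just an
 * example, not something this file runs): a caller that wants to run a fixed
 * binary with no init/cleanup callbacks and wait for the exec to succeed
 * could do:
 *
 *	static char *argv[] = { "/sbin/poweroff", NULL };
 *	static char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
 *
 *	ret = call_usermodehelper_fns("/sbin/poweroff", argv, envp,
 *				      UMH_WAIT_EXEC, NULL, NULL, NULL);
 */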

static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace. Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};
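
/*
 * Note (a sketch, assuming this table is hooked up under the kernel/ sysctl
 * directory as "usermodehelper", as mainline does): the two entries above are
 * then visible as /proc/sys/kernel/usermodehelper/bset and
 * /proc/sys/kernel/usermodehelper/inheritable, and writes can only drop
 * capability bits, never add them back (see proc_cap_handler()).
 */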

void __init usermodehelper_init(void)
{
	khelper_wq = create_singlethread_workqueue("khelper");
	BUG_ON(!khelper_wq);
}