linux/kernel/sys.c, source at commit ec7cab4cbb721bff91ec924ec691efd8daf36579 (Maarten ter Huurne, "MIPS: JZ4740: A320: Updated quickstart documentation").
1 | /* |
2 | * linux/kernel/sys.c |
3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | */ |
6 | |
7 | #include <linux/module.h> |
8 | #include <linux/mm.h> |
9 | #include <linux/utsname.h> |
10 | #include <linux/mman.h> |
11 | #include <linux/notifier.h> |
12 | #include <linux/reboot.h> |
13 | #include <linux/prctl.h> |
14 | #include <linux/highuid.h> |
15 | #include <linux/fs.h> |
16 | #include <linux/perf_event.h> |
17 | #include <linux/resource.h> |
18 | #include <linux/kernel.h> |
19 | #include <linux/kexec.h> |
20 | #include <linux/workqueue.h> |
21 | #include <linux/capability.h> |
22 | #include <linux/device.h> |
23 | #include <linux/key.h> |
24 | #include <linux/times.h> |
25 | #include <linux/posix-timers.h> |
26 | #include <linux/security.h> |
27 | #include <linux/dcookies.h> |
28 | #include <linux/suspend.h> |
29 | #include <linux/tty.h> |
30 | #include <linux/signal.h> |
31 | #include <linux/cn_proc.h> |
32 | #include <linux/getcpu.h> |
33 | #include <linux/task_io_accounting_ops.h> |
34 | #include <linux/seccomp.h> |
35 | #include <linux/cpu.h> |
36 | #include <linux/personality.h> |
37 | #include <linux/ptrace.h> |
38 | #include <linux/fs_struct.h> |
39 | #include <linux/gfp.h> |
40 | #include <linux/syscore_ops.h> |
41 | |
42 | #include <linux/compat.h> |
43 | #include <linux/syscalls.h> |
44 | #include <linux/kprobes.h> |
45 | #include <linux/user_namespace.h> |
46 | |
47 | #include <linux/kmsg_dump.h> |
48 | |
49 | #include <asm/uaccess.h> |
50 | #include <asm/io.h> |
51 | #include <asm/unistd.h> |
52 | |
53 | #ifndef SET_UNALIGN_CTL |
54 | # define SET_UNALIGN_CTL(a,b) (-EINVAL) |
55 | #endif |
56 | #ifndef GET_UNALIGN_CTL |
57 | # define GET_UNALIGN_CTL(a,b) (-EINVAL) |
58 | #endif |
59 | #ifndef SET_FPEMU_CTL |
60 | # define SET_FPEMU_CTL(a,b) (-EINVAL) |
61 | #endif |
62 | #ifndef GET_FPEMU_CTL |
63 | # define GET_FPEMU_CTL(a,b) (-EINVAL) |
64 | #endif |
65 | #ifndef SET_FPEXC_CTL |
66 | # define SET_FPEXC_CTL(a,b) (-EINVAL) |
67 | #endif |
68 | #ifndef GET_FPEXC_CTL |
69 | # define GET_FPEXC_CTL(a,b) (-EINVAL) |
70 | #endif |
71 | #ifndef GET_ENDIAN |
72 | # define GET_ENDIAN(a,b) (-EINVAL) |
73 | #endif |
74 | #ifndef SET_ENDIAN |
75 | # define SET_ENDIAN(a,b) (-EINVAL) |
76 | #endif |
77 | #ifndef GET_TSC_CTL |
78 | # define GET_TSC_CTL(a) (-EINVAL) |
79 | #endif |
80 | #ifndef SET_TSC_CTL |
81 | # define SET_TSC_CTL(a) (-EINVAL) |
82 | #endif |
83 | |
84 | /* |
85 | * this is where the system-wide overflow UID and GID are defined, for |
86 | * architectures that now have 32-bit UID/GID but didn't in the past |
87 | */ |
88 | |
89 | int overflowuid = DEFAULT_OVERFLOWUID; |
90 | int overflowgid = DEFAULT_OVERFLOWGID; |
91 | |
92 | #ifdef CONFIG_UID16 |
93 | EXPORT_SYMBOL(overflowuid); |
94 | EXPORT_SYMBOL(overflowgid); |
95 | #endif |
96 | |
97 | /* |
98 | * The same as above, but for filesystems which can only store a 16-bit |
99 | * UID and GID. As such, this is needed on all architectures. |
100 | */ |
101 | |
102 | int fs_overflowuid = DEFAULT_FS_OVERFLOWUID; |
103 | int fs_overflowgid = DEFAULT_FS_OVERFLOWGID; |
104 | |
105 | EXPORT_SYMBOL(fs_overflowuid); |
106 | EXPORT_SYMBOL(fs_overflowgid); |
107 | |
108 | /* |
109 | * this indicates whether you can reboot with ctrl-alt-del: the default is yes |
110 | */ |
111 | |
112 | int C_A_D = 1; |
113 | struct pid *cad_pid; |
114 | EXPORT_SYMBOL(cad_pid); |
115 | |
116 | /* |
117 | * If set, this is used for preparing the system to power off. |
118 | */ |
119 | |
120 | void (*pm_power_off_prepare)(void); |
121 | |
122 | /* |
123 | * Returns true if current's euid is the same as p's uid or euid, |
124 | * or if current has CAP_SYS_NICE in p's user_ns. |
125 | * |
126 | * Called with the RCU read lock held, so the creds are safe. |
127 | */ |
128 | static bool set_one_prio_perm(struct task_struct *p) |
129 | { |
130 | const struct cred *cred = current_cred(), *pcred = __task_cred(p); |
131 | |
132 | if (pcred->user->user_ns == cred->user->user_ns && |
133 | (pcred->uid == cred->euid || |
134 | pcred->euid == cred->euid)) |
135 | return true; |
136 | if (ns_capable(pcred->user->user_ns, CAP_SYS_NICE)) |
137 | return true; |
138 | return false; |
139 | } |
140 | |
141 | /* |
142 | * set the priority of a task |
143 | * - the caller must hold the RCU read lock |
144 | */ |
145 | static int set_one_prio(struct task_struct *p, int niceval, int error) |
146 | { |
147 | int no_nice; |
148 | |
149 | if (!set_one_prio_perm(p)) { |
150 | error = -EPERM; |
151 | goto out; |
152 | } |
153 | if (niceval < task_nice(p) && !can_nice(p, niceval)) { |
154 | error = -EACCES; |
155 | goto out; |
156 | } |
157 | no_nice = security_task_setnice(p, niceval); |
158 | if (no_nice) { |
159 | error = no_nice; |
160 | goto out; |
161 | } |
162 | if (error == -ESRCH) |
163 | error = 0; |
164 | set_user_nice(p, niceval); |
165 | out: |
166 | return error; |
167 | } |
168 | |
169 | SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) |
170 | { |
171 | struct task_struct *g, *p; |
172 | struct user_struct *user; |
173 | const struct cred *cred = current_cred(); |
174 | int error = -EINVAL; |
175 | struct pid *pgrp; |
176 | |
177 | if (which > PRIO_USER || which < PRIO_PROCESS) |
178 | goto out; |
179 | |
180 | /* normalize: avoid signed division (rounding problems) */ |
181 | error = -ESRCH; |
182 | if (niceval < -20) |
183 | niceval = -20; |
184 | if (niceval > 19) |
185 | niceval = 19; |
186 | |
187 | rcu_read_lock(); |
188 | read_lock(&tasklist_lock); |
189 | switch (which) { |
190 | case PRIO_PROCESS: |
191 | if (who) |
192 | p = find_task_by_vpid(who); |
193 | else |
194 | p = current; |
195 | if (p) |
196 | error = set_one_prio(p, niceval, error); |
197 | break; |
198 | case PRIO_PGRP: |
199 | if (who) |
200 | pgrp = find_vpid(who); |
201 | else |
202 | pgrp = task_pgrp(current); |
203 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
204 | error = set_one_prio(p, niceval, error); |
205 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
206 | break; |
207 | case PRIO_USER: |
208 | user = (struct user_struct *) cred->user; |
209 | if (!who) |
210 | who = cred->uid; |
211 | else if ((who != cred->uid) && |
212 | !(user = find_user(who))) |
213 | goto out_unlock; /* No processes for this user */ |
214 | |
215 | do_each_thread(g, p) { |
216 | if (__task_cred(p)->uid == who) |
217 | error = set_one_prio(p, niceval, error); |
218 | } while_each_thread(g, p); |
219 | if (who != cred->uid) |
220 | free_uid(user); /* For find_user() */ |
221 | break; |
222 | } |
223 | out_unlock: |
224 | read_unlock(&tasklist_lock); |
225 | rcu_read_unlock(); |
226 | out: |
227 | return error; |
228 | } |
229 | |
230 | /* |
231 | * Ugh. To avoid negative return values, "getpriority()" will |
232 | * not return the normal nice-value, but a negated value that |
233 | * has been offset by 20 (ie it returns 40..1 instead of -20..19) |
234 | * to stay compatible. |
235 | */ |
236 | SYSCALL_DEFINE2(getpriority, int, which, int, who) |
237 | { |
238 | struct task_struct *g, *p; |
239 | struct user_struct *user; |
240 | const struct cred *cred = current_cred(); |
241 | long niceval, retval = -ESRCH; |
242 | struct pid *pgrp; |
243 | |
244 | if (which > PRIO_USER || which < PRIO_PROCESS) |
245 | return -EINVAL; |
246 | |
247 | rcu_read_lock(); |
248 | read_lock(&tasklist_lock); |
249 | switch (which) { |
250 | case PRIO_PROCESS: |
251 | if (who) |
252 | p = find_task_by_vpid(who); |
253 | else |
254 | p = current; |
255 | if (p) { |
256 | niceval = 20 - task_nice(p); |
257 | if (niceval > retval) |
258 | retval = niceval; |
259 | } |
260 | break; |
261 | case PRIO_PGRP: |
262 | if (who) |
263 | pgrp = find_vpid(who); |
264 | else |
265 | pgrp = task_pgrp(current); |
266 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
267 | niceval = 20 - task_nice(p); |
268 | if (niceval > retval) |
269 | retval = niceval; |
270 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
271 | break; |
272 | case PRIO_USER: |
273 | user = (struct user_struct *) cred->user; |
274 | if (!who) |
275 | who = cred->uid; |
276 | else if ((who != cred->uid) && |
277 | !(user = find_user(who))) |
278 | goto out_unlock; /* No processes for this user */ |
279 | |
280 | do_each_thread(g, p) { |
281 | if (__task_cred(p)->uid == who) { |
282 | niceval = 20 - task_nice(p); |
283 | if (niceval > retval) |
284 | retval = niceval; |
285 | } |
286 | } while_each_thread(g, p); |
287 | if (who != cred->uid) |
288 | free_uid(user); /* for find_user() */ |
289 | break; |
290 | } |
291 | out_unlock: |
292 | read_unlock(&tasklist_lock); |
293 | rcu_read_unlock(); |
294 | |
295 | return retval; |
296 | } |
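As a userspace illustration (not part of this file), the 20-offset convention described above is visible when the syscall is invoked directly; the glibc getpriority() wrapper normally converts the result back into a nice value. A minimal sketch:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/resource.h>

int main(void)
{
	/* Raw syscall: returns 20 - nice (i.e. 1..40), never a negative "success". */
	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);

	if (raw < 0) {
		perror("getpriority");
		return 1;
	}
	printf("raw return %ld => nice value %ld\n", raw, 20 - raw);
	return 0;
}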
297 | |
298 | /** |
299 | * emergency_restart - reboot the system |
300 | * |
301 | * Without shutting down any hardware or taking any locks |
302 | * reboot the system. This is called when we know we are in |
303 | * trouble so this is our best effort to reboot. This is |
304 | * safe to call in interrupt context. |
305 | */ |
306 | void emergency_restart(void) |
307 | { |
308 | kmsg_dump(KMSG_DUMP_EMERG); |
309 | machine_emergency_restart(); |
310 | } |
311 | EXPORT_SYMBOL_GPL(emergency_restart); |
312 | |
313 | void kernel_restart_prepare(char *cmd) |
314 | { |
315 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); |
316 | system_state = SYSTEM_RESTART; |
317 | device_shutdown(); |
318 | syscore_shutdown(); |
319 | } |
320 | |
321 | /** |
322 | * kernel_restart - reboot the system |
323 | * @cmd: pointer to buffer containing command to execute for restart |
324 | * or %NULL |
325 | * |
326 | * Shutdown everything and perform a clean reboot. |
327 | * This is not safe to call in interrupt context. |
328 | */ |
329 | void kernel_restart(char *cmd) |
330 | { |
331 | kernel_restart_prepare(cmd); |
332 | if (!cmd) |
333 | printk(KERN_EMERG "Restarting system.\n"); |
334 | else |
335 | printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); |
336 | kmsg_dump(KMSG_DUMP_RESTART); |
337 | machine_restart(cmd); |
338 | } |
339 | EXPORT_SYMBOL_GPL(kernel_restart); |
340 | |
341 | static void kernel_shutdown_prepare(enum system_states state) |
342 | { |
343 | blocking_notifier_call_chain(&reboot_notifier_list, |
344 | (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); |
345 | system_state = state; |
346 | device_shutdown(); |
347 | } |
348 | /** |
349 | * kernel_halt - halt the system |
350 | * |
351 | * Shutdown everything and perform a clean system halt. |
352 | */ |
353 | void kernel_halt(void) |
354 | { |
355 | kernel_shutdown_prepare(SYSTEM_HALT); |
356 | syscore_shutdown(); |
357 | printk(KERN_EMERG "System halted.\n"); |
358 | kmsg_dump(KMSG_DUMP_HALT); |
359 | machine_halt(); |
360 | } |
361 | |
362 | EXPORT_SYMBOL_GPL(kernel_halt); |
363 | |
364 | /** |
365 | * kernel_power_off - power_off the system |
366 | * |
367 | * Shutdown everything and perform a clean system power_off. |
368 | */ |
369 | void kernel_power_off(void) |
370 | { |
371 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); |
372 | if (pm_power_off_prepare) |
373 | pm_power_off_prepare(); |
374 | disable_nonboot_cpus(); |
375 | syscore_shutdown(); |
376 | printk(KERN_EMERG "Power down.\n"); |
377 | kmsg_dump(KMSG_DUMP_POWEROFF); |
378 | machine_power_off(); |
379 | } |
380 | EXPORT_SYMBOL_GPL(kernel_power_off); |
381 | |
382 | static DEFINE_MUTEX(reboot_mutex); |
383 | |
384 | /* |
385 | * Reboot system call: for obvious reasons only root may call it, |
386 | * and even root needs to set up some magic numbers in the registers |
387 | * so that some mistake won't make this reboot the whole machine. |
388 | * You can also set the meaning of the ctrl-alt-del-key here. |
389 | * |
390 | * reboot doesn't sync: do that yourself before calling this. |
391 | */ |
392 | SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, |
393 | void __user *, arg) |
394 | { |
395 | char buffer[256]; |
396 | int ret = 0; |
397 | |
398 | /* We only trust the superuser with rebooting the system. */ |
399 | if (!capable(CAP_SYS_BOOT)) |
400 | return -EPERM; |
401 | |
402 | /* For safety, we require "magic" arguments. */ |
403 | if (magic1 != LINUX_REBOOT_MAGIC1 || |
404 | (magic2 != LINUX_REBOOT_MAGIC2 && |
405 | magic2 != LINUX_REBOOT_MAGIC2A && |
406 | magic2 != LINUX_REBOOT_MAGIC2B && |
407 | magic2 != LINUX_REBOOT_MAGIC2C)) |
408 | return -EINVAL; |
409 | |
410 | /* Instead of trying to make the power_off code look like |
411 | * halt when pm_power_off is not set, do it the easy way. |
412 | */ |
413 | if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) |
414 | cmd = LINUX_REBOOT_CMD_HALT; |
415 | |
416 | mutex_lock(&reboot_mutex); |
417 | switch (cmd) { |
418 | case LINUX_REBOOT_CMD_RESTART: |
419 | kernel_restart(NULL); |
420 | break; |
421 | |
422 | case LINUX_REBOOT_CMD_CAD_ON: |
423 | C_A_D = 1; |
424 | break; |
425 | |
426 | case LINUX_REBOOT_CMD_CAD_OFF: |
427 | C_A_D = 0; |
428 | break; |
429 | |
430 | case LINUX_REBOOT_CMD_HALT: |
431 | kernel_halt(); |
432 | do_exit(0); |
433 | panic("cannot halt"); |
434 | |
435 | case LINUX_REBOOT_CMD_POWER_OFF: |
436 | kernel_power_off(); |
437 | do_exit(0); |
438 | break; |
439 | |
440 | case LINUX_REBOOT_CMD_RESTART2: |
441 | if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) { |
442 | ret = -EFAULT; |
443 | break; |
444 | } |
445 | buffer[sizeof(buffer) - 1] = '\0'; |
446 | |
447 | kernel_restart(buffer); |
448 | break; |
449 | |
450 | #ifdef CONFIG_KEXEC |
451 | case LINUX_REBOOT_CMD_KEXEC: |
452 | ret = kernel_kexec(); |
453 | break; |
454 | #endif |
455 | |
456 | #ifdef CONFIG_HIBERNATION |
457 | case LINUX_REBOOT_CMD_SW_SUSPEND: |
458 | ret = hibernate(); |
459 | break; |
460 | #endif |
461 | |
462 | default: |
463 | ret = -EINVAL; |
464 | break; |
465 | } |
466 | mutex_unlock(&reboot_mutex); |
467 | return ret; |
468 | } |
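A hedged userspace sketch (not part of this file) of the magic-number handshake checked above; it needs CAP_SYS_BOOT and really powers the machine off, so it is illustration only:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	sync();	/* the syscall itself does not sync, as the comment above notes */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_POWER_OFF, NULL);
}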
469 | |
470 | static void deferred_cad(struct work_struct *dummy) |
471 | { |
472 | kernel_restart(NULL); |
473 | } |
474 | |
475 | /* |
476 | * This function gets called by ctrl-alt-del - ie the keyboard interrupt. |
477 | * As it's called within an interrupt, it may NOT sync: the only choice |
478 | * is whether to reboot at once, or just ignore the ctrl-alt-del. |
479 | */ |
480 | void ctrl_alt_del(void) |
481 | { |
482 | static DECLARE_WORK(cad_work, deferred_cad); |
483 | |
484 | if (C_A_D) |
485 | schedule_work(&cad_work); |
486 | else |
487 | kill_cad_pid(SIGINT, 1); |
488 | } |
489 | |
490 | /* |
491 | * Unprivileged users may change the real gid to the effective gid |
492 | * or vice versa. (BSD-style) |
493 | * |
494 | * If you set the real gid at all, or set the effective gid to a value not |
495 | * equal to the real gid, then the saved gid is set to the new effective gid. |
496 | * |
497 | * This makes it possible for a setgid program to completely drop its |
498 | * privileges, which is often a useful assertion to make when you are doing |
499 | * a security audit over a program. |
500 | * |
501 | * The general idea is that a program which uses just setregid() will be |
502 | * 100% compatible with BSD. A program which uses just setgid() will be |
503 | * 100% compatible with POSIX with saved IDs. |
504 | * |
505 | * SMP: There are no races; the GIDs are checked only by filesystem |
506 | * operations (as far as semantic preservation is concerned). |
507 | */ |
508 | SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) |
509 | { |
510 | const struct cred *old; |
511 | struct cred *new; |
512 | int retval; |
513 | |
514 | new = prepare_creds(); |
515 | if (!new) |
516 | return -ENOMEM; |
517 | old = current_cred(); |
518 | |
519 | retval = -EPERM; |
520 | if (rgid != (gid_t) -1) { |
521 | if (old->gid == rgid || |
522 | old->egid == rgid || |
523 | nsown_capable(CAP_SETGID)) |
524 | new->gid = rgid; |
525 | else |
526 | goto error; |
527 | } |
528 | if (egid != (gid_t) -1) { |
529 | if (old->gid == egid || |
530 | old->egid == egid || |
531 | old->sgid == egid || |
532 | nsown_capable(CAP_SETGID)) |
533 | new->egid = egid; |
534 | else |
535 | goto error; |
536 | } |
537 | |
538 | if (rgid != (gid_t) -1 || |
539 | (egid != (gid_t) -1 && egid != old->gid)) |
540 | new->sgid = new->egid; |
541 | new->fsgid = new->egid; |
542 | |
543 | return commit_creds(new); |
544 | |
545 | error: |
546 | abort_creds(new); |
547 | return retval; |
548 | } |
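As a userspace illustration of the BSD semantics described above (not part of this file), a setgid program can drop its elevated group permanently by setting the real gid, which also rewrites the saved gid:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	gid_t rgid = getgid();	/* the invoking user's real gid */

	/* Setting the real gid also forces the saved gid to the new
	 * effective gid, so the setgid group cannot be regained. */
	if (setregid(rgid, rgid) != 0) {
		perror("setregid");
		return 1;
	}
	printf("gid=%u egid=%u\n", (unsigned)getgid(), (unsigned)getegid());
	return 0;
}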
549 | |
550 | /* |
551 | * setgid() is implemented like SysV w/ SAVED_IDS |
552 | * |
553 | * SMP: Same implicit races as above. |
554 | */ |
555 | SYSCALL_DEFINE1(setgid, gid_t, gid) |
556 | { |
557 | const struct cred *old; |
558 | struct cred *new; |
559 | int retval; |
560 | |
561 | new = prepare_creds(); |
562 | if (!new) |
563 | return -ENOMEM; |
564 | old = current_cred(); |
565 | |
566 | retval = -EPERM; |
567 | if (nsown_capable(CAP_SETGID)) |
568 | new->gid = new->egid = new->sgid = new->fsgid = gid; |
569 | else if (gid == old->gid || gid == old->sgid) |
570 | new->egid = new->fsgid = gid; |
571 | else |
572 | goto error; |
573 | |
574 | return commit_creds(new); |
575 | |
576 | error: |
577 | abort_creds(new); |
578 | return retval; |
579 | } |
580 | |
581 | /* |
582 | * change the user struct in a credentials set to match the new UID |
583 | */ |
584 | static int set_user(struct cred *new) |
585 | { |
586 | struct user_struct *new_user; |
587 | |
588 | new_user = alloc_uid(current_user_ns(), new->uid); |
589 | if (!new_user) |
590 | return -EAGAIN; |
591 | |
592 | if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) && |
593 | new_user != INIT_USER) { |
594 | free_uid(new_user); |
595 | return -EAGAIN; |
596 | } |
597 | |
598 | free_uid(new->user); |
599 | new->user = new_user; |
600 | return 0; |
601 | } |
602 | |
603 | /* |
604 | * Unprivileged users may change the real uid to the effective uid |
605 | * or vice versa. (BSD-style) |
606 | * |
607 | * If you set the real uid at all, or set the effective uid to a value not |
608 | * equal to the real uid, then the saved uid is set to the new effective uid. |
609 | * |
610 | * This makes it possible for a setuid program to completely drop its |
611 | * privileges, which is often a useful assertion to make when you are doing |
612 | * a security audit over a program. |
613 | * |
614 | * The general idea is that a program which uses just setreuid() will be |
615 | * 100% compatible with BSD. A program which uses just setuid() will be |
616 | * 100% compatible with POSIX with saved IDs. |
617 | */ |
618 | SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) |
619 | { |
620 | const struct cred *old; |
621 | struct cred *new; |
622 | int retval; |
623 | |
624 | new = prepare_creds(); |
625 | if (!new) |
626 | return -ENOMEM; |
627 | old = current_cred(); |
628 | |
629 | retval = -EPERM; |
630 | if (ruid != (uid_t) -1) { |
631 | new->uid = ruid; |
632 | if (old->uid != ruid && |
633 | old->euid != ruid && |
634 | !nsown_capable(CAP_SETUID)) |
635 | goto error; |
636 | } |
637 | |
638 | if (euid != (uid_t) -1) { |
639 | new->euid = euid; |
640 | if (old->uid != euid && |
641 | old->euid != euid && |
642 | old->suid != euid && |
643 | !nsown_capable(CAP_SETUID)) |
644 | goto error; |
645 | } |
646 | |
647 | if (new->uid != old->uid) { |
648 | retval = set_user(new); |
649 | if (retval < 0) |
650 | goto error; |
651 | } |
652 | if (ruid != (uid_t) -1 || |
653 | (euid != (uid_t) -1 && euid != old->uid)) |
654 | new->suid = new->euid; |
655 | new->fsuid = new->euid; |
656 | |
657 | retval = security_task_fix_setuid(new, old, LSM_SETID_RE); |
658 | if (retval < 0) |
659 | goto error; |
660 | |
661 | return commit_creds(new); |
662 | |
663 | error: |
664 | abort_creds(new); |
665 | return retval; |
666 | } |
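A hedged userspace sketch (not part of this file) of the classic BSD idiom this call enables: a setuid program swaps its real and effective uids to run unprivileged, then swaps them back:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Swap: run with the invoking user's uid as the effective uid. */
	if (setreuid(geteuid(), getuid()) != 0) {
		perror("setreuid");
		return 1;
	}
	printf("dropped:  uid=%u euid=%u\n", (unsigned)getuid(), (unsigned)geteuid());

	/* Swap back: the privileged id is still held in the real uid. */
	if (setreuid(geteuid(), getuid()) != 0) {
		perror("setreuid");
		return 1;
	}
	printf("restored: uid=%u euid=%u\n", (unsigned)getuid(), (unsigned)geteuid());
	return 0;
}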
667 | |
668 | /* |
669 | * setuid() is implemented like SysV with SAVED_IDS |
670 | * |
671 | * Note that SAVED_ID's is deficient in that a setuid root program |
672 | * like sendmail, for example, cannot set its uid to be a normal |
673 | * user and then switch back, because if you're root, setuid() sets |
674 | * the saved uid too. If you don't like this, blame the bright people |
675 | * in the POSIX committee and/or USG. Note that the BSD-style setreuid() |
676 | * will allow a root program to temporarily drop privileges and be able to |
677 | * regain them by swapping the real and effective uid. |
678 | */ |
679 | SYSCALL_DEFINE1(setuid, uid_t, uid) |
680 | { |
681 | const struct cred *old; |
682 | struct cred *new; |
683 | int retval; |
684 | |
685 | new = prepare_creds(); |
686 | if (!new) |
687 | return -ENOMEM; |
688 | old = current_cred(); |
689 | |
690 | retval = -EPERM; |
691 | if (nsown_capable(CAP_SETUID)) { |
692 | new->suid = new->uid = uid; |
693 | if (uid != old->uid) { |
694 | retval = set_user(new); |
695 | if (retval < 0) |
696 | goto error; |
697 | } |
698 | } else if (uid != old->uid && uid != new->suid) { |
699 | goto error; |
700 | } |
701 | |
702 | new->fsuid = new->euid = uid; |
703 | |
704 | retval = security_task_fix_setuid(new, old, LSM_SETID_ID); |
705 | if (retval < 0) |
706 | goto error; |
707 | |
708 | return commit_creds(new); |
709 | |
710 | error: |
711 | abort_creds(new); |
712 | return retval; |
713 | } |
714 | |
715 | |
716 | /* |
717 | * This function implements a generic ability to update ruid, euid, |
718 | * and suid. This allows you to implement the 4.4 compatible seteuid(). |
719 | */ |
720 | SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) |
721 | { |
722 | const struct cred *old; |
723 | struct cred *new; |
724 | int retval; |
725 | |
726 | new = prepare_creds(); |
727 | if (!new) |
728 | return -ENOMEM; |
729 | |
730 | old = current_cred(); |
731 | |
732 | retval = -EPERM; |
733 | if (!nsown_capable(CAP_SETUID)) { |
734 | if (ruid != (uid_t) -1 && ruid != old->uid && |
735 | ruid != old->euid && ruid != old->suid) |
736 | goto error; |
737 | if (euid != (uid_t) -1 && euid != old->uid && |
738 | euid != old->euid && euid != old->suid) |
739 | goto error; |
740 | if (suid != (uid_t) -1 && suid != old->uid && |
741 | suid != old->euid && suid != old->suid) |
742 | goto error; |
743 | } |
744 | |
745 | if (ruid != (uid_t) -1) { |
746 | new->uid = ruid; |
747 | if (ruid != old->uid) { |
748 | retval = set_user(new); |
749 | if (retval < 0) |
750 | goto error; |
751 | } |
752 | } |
753 | if (euid != (uid_t) -1) |
754 | new->euid = euid; |
755 | if (suid != (uid_t) -1) |
756 | new->suid = suid; |
757 | new->fsuid = new->euid; |
758 | |
759 | retval = security_task_fix_setuid(new, old, LSM_SETID_RES); |
760 | if (retval < 0) |
761 | goto error; |
762 | |
763 | return commit_creds(new); |
764 | |
765 | error: |
766 | abort_creds(new); |
767 | return retval; |
768 | } |
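As a userspace illustration (not part of this file), setting all three uids at once makes the drop permanent, and getresuid() confirms it; a minimal sketch:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uid_t target = getuid();	/* drop to the invoking user's uid */
	uid_t r, e, s;

	/* All three uids change together, so nothing is left to switch back to. */
	if (setresuid(target, target, target) != 0) {
		perror("setresuid");
		return 1;
	}
	if (getresuid(&r, &e, &s) == 0)
		printf("ruid=%u euid=%u suid=%u\n", (unsigned)r, (unsigned)e, (unsigned)s);
	return 0;
}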
769 | |
770 | SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid) |
771 | { |
772 | const struct cred *cred = current_cred(); |
773 | int retval; |
774 | |
775 | if (!(retval = put_user(cred->uid, ruid)) && |
776 | !(retval = put_user(cred->euid, euid))) |
777 | retval = put_user(cred->suid, suid); |
778 | |
779 | return retval; |
780 | } |
781 | |
782 | /* |
783 | * Same as above, but for rgid, egid, sgid. |
784 | */ |
785 | SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) |
786 | { |
787 | const struct cred *old; |
788 | struct cred *new; |
789 | int retval; |
790 | |
791 | new = prepare_creds(); |
792 | if (!new) |
793 | return -ENOMEM; |
794 | old = current_cred(); |
795 | |
796 | retval = -EPERM; |
797 | if (!nsown_capable(CAP_SETGID)) { |
798 | if (rgid != (gid_t) -1 && rgid != old->gid && |
799 | rgid != old->egid && rgid != old->sgid) |
800 | goto error; |
801 | if (egid != (gid_t) -1 && egid != old->gid && |
802 | egid != old->egid && egid != old->sgid) |
803 | goto error; |
804 | if (sgid != (gid_t) -1 && sgid != old->gid && |
805 | sgid != old->egid && sgid != old->sgid) |
806 | goto error; |
807 | } |
808 | |
809 | if (rgid != (gid_t) -1) |
810 | new->gid = rgid; |
811 | if (egid != (gid_t) -1) |
812 | new->egid = egid; |
813 | if (sgid != (gid_t) -1) |
814 | new->sgid = sgid; |
815 | new->fsgid = new->egid; |
816 | |
817 | return commit_creds(new); |
818 | |
819 | error: |
820 | abort_creds(new); |
821 | return retval; |
822 | } |
823 | |
824 | SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid) |
825 | { |
826 | const struct cred *cred = current_cred(); |
827 | int retval; |
828 | |
829 | if (!(retval = put_user(cred->gid, rgid)) && |
830 | !(retval = put_user(cred->egid, egid))) |
831 | retval = put_user(cred->sgid, sgid); |
832 | |
833 | return retval; |
834 | } |
835 | |
836 | |
837 | /* |
838 | * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This |
839 | * is used for "access()" and for the NFS daemon (letting nfsd stay at |
840 | * whatever uid it wants to). It normally shadows "euid", except when |
841 | * explicitly set by setfsuid() or for access(). |
842 | */ |
843 | SYSCALL_DEFINE1(setfsuid, uid_t, uid) |
844 | { |
845 | const struct cred *old; |
846 | struct cred *new; |
847 | uid_t old_fsuid; |
848 | |
849 | new = prepare_creds(); |
850 | if (!new) |
851 | return current_fsuid(); |
852 | old = current_cred(); |
853 | old_fsuid = old->fsuid; |
854 | |
855 | if (uid == old->uid || uid == old->euid || |
856 | uid == old->suid || uid == old->fsuid || |
857 | nsown_capable(CAP_SETUID)) { |
858 | if (uid != old_fsuid) { |
859 | new->fsuid = uid; |
860 | if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) |
861 | goto change_okay; |
862 | } |
863 | } |
864 | |
865 | abort_creds(new); |
866 | return old_fsuid; |
867 | |
868 | change_okay: |
869 | commit_creds(new); |
870 | return old_fsuid; |
871 | } |
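A hedged sketch (not part of this file) of the nfsd-style usage mentioned above: impersonate a client uid for filesystem permission checks only, then restore the original fsuid. The helper name and file path are arbitrary examples:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/fsuid.h>

/* Open a file with permission checks done as `client`, then restore. */
static int open_as(uid_t client, const char *path)
{
	uid_t saved = setfsuid(client);
	int fd = -1;

	/* setfsuid() reports no errors; calling it again and comparing the
	 * returned value is the usual way to see whether the switch worked. */
	if ((uid_t)setfsuid(client) == client)
		fd = open(path, O_RDONLY);

	setfsuid(saved);	/* back to our own fsuid */
	return fd;
}

int main(void)
{
	int fd = open_as(getuid(), "/etc/hostname");

	if (fd >= 0)
		close(fd);
	return 0;
}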
872 | |
873 | /* |
874 | * Same as above, but for the filesystem gid. |
875 | */ |
876 | SYSCALL_DEFINE1(setfsgid, gid_t, gid) |
877 | { |
878 | const struct cred *old; |
879 | struct cred *new; |
880 | gid_t old_fsgid; |
881 | |
882 | new = prepare_creds(); |
883 | if (!new) |
884 | return current_fsgid(); |
885 | old = current_cred(); |
886 | old_fsgid = old->fsgid; |
887 | |
888 | if (gid == old->gid || gid == old->egid || |
889 | gid == old->sgid || gid == old->fsgid || |
890 | nsown_capable(CAP_SETGID)) { |
891 | if (gid != old_fsgid) { |
892 | new->fsgid = gid; |
893 | goto change_okay; |
894 | } |
895 | } |
896 | |
897 | abort_creds(new); |
898 | return old_fsgid; |
899 | |
900 | change_okay: |
901 | commit_creds(new); |
902 | return old_fsgid; |
903 | } |
904 | |
905 | void do_sys_times(struct tms *tms) |
906 | { |
907 | cputime_t tgutime, tgstime, cutime, cstime; |
908 | |
909 | spin_lock_irq(¤t->sighand->siglock); |
910 | thread_group_times(current, &tgutime, &tgstime); |
911 | cutime = current->signal->cutime; |
912 | cstime = current->signal->cstime; |
913 | spin_unlock_irq(¤t->sighand->siglock); |
914 | tms->tms_utime = cputime_to_clock_t(tgutime); |
915 | tms->tms_stime = cputime_to_clock_t(tgstime); |
916 | tms->tms_cutime = cputime_to_clock_t(cutime); |
917 | tms->tms_cstime = cputime_to_clock_t(cstime); |
918 | } |
919 | |
920 | SYSCALL_DEFINE1(times, struct tms __user *, tbuf) |
921 | { |
922 | if (tbuf) { |
923 | struct tms tmp; |
924 | |
925 | do_sys_times(&tmp); |
926 | if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) |
927 | return -EFAULT; |
928 | } |
929 | force_successful_syscall_return(); |
930 | return (long) jiffies_64_to_clock_t(get_jiffies_64()); |
931 | } |
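As a userspace illustration (not part of this file), the fields filled in by do_sys_times() are clock ticks, so callers divide by sysconf(_SC_CLK_TCK):

#include <stdio.h>
#include <unistd.h>
#include <sys/times.h>

int main(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK);

	times(&t);	/* the return value is a tick count from an arbitrary epoch */
	printf("user %.2fs  system %.2fs\n",
	       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
	return 0;
}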
932 | |
933 | /* |
934 | * This needs some heavy checking ... |
935 | * I just haven't the stomach for it. I also don't fully |
936 | * understand sessions/pgrp etc. Let somebody who does explain it. |
937 | * |
938 | * OK, I think I have the protection semantics right.... this is really |
939 | * only important on a multi-user system anyway, to make sure one user |
940 | * can't send a signal to a process owned by another. -TYT, 12/12/91 |
941 | * |
942 | * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX. |
943 | * LBT 04.03.94 |
944 | */ |
945 | SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) |
946 | { |
947 | struct task_struct *p; |
948 | struct task_struct *group_leader = current->group_leader; |
949 | struct pid *pgrp; |
950 | int err; |
951 | |
952 | if (!pid) |
953 | pid = task_pid_vnr(group_leader); |
954 | if (!pgid) |
955 | pgid = pid; |
956 | if (pgid < 0) |
957 | return -EINVAL; |
958 | rcu_read_lock(); |
959 | |
960 | /* From this point forward we keep holding onto the tasklist lock |
961 | * so that our parent does not change from under us. -DaveM |
962 | */ |
963 | write_lock_irq(&tasklist_lock); |
964 | |
965 | err = -ESRCH; |
966 | p = find_task_by_vpid(pid); |
967 | if (!p) |
968 | goto out; |
969 | |
970 | err = -EINVAL; |
971 | if (!thread_group_leader(p)) |
972 | goto out; |
973 | |
974 | if (same_thread_group(p->real_parent, group_leader)) { |
975 | err = -EPERM; |
976 | if (task_session(p) != task_session(group_leader)) |
977 | goto out; |
978 | err = -EACCES; |
979 | if (p->did_exec) |
980 | goto out; |
981 | } else { |
982 | err = -ESRCH; |
983 | if (p != group_leader) |
984 | goto out; |
985 | } |
986 | |
987 | err = -EPERM; |
988 | if (p->signal->leader) |
989 | goto out; |
990 | |
991 | pgrp = task_pid(p); |
992 | if (pgid != pid) { |
993 | struct task_struct *g; |
994 | |
995 | pgrp = find_vpid(pgid); |
996 | g = pid_task(pgrp, PIDTYPE_PGID); |
997 | if (!g || task_session(g) != task_session(group_leader)) |
998 | goto out; |
999 | } |
1000 | |
1001 | err = security_task_setpgid(p, pgid); |
1002 | if (err) |
1003 | goto out; |
1004 | |
1005 | if (task_pgrp(p) != pgrp) |
1006 | change_pid(p, PIDTYPE_PGID, pgrp); |
1007 | |
1008 | err = 0; |
1009 | out: |
1010 | /* All paths lead to here, thus we are safe. -DaveM */ |
1011 | write_unlock_irq(&tasklist_lock); |
1012 | rcu_read_unlock(); |
1013 | return err; |
1014 | } |
1015 | |
1016 | SYSCALL_DEFINE1(getpgid, pid_t, pid) |
1017 | { |
1018 | struct task_struct *p; |
1019 | struct pid *grp; |
1020 | int retval; |
1021 | |
1022 | rcu_read_lock(); |
1023 | if (!pid) |
1024 | grp = task_pgrp(current); |
1025 | else { |
1026 | retval = -ESRCH; |
1027 | p = find_task_by_vpid(pid); |
1028 | if (!p) |
1029 | goto out; |
1030 | grp = task_pgrp(p); |
1031 | if (!grp) |
1032 | goto out; |
1033 | |
1034 | retval = security_task_getpgid(p); |
1035 | if (retval) |
1036 | goto out; |
1037 | } |
1038 | retval = pid_vnr(grp); |
1039 | out: |
1040 | rcu_read_unlock(); |
1041 | return retval; |
1042 | } |
1043 | |
1044 | #ifdef __ARCH_WANT_SYS_GETPGRP |
1045 | |
1046 | SYSCALL_DEFINE0(getpgrp) |
1047 | { |
1048 | return sys_getpgid(0); |
1049 | } |
1050 | |
1051 | #endif |
1052 | |
1053 | SYSCALL_DEFINE1(getsid, pid_t, pid) |
1054 | { |
1055 | struct task_struct *p; |
1056 | struct pid *sid; |
1057 | int retval; |
1058 | |
1059 | rcu_read_lock(); |
1060 | if (!pid) |
1061 | sid = task_session(current); |
1062 | else { |
1063 | retval = -ESRCH; |
1064 | p = find_task_by_vpid(pid); |
1065 | if (!p) |
1066 | goto out; |
1067 | sid = task_session(p); |
1068 | if (!sid) |
1069 | goto out; |
1070 | |
1071 | retval = security_task_getsid(p); |
1072 | if (retval) |
1073 | goto out; |
1074 | } |
1075 | retval = pid_vnr(sid); |
1076 | out: |
1077 | rcu_read_unlock(); |
1078 | return retval; |
1079 | } |
1080 | |
1081 | SYSCALL_DEFINE0(setsid) |
1082 | { |
1083 | struct task_struct *group_leader = current->group_leader; |
1084 | struct pid *sid = task_pid(group_leader); |
1085 | pid_t session = pid_vnr(sid); |
1086 | int err = -EPERM; |
1087 | |
1088 | write_lock_irq(&tasklist_lock); |
1089 | /* Fail if I am already a session leader */ |
1090 | if (group_leader->signal->leader) |
1091 | goto out; |
1092 | |
1093 | /* Fail if a process group id already exists that equals the |
1094 | * proposed session id. |
1095 | */ |
1096 | if (pid_task(sid, PIDTYPE_PGID)) |
1097 | goto out; |
1098 | |
1099 | group_leader->signal->leader = 1; |
1100 | __set_special_pids(sid); |
1101 | |
1102 | proc_clear_tty(group_leader); |
1103 | |
1104 | err = session; |
1105 | out: |
1106 | write_unlock_irq(&tasklist_lock); |
1107 | if (err > 0) { |
1108 | proc_sid_connector(group_leader); |
1109 | sched_autogroup_create_attach(group_leader); |
1110 | } |
1111 | return err; |
1112 | } |
1113 | |
1114 | DECLARE_RWSEM(uts_sem); |
1115 | |
1116 | #ifdef COMPAT_UTS_MACHINE |
1117 | #define override_architecture(name) \ |
1118 | (personality(current->personality) == PER_LINUX32 && \ |
1119 | copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ |
1120 | sizeof(COMPAT_UTS_MACHINE))) |
1121 | #else |
1122 | #define override_architecture(name) 0 |
1123 | #endif |
1124 | |
1125 | SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) |
1126 | { |
1127 | int errno = 0; |
1128 | |
1129 | down_read(&uts_sem); |
1130 | if (copy_to_user(name, utsname(), sizeof *name)) |
1131 | errno = -EFAULT; |
1132 | up_read(&uts_sem); |
1133 | |
1134 | if (!errno && override_architecture(name)) |
1135 | errno = -EFAULT; |
1136 | return errno; |
1137 | } |
1138 | |
1139 | #ifdef __ARCH_WANT_SYS_OLD_UNAME |
1140 | /* |
1141 | * Old cruft |
1142 | */ |
1143 | SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) |
1144 | { |
1145 | int error = 0; |
1146 | |
1147 | if (!name) |
1148 | return -EFAULT; |
1149 | |
1150 | down_read(&uts_sem); |
1151 | if (copy_to_user(name, utsname(), sizeof(*name))) |
1152 | error = -EFAULT; |
1153 | up_read(&uts_sem); |
1154 | |
1155 | if (!error && override_architecture(name)) |
1156 | error = -EFAULT; |
1157 | return error; |
1158 | } |
1159 | |
1160 | SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) |
1161 | { |
1162 | int error; |
1163 | |
1164 | if (!name) |
1165 | return -EFAULT; |
1166 | if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname))) |
1167 | return -EFAULT; |
1168 | |
1169 | down_read(&uts_sem); |
1170 | error = __copy_to_user(&name->sysname, &utsname()->sysname, |
1171 | __OLD_UTS_LEN); |
1172 | error |= __put_user(0, name->sysname + __OLD_UTS_LEN); |
1173 | error |= __copy_to_user(&name->nodename, &utsname()->nodename, |
1174 | __OLD_UTS_LEN); |
1175 | error |= __put_user(0, name->nodename + __OLD_UTS_LEN); |
1176 | error |= __copy_to_user(&name->release, &utsname()->release, |
1177 | __OLD_UTS_LEN); |
1178 | error |= __put_user(0, name->release + __OLD_UTS_LEN); |
1179 | error |= __copy_to_user(&name->version, &utsname()->version, |
1180 | __OLD_UTS_LEN); |
1181 | error |= __put_user(0, name->version + __OLD_UTS_LEN); |
1182 | error |= __copy_to_user(&name->machine, &utsname()->machine, |
1183 | __OLD_UTS_LEN); |
1184 | error |= __put_user(0, name->machine + __OLD_UTS_LEN); |
1185 | up_read(&uts_sem); |
1186 | |
1187 | if (!error && override_architecture(name)) |
1188 | error = -EFAULT; |
1189 | return error ? -EFAULT : 0; |
1190 | } |
1191 | #endif |
1192 | |
1193 | SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) |
1194 | { |
1195 | int errno; |
1196 | char tmp[__NEW_UTS_LEN]; |
1197 | |
1198 | if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) |
1199 | return -EPERM; |
1200 | |
1201 | if (len < 0 || len > __NEW_UTS_LEN) |
1202 | return -EINVAL; |
1203 | down_write(&uts_sem); |
1204 | errno = -EFAULT; |
1205 | if (!copy_from_user(tmp, name, len)) { |
1206 | struct new_utsname *u = utsname(); |
1207 | |
1208 | memcpy(u->nodename, tmp, len); |
1209 | memset(u->nodename + len, 0, sizeof(u->nodename) - len); |
1210 | errno = 0; |
1211 | } |
1212 | up_write(&uts_sem); |
1213 | return errno; |
1214 | } |
1215 | |
1216 | #ifdef __ARCH_WANT_SYS_GETHOSTNAME |
1217 | |
1218 | SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) |
1219 | { |
1220 | int i, errno; |
1221 | struct new_utsname *u; |
1222 | |
1223 | if (len < 0) |
1224 | return -EINVAL; |
1225 | down_read(&uts_sem); |
1226 | u = utsname(); |
1227 | i = 1 + strlen(u->nodename); |
1228 | if (i > len) |
1229 | i = len; |
1230 | errno = 0; |
1231 | if (copy_to_user(name, u->nodename, i)) |
1232 | errno = -EFAULT; |
1233 | up_read(&uts_sem); |
1234 | return errno; |
1235 | } |
1236 | |
1237 | #endif |
1238 | |
1239 | /* |
1240 | * Only setdomainname; getdomainname can be implemented by calling |
1241 | * uname() |
1242 | */ |
1243 | SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) |
1244 | { |
1245 | int errno; |
1246 | char tmp[__NEW_UTS_LEN]; |
1247 | |
1248 | if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) |
1249 | return -EPERM; |
1250 | if (len < 0 || len > __NEW_UTS_LEN) |
1251 | return -EINVAL; |
1252 | |
1253 | down_write(&uts_sem); |
1254 | errno = -EFAULT; |
1255 | if (!copy_from_user(tmp, name, len)) { |
1256 | struct new_utsname *u = utsname(); |
1257 | |
1258 | memcpy(u->domainname, tmp, len); |
1259 | memset(u->domainname + len, 0, sizeof(u->domainname) - len); |
1260 | errno = 0; |
1261 | } |
1262 | up_write(&uts_sem); |
1263 | return errno; |
1264 | } |
1265 | |
1266 | SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) |
1267 | { |
1268 | struct rlimit value; |
1269 | int ret; |
1270 | |
1271 | ret = do_prlimit(current, resource, NULL, &value); |
1272 | if (!ret) |
1273 | ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; |
1274 | |
1275 | return ret; |
1276 | } |
1277 | |
1278 | #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT |
1279 | |
1280 | /* |
1281 | * Backward compatibility for getrlimit. Needed for some apps. |
1282 | */ |
1283 | |
1284 | SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, |
1285 | struct rlimit __user *, rlim) |
1286 | { |
1287 | struct rlimit x; |
1288 | if (resource >= RLIM_NLIMITS) |
1289 | return -EINVAL; |
1290 | |
1291 | task_lock(current->group_leader); |
1292 | x = current->signal->rlim[resource]; |
1293 | task_unlock(current->group_leader); |
1294 | if (x.rlim_cur > 0x7FFFFFFF) |
1295 | x.rlim_cur = 0x7FFFFFFF; |
1296 | if (x.rlim_max > 0x7FFFFFFF) |
1297 | x.rlim_max = 0x7FFFFFFF; |
1298 | return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0; |
1299 | } |
1300 | |
1301 | #endif |
1302 | |
1303 | static inline bool rlim64_is_infinity(__u64 rlim64) |
1304 | { |
1305 | #if BITS_PER_LONG < 64 |
1306 | return rlim64 >= ULONG_MAX; |
1307 | #else |
1308 | return rlim64 == RLIM64_INFINITY; |
1309 | #endif |
1310 | } |
1311 | |
1312 | static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64) |
1313 | { |
1314 | if (rlim->rlim_cur == RLIM_INFINITY) |
1315 | rlim64->rlim_cur = RLIM64_INFINITY; |
1316 | else |
1317 | rlim64->rlim_cur = rlim->rlim_cur; |
1318 | if (rlim->rlim_max == RLIM_INFINITY) |
1319 | rlim64->rlim_max = RLIM64_INFINITY; |
1320 | else |
1321 | rlim64->rlim_max = rlim->rlim_max; |
1322 | } |
1323 | |
1324 | static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim) |
1325 | { |
1326 | if (rlim64_is_infinity(rlim64->rlim_cur)) |
1327 | rlim->rlim_cur = RLIM_INFINITY; |
1328 | else |
1329 | rlim->rlim_cur = (unsigned long)rlim64->rlim_cur; |
1330 | if (rlim64_is_infinity(rlim64->rlim_max)) |
1331 | rlim->rlim_max = RLIM_INFINITY; |
1332 | else |
1333 | rlim->rlim_max = (unsigned long)rlim64->rlim_max; |
1334 | } |
1335 | |
1336 | /* make sure you are allowed to change @tsk limits before calling this */ |
1337 | int do_prlimit(struct task_struct *tsk, unsigned int resource, |
1338 | struct rlimit *new_rlim, struct rlimit *old_rlim) |
1339 | { |
1340 | struct rlimit *rlim; |
1341 | int retval = 0; |
1342 | |
1343 | if (resource >= RLIM_NLIMITS) |
1344 | return -EINVAL; |
1345 | if (new_rlim) { |
1346 | if (new_rlim->rlim_cur > new_rlim->rlim_max) |
1347 | return -EINVAL; |
1348 | if (resource == RLIMIT_NOFILE && |
1349 | new_rlim->rlim_max > sysctl_nr_open) |
1350 | return -EPERM; |
1351 | } |
1352 | |
1353 | /* protect tsk->signal and tsk->sighand from disappearing */ |
1354 | read_lock(&tasklist_lock); |
1355 | if (!tsk->sighand) { |
1356 | retval = -ESRCH; |
1357 | goto out; |
1358 | } |
1359 | |
1360 | rlim = tsk->signal->rlim + resource; |
1361 | task_lock(tsk->group_leader); |
1362 | if (new_rlim) { |
1363 | /* Keep the capable check against init_user_ns until |
1364 | cgroups can contain all limits */ |
1365 | if (new_rlim->rlim_max > rlim->rlim_max && |
1366 | !capable(CAP_SYS_RESOURCE)) |
1367 | retval = -EPERM; |
1368 | if (!retval) |
1369 | retval = security_task_setrlimit(tsk->group_leader, |
1370 | resource, new_rlim); |
1371 | if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) { |
1372 | /* |
1373 | * The caller is asking for an immediate RLIMIT_CPU |
1374 | * expiry. But we use the zero value to mean "it was |
1375 | * never set". So let's cheat and make it one second |
1376 | * instead |
1377 | */ |
1378 | new_rlim->rlim_cur = 1; |
1379 | } |
1380 | } |
1381 | if (!retval) { |
1382 | if (old_rlim) |
1383 | *old_rlim = *rlim; |
1384 | if (new_rlim) |
1385 | *rlim = *new_rlim; |
1386 | } |
1387 | task_unlock(tsk->group_leader); |
1388 | |
1389 | /* |
1390 | * RLIMIT_CPU handling. Note that the kernel fails to return an error |
1391 | * code if it rejected the user's attempt to set RLIMIT_CPU. This is a |
1392 | * very long-standing error, and fixing it now risks breakage of |
1393 | * applications, so we live with it |
1394 | */ |
1395 | if (!retval && new_rlim && resource == RLIMIT_CPU && |
1396 | new_rlim->rlim_cur != RLIM_INFINITY) |
1397 | update_rlimit_cpu(tsk, new_rlim->rlim_cur); |
1398 | out: |
1399 | read_unlock(&tasklist_lock); |
1400 | return retval; |
1401 | } |
1402 | |
1403 | /* rcu lock must be held */ |
1404 | static int check_prlimit_permission(struct task_struct *task) |
1405 | { |
1406 | const struct cred *cred = current_cred(), *tcred; |
1407 | |
1408 | if (current == task) |
1409 | return 0; |
1410 | |
1411 | tcred = __task_cred(task); |
1412 | if (cred->user->user_ns == tcred->user->user_ns && |
1413 | (cred->uid == tcred->euid && |
1414 | cred->uid == tcred->suid && |
1415 | cred->uid == tcred->uid && |
1416 | cred->gid == tcred->egid && |
1417 | cred->gid == tcred->sgid && |
1418 | cred->gid == tcred->gid)) |
1419 | return 0; |
1420 | if (ns_capable(tcred->user->user_ns, CAP_SYS_RESOURCE)) |
1421 | return 0; |
1422 | |
1423 | return -EPERM; |
1424 | } |
1425 | |
1426 | SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, |
1427 | const struct rlimit64 __user *, new_rlim, |
1428 | struct rlimit64 __user *, old_rlim) |
1429 | { |
1430 | struct rlimit64 old64, new64; |
1431 | struct rlimit old, new; |
1432 | struct task_struct *tsk; |
1433 | int ret; |
1434 | |
1435 | if (new_rlim) { |
1436 | if (copy_from_user(&new64, new_rlim, sizeof(new64))) |
1437 | return -EFAULT; |
1438 | rlim64_to_rlim(&new64, &new); |
1439 | } |
1440 | |
1441 | rcu_read_lock(); |
1442 | tsk = pid ? find_task_by_vpid(pid) : current; |
1443 | if (!tsk) { |
1444 | rcu_read_unlock(); |
1445 | return -ESRCH; |
1446 | } |
1447 | ret = check_prlimit_permission(tsk); |
1448 | if (ret) { |
1449 | rcu_read_unlock(); |
1450 | return ret; |
1451 | } |
1452 | get_task_struct(tsk); |
1453 | rcu_read_unlock(); |
1454 | |
1455 | ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, |
1456 | old_rlim ? &old : NULL); |
1457 | |
1458 | if (!ret && old_rlim) { |
1459 | rlim_to_rlim64(&old, &old64); |
1460 | if (copy_to_user(old_rlim, &old64, sizeof(old64))) |
1461 | ret = -EFAULT; |
1462 | } |
1463 | |
1464 | put_task_struct(tsk); |
1465 | return ret; |
1466 | } |
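A hedged userspace sketch (not part of this file): glibc exposes this syscall as prlimit(). Pid 0 means the calling process, mirroring the find_task_by_vpid() fallback above, and raising the hard limit still requires CAP_SYS_RESOURCE, matching the capable() check in do_prlimit():

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 };
	struct rlimit old;

	/* pid 0 = the calling process; pass both pointers to set and fetch */
	if (prlimit(0, RLIMIT_NOFILE, &new, &old) != 0) {
		perror("prlimit");
		return 1;
	}
	printf("RLIMIT_NOFILE was %llu/%llu\n",
	       (unsigned long long)old.rlim_cur,
	       (unsigned long long)old.rlim_max);
	return 0;
}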
1467 | |
1468 | SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) |
1469 | { |
1470 | struct rlimit new_rlim; |
1471 | |
1472 | if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) |
1473 | return -EFAULT; |
1474 | return do_prlimit(current, resource, &new_rlim, NULL); |
1475 | } |
1476 | |
1477 | /* |
1478 | * It would make sense to put struct rusage in the task_struct, |
1479 | * except that would make the task_struct be *really big*. After |
1480 | * task_struct gets moved into malloc'ed memory, it would |
1481 | * make sense to do this. It will make moving the rest of the information |
1482 | * a lot simpler! (Which we're not doing right now because we're not |
1483 | * measuring them yet). |
1484 | * |
1485 | * When sampling multiple threads for RUSAGE_SELF, under SMP we might have |
1486 | * races with threads incrementing their own counters. But since word |
1487 | * reads are atomic, we either get new values or old values and we don't |
1488 | * care which for the sums. We always take the siglock to protect reading |
1489 | * the c* fields from p->signal from races with exit.c updating those |
1490 | * fields when reaping, so a sample either gets all the additions of a |
1491 | * given child after it's reaped, or none so this sample is before reaping. |
1492 | * |
1493 | * Locking: |
1494 | * We need to take the siglock for CHILDREN, SELF and BOTH |
1495 | * for the cases: current multithreaded, non-current single threaded, |
1496 | * and non-current multithreaded. Thread traversal is now safe with |
1497 | * the siglock held. |
1498 | * Strictly speaking, we do not need to take the siglock if we are current and |
1499 | * single threaded, as no one else can take our signal_struct away, no one |
1500 | * else can reap the children to update signal->c* counters, and no one else |
1501 | * can race with the signal-> fields. If we do not take any lock, the |
1502 | * signal-> fields could be read out of order while another thread was just |
1503 | * exiting. So we should place a read memory barrier when we avoid the lock. |
1504 | * On the writer side, write memory barrier is implied in __exit_signal |
1505 | * as __exit_signal releases the siglock spinlock after updating the signal-> |
1506 | * fields. But we don't do this yet to keep things simple. |
1507 | * |
1508 | */ |
1509 | |
1510 | static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r) |
1511 | { |
1512 | r->ru_nvcsw += t->nvcsw; |
1513 | r->ru_nivcsw += t->nivcsw; |
1514 | r->ru_minflt += t->min_flt; |
1515 | r->ru_majflt += t->maj_flt; |
1516 | r->ru_inblock += task_io_get_inblock(t); |
1517 | r->ru_oublock += task_io_get_oublock(t); |
1518 | } |
1519 | |
1520 | static void k_getrusage(struct task_struct *p, int who, struct rusage *r) |
1521 | { |
1522 | struct task_struct *t; |
1523 | unsigned long flags; |
1524 | cputime_t tgutime, tgstime, utime, stime; |
1525 | unsigned long maxrss = 0; |
1526 | |
1527 | memset((char *) r, 0, sizeof *r); |
1528 | utime = stime = cputime_zero; |
1529 | |
1530 | if (who == RUSAGE_THREAD) { |
1531 | task_times(current, &utime, &stime); |
1532 | accumulate_thread_rusage(p, r); |
1533 | maxrss = p->signal->maxrss; |
1534 | goto out; |
1535 | } |
1536 | |
1537 | if (!lock_task_sighand(p, &flags)) |
1538 | return; |
1539 | |
1540 | switch (who) { |
1541 | case RUSAGE_BOTH: |
1542 | case RUSAGE_CHILDREN: |
1543 | utime = p->signal->cutime; |
1544 | stime = p->signal->cstime; |
1545 | r->ru_nvcsw = p->signal->cnvcsw; |
1546 | r->ru_nivcsw = p->signal->cnivcsw; |
1547 | r->ru_minflt = p->signal->cmin_flt; |
1548 | r->ru_majflt = p->signal->cmaj_flt; |
1549 | r->ru_inblock = p->signal->cinblock; |
1550 | r->ru_oublock = p->signal->coublock; |
1551 | maxrss = p->signal->cmaxrss; |
1552 | |
1553 | if (who == RUSAGE_CHILDREN) |
1554 | break; |
1555 | |
1556 | case RUSAGE_SELF: |
1557 | thread_group_times(p, &tgutime, &tgstime); |
1558 | utime = cputime_add(utime, tgutime); |
1559 | stime = cputime_add(stime, tgstime); |
1560 | r->ru_nvcsw += p->signal->nvcsw; |
1561 | r->ru_nivcsw += p->signal->nivcsw; |
1562 | r->ru_minflt += p->signal->min_flt; |
1563 | r->ru_majflt += p->signal->maj_flt; |
1564 | r->ru_inblock += p->signal->inblock; |
1565 | r->ru_oublock += p->signal->oublock; |
1566 | if (maxrss < p->signal->maxrss) |
1567 | maxrss = p->signal->maxrss; |
1568 | t = p; |
1569 | do { |
1570 | accumulate_thread_rusage(t, r); |
1571 | t = next_thread(t); |
1572 | } while (t != p); |
1573 | break; |
1574 | |
1575 | default: |
1576 | BUG(); |
1577 | } |
1578 | unlock_task_sighand(p, &flags); |
1579 | |
1580 | out: |
1581 | cputime_to_timeval(utime, &r->ru_utime); |
1582 | cputime_to_timeval(stime, &r->ru_stime); |
1583 | |
1584 | if (who != RUSAGE_CHILDREN) { |
1585 | struct mm_struct *mm = get_task_mm(p); |
1586 | if (mm) { |
1587 | setmax_mm_hiwater_rss(&maxrss, mm); |
1588 | mmput(mm); |
1589 | } |
1590 | } |
1591 | r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ |
1592 | } |
1593 | |
1594 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru) |
1595 | { |
1596 | struct rusage r; |
1597 | k_getrusage(p, who, &r); |
1598 | return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; |
1599 | } |
1600 | |
1601 | SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru) |
1602 | { |
1603 | if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && |
1604 | who != RUSAGE_THREAD) |
1605 | return -EINVAL; |
1606 | return getrusage(current, who, ru); |
1607 | } |
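As a userspace illustration (not part of this file), ru_maxrss is reported in kilobytes, matching the PAGE_SIZE / 1024 conversion in k_getrusage() above:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) != 0) {
		perror("getrusage");
		return 1;
	}
	printf("max RSS %ld kB, minor faults %ld, major faults %ld\n",
	       ru.ru_maxrss, ru.ru_minflt, ru.ru_majflt);
	return 0;
}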
1608 | |
1609 | SYSCALL_DEFINE1(umask, int, mask) |
1610 | { |
1611 | mask = xchg(¤t->fs->umask, mask & S_IRWXUGO); |
1612 | return mask; |
1613 | } |
1614 | |
1615 | SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, |
1616 | unsigned long, arg4, unsigned long, arg5) |
1617 | { |
1618 | struct task_struct *me = current; |
1619 | unsigned char comm[sizeof(me->comm)]; |
1620 | long error; |
1621 | |
1622 | error = security_task_prctl(option, arg2, arg3, arg4, arg5); |
1623 | if (error != -ENOSYS) |
1624 | return error; |
1625 | |
1626 | error = 0; |
1627 | switch (option) { |
1628 | case PR_SET_PDEATHSIG: |
1629 | if (!valid_signal(arg2)) { |
1630 | error = -EINVAL; |
1631 | break; |
1632 | } |
1633 | me->pdeath_signal = arg2; |
1634 | error = 0; |
1635 | break; |
1636 | case PR_GET_PDEATHSIG: |
1637 | error = put_user(me->pdeath_signal, (int __user *)arg2); |
1638 | break; |
1639 | case PR_GET_DUMPABLE: |
1640 | error = get_dumpable(me->mm); |
1641 | break; |
1642 | case PR_SET_DUMPABLE: |
1643 | if (arg2 < 0 || arg2 > 1) { |
1644 | error = -EINVAL; |
1645 | break; |
1646 | } |
1647 | set_dumpable(me->mm, arg2); |
1648 | error = 0; |
1649 | break; |
1650 | |
1651 | case PR_SET_UNALIGN: |
1652 | error = SET_UNALIGN_CTL(me, arg2); |
1653 | break; |
1654 | case PR_GET_UNALIGN: |
1655 | error = GET_UNALIGN_CTL(me, arg2); |
1656 | break; |
1657 | case PR_SET_FPEMU: |
1658 | error = SET_FPEMU_CTL(me, arg2); |
1659 | break; |
1660 | case PR_GET_FPEMU: |
1661 | error = GET_FPEMU_CTL(me, arg2); |
1662 | break; |
1663 | case PR_SET_FPEXC: |
1664 | error = SET_FPEXC_CTL(me, arg2); |
1665 | break; |
1666 | case PR_GET_FPEXC: |
1667 | error = GET_FPEXC_CTL(me, arg2); |
1668 | break; |
1669 | case PR_GET_TIMING: |
1670 | error = PR_TIMING_STATISTICAL; |
1671 | break; |
1672 | case PR_SET_TIMING: |
1673 | if (arg2 != PR_TIMING_STATISTICAL) |
1674 | error = -EINVAL; |
1675 | else |
1676 | error = 0; |
1677 | break; |
1678 | |
1679 | case PR_SET_NAME: |
1680 | comm[sizeof(me->comm)-1] = 0; |
1681 | if (strncpy_from_user(comm, (char __user *)arg2, |
1682 | sizeof(me->comm) - 1) < 0) |
1683 | return -EFAULT; |
1684 | set_task_comm(me, comm); |
1685 | return 0; |
1686 | case PR_GET_NAME: |
1687 | get_task_comm(comm, me); |
1688 | if (copy_to_user((char __user *)arg2, comm, |
1689 | sizeof(comm))) |
1690 | return -EFAULT; |
1691 | return 0; |
1692 | case PR_GET_ENDIAN: |
1693 | error = GET_ENDIAN(me, arg2); |
1694 | break; |
1695 | case PR_SET_ENDIAN: |
1696 | error = SET_ENDIAN(me, arg2); |
1697 | break; |
1698 | |
1699 | case PR_GET_SECCOMP: |
1700 | error = prctl_get_seccomp(); |
1701 | break; |
1702 | case PR_SET_SECCOMP: |
1703 | error = prctl_set_seccomp(arg2); |
1704 | break; |
1705 | case PR_GET_TSC: |
1706 | error = GET_TSC_CTL(arg2); |
1707 | break; |
1708 | case PR_SET_TSC: |
1709 | error = SET_TSC_CTL(arg2); |
1710 | break; |
1711 | case PR_TASK_PERF_EVENTS_DISABLE: |
1712 | error = perf_event_task_disable(); |
1713 | break; |
1714 | case PR_TASK_PERF_EVENTS_ENABLE: |
1715 | error = perf_event_task_enable(); |
1716 | break; |
1717 | case PR_GET_TIMERSLACK: |
1718 | error = current->timer_slack_ns; |
1719 | break; |
1720 | case PR_SET_TIMERSLACK: |
1721 | if (arg2 <= 0) |
1722 | current->timer_slack_ns = |
1723 | current->default_timer_slack_ns; |
1724 | else |
1725 | current->timer_slack_ns = arg2; |
1726 | error = 0; |
1727 | break; |
1728 | case PR_MCE_KILL: |
1729 | if (arg4 | arg5) |
1730 | return -EINVAL; |
1731 | switch (arg2) { |
1732 | case PR_MCE_KILL_CLEAR: |
1733 | if (arg3 != 0) |
1734 | return -EINVAL; |
1735 | current->flags &= ~PF_MCE_PROCESS; |
1736 | break; |
1737 | case PR_MCE_KILL_SET: |
1738 | current->flags |= PF_MCE_PROCESS; |
1739 | if (arg3 == PR_MCE_KILL_EARLY) |
1740 | current->flags |= PF_MCE_EARLY; |
1741 | else if (arg3 == PR_MCE_KILL_LATE) |
1742 | current->flags &= ~PF_MCE_EARLY; |
1743 | else if (arg3 == PR_MCE_KILL_DEFAULT) |
1744 | current->flags &= |
1745 | ~(PF_MCE_EARLY|PF_MCE_PROCESS); |
1746 | else |
1747 | return -EINVAL; |
1748 | break; |
1749 | default: |
1750 | return -EINVAL; |
1751 | } |
1752 | error = 0; |
1753 | break; |
1754 | case PR_MCE_KILL_GET: |
1755 | if (arg2 | arg3 | arg4 | arg5) |
1756 | return -EINVAL; |
1757 | if (current->flags & PF_MCE_PROCESS) |
1758 | error = (current->flags & PF_MCE_EARLY) ? |
1759 | PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE; |
1760 | else |
1761 | error = PR_MCE_KILL_DEFAULT; |
1762 | break; |
1763 | default: |
1764 | error = -EINVAL; |
1765 | break; |
1766 | } |
1767 | return error; |
1768 | } |
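A hedged userspace sketch (not part of this file) of the PR_SET_NAME / PR_GET_NAME pair handled above; names longer than the 16-byte comm buffer are silently truncated:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char name[16];	/* PR_GET_NAME needs at least 16 bytes */

	prctl(PR_SET_NAME, "example-worker-with-a-long-name", 0, 0, 0);
	prctl(PR_GET_NAME, name, 0, 0, 0);
	printf("comm is now \"%s\"\n", name);	/* truncated to 15 chars + NUL */
	return 0;
}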
1769 | |
1770 | SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, |
1771 | struct getcpu_cache __user *, unused) |
1772 | { |
1773 | int err = 0; |
1774 | int cpu = raw_smp_processor_id(); |
1775 | if (cpup) |
1776 | err |= put_user(cpu, cpup); |
1777 | if (nodep) |
1778 | err |= put_user(cpu_to_node(cpu), nodep); |
1779 | return err ? -EFAULT : 0; |
1780 | } |
1781 | |
1782 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; |
1783 | |
1784 | static void argv_cleanup(struct subprocess_info *info) |
1785 | { |
1786 | argv_free(info->argv); |
1787 | } |
1788 | |
1789 | /** |
1790 | * orderly_poweroff - Trigger an orderly system poweroff |
1791 | * @force: force poweroff if command execution fails |
1792 | * |
1793 | * This may be called from any context to trigger a system shutdown. |
1794 | * If the orderly shutdown fails, it will force an immediate shutdown. |
1795 | */ |
1796 | int orderly_poweroff(bool force) |
1797 | { |
1798 | int argc; |
1799 | char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); |
1800 | static char *envp[] = { |
1801 | "HOME=/", |
1802 | "PATH=/sbin:/bin:/usr/sbin:/usr/bin", |
1803 | NULL |
1804 | }; |
1805 | int ret = -ENOMEM; |
1806 | struct subprocess_info *info; |
1807 | |
1808 | if (argv == NULL) { |
1809 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", |
1810 | __func__, poweroff_cmd); |
1811 | goto out; |
1812 | } |
1813 | |
1814 | info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC); |
1815 | if (info == NULL) { |
1816 | argv_free(argv); |
1817 | goto out; |
1818 | } |
1819 | |
1820 | call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL); |
1821 | |
1822 | ret = call_usermodehelper_exec(info, UMH_NO_WAIT); |
1823 | |
1824 | out: |
1825 | if (ret && force) { |
1826 | printk(KERN_WARNING "Failed to start orderly shutdown: " |
1827 | "forcing the issue\n"); |
1828 | |
1829 | /* I guess this should try to kick off some daemon to |
1830 | sync and poweroff asap. Or not even bother syncing |
1831 | if we're doing an emergency shutdown? */ |
1832 | emergency_sync(); |
1833 | kernel_power_off(); |
1834 | } |
1835 | |
1836 | return ret; |
1837 | } |
1838 | EXPORT_SYMBOL_GPL(orderly_poweroff); |
1839 |