/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005 Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */

#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/seccomp.h>

/* #define SECCOMP_DEBUG 1 */

#ifdef CONFIG_SECCOMP_FILTER
#include <asm/syscall.h>
#include <linux/filter.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section. In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @len: the number of instructions in the program
 * @insns: the BPF program instructions to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory. This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	atomic_t usage;
	struct seccomp_filter *prev;
	unsigned short len;  /* Instruction count */
	struct sock_filter insns[];
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/**
 * get_u32 - returns a u32 offset into data
 * @data: an unsigned 64-bit value
 * @index: 0 or 1 to return the first or second 32-bits
 *
 * This inline exists to hide the length of unsigned long. If a 32-bit
 * unsigned long is passed in, it will be extended and the top 32-bits will be
 * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
 * properly returned.
 *
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static inline u32 get_u32(u64 data, int index)
{
	return ((u32 *)&data)[index];
}

/* Helper for bpf_load below. */
#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
/**
 * bpf_load: checks and returns a pointer to the requested offset
 * @off: offset into struct seccomp_data to load from
 *
 * Returns the requested 32-bits of data.
 * seccomp_check_filter() should assure that @off is 32-bit aligned
 * and not out of bounds. Failure to do so is a BUG.
 */
u32 seccomp_bpf_load(int off)
{
	struct pt_regs *regs = task_pt_regs(current);
	if (off == BPF_DATA(nr))
		return syscall_get_nr(current, regs);
	if (off == BPF_DATA(arch))
		return syscall_get_arch(current, regs);
	if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
		unsigned long value;
		int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
		int index = !!(off % sizeof(u64));
		syscall_get_arguments(current, regs, arg, 1, &value);
		return get_u32(value, index);
	}
	if (off == BPF_DATA(instruction_pointer))
		return get_u32(KSTK_EIP(current), 0);
	if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
		return get_u32(KSTK_EIP(current), 1);
	/* seccomp_check_filter should make this impossible. */
	BUG();
}

/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by sk_chk_filter) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load. It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_S_LD_W_ABS:
			ftest->code = BPF_S_ANC_SECCOMP_LD_W;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_S_LD_W_LEN:
			ftest->code = BPF_S_LD_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_S_LDX_W_LEN:
			ftest->code = BPF_S_LDX_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_S_RET_K:
		case BPF_S_RET_A:
		case BPF_S_ALU_ADD_K:
		case BPF_S_ALU_ADD_X:
		case BPF_S_ALU_SUB_K:
		case BPF_S_ALU_SUB_X:
		case BPF_S_ALU_MUL_K:
		case BPF_S_ALU_MUL_X:
		case BPF_S_ALU_DIV_X:
		case BPF_S_ALU_AND_K:
		case BPF_S_ALU_AND_X:
		case BPF_S_ALU_OR_K:
		case BPF_S_ALU_OR_X:
		case BPF_S_ALU_LSH_K:
		case BPF_S_ALU_LSH_X:
		case BPF_S_ALU_RSH_K:
		case BPF_S_ALU_RSH_X:
		case BPF_S_ALU_NEG:
		case BPF_S_LD_IMM:
		case BPF_S_LDX_IMM:
		case BPF_S_MISC_TAX:
		case BPF_S_MISC_TXA:
		case BPF_S_ALU_DIV_K:
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
		case BPF_S_JMP_JA:
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * seccomp_run_filters - evaluates all seccomp filters against @syscall
 * @syscall: number of the current system call
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(int syscall)
{
	struct seccomp_filter *f;
	u32 ret = SECCOMP_RET_ALLOW;

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (WARN_ON(current->seccomp.filter == NULL))
		return SECCOMP_RET_KILL;

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (f = current->seccomp.filter; f; f = f->prev) {
		u32 cur_ret = sk_run_filter(NULL, f->insns);
		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}

/**
 * seccomp_attach_filter: Attaches a seccomp filter to current.
 * @fprog: BPF program to install
 *
 * Returns 0 on success or an errno on failure.
 */
static long seccomp_attach_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *filter;
	unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
	unsigned long total_insns = fprog->len;
	long ret;

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return -EINVAL;

	for (filter = current->seccomp.filter; filter; filter = filter->prev)
		total_insns += filter->len + 4;  /* include a 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/*
	 * Installing a seccomp filter requires that the task have
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!current->no_new_privs &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return -EACCES;

	/* Allocate a new seccomp_filter */
	filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
			 GFP_KERNEL|__GFP_NOWARN);
	if (!filter)
		return -ENOMEM;
	atomic_set(&filter->usage, 1);
	filter->len = fprog->len;

	/* Copy the instructions from fprog. */
	ret = -EFAULT;
	if (copy_from_user(filter->insns, fprog->filter, fp_size))
		goto fail;

	/* Check and rewrite the fprog via the skb checker */
	ret = sk_chk_filter(filter->insns, filter->len);
	if (ret)
		goto fail;

	/* Check and rewrite the fprog for seccomp use */
	ret = seccomp_check_filter(filter->insns, filter->len);
	if (ret)
		goto fail;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;
	return 0;
fail:
	kfree(filter);
	return ret;
}

/**
 * seccomp_attach_user_filter - attaches a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns 0 on success and non-zero otherwise.
 */
long seccomp_attach_user_filter(char __user *user_filter)
{
	struct sock_fprog fprog;
	long ret = -EFAULT;

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	ret = seccomp_attach_filter(&fprog);
out:
	return ret;
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	/* Reference count is bounded by the number of total processes. */
	atomic_inc(&orig->usage);
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	/* Clean up single-reference branches iteratively. */
	while (orig && atomic_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		kfree(freeme);
	}
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current, task_pt_regs(current));
	info.si_syscall = syscall;
	force_sig_info(SIGSYS, &info, current);
}
#endif	/* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

#ifdef CONFIG_COMPAT
static int mode1_syscalls_32[] = {
	__NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
	0, /* null terminated */
};
#endif

int __secure_computing(int this_syscall)
{
	int mode = current->seccomp.mode;
	int exit_sig = 0;
	int *syscall;
	u32 ret;

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		syscall = mode1_syscalls;
#ifdef CONFIG_COMPAT
		if (is_compat_task())
			syscall = mode1_syscalls_32;
#endif
		do {
			if (*syscall == this_syscall)
				return 0;
		} while (*++syscall);
		exit_sig = SIGKILL;
		ret = SECCOMP_RET_KILL;
		break;
#ifdef CONFIG_SECCOMP_FILTER
	case SECCOMP_MODE_FILTER: {
		int data;
		struct pt_regs *regs = task_pt_regs(current);
		ret = seccomp_run_filters(this_syscall);
		data = ret & SECCOMP_RET_DATA;
		ret &= SECCOMP_RET_ACTION;
		switch (ret) {
		case SECCOMP_RET_ERRNO:
			/* Set the low-order 16-bits as an errno. */
			syscall_set_return_value(current, regs,
						 -data, 0);
			goto skip;
		case SECCOMP_RET_TRAP:
			/* Show the handler the original registers. */
			syscall_rollback(current, regs);
			/* Let the filter pass back 16 bits of data. */
			seccomp_send_sigsys(this_syscall, data);
			goto skip;
		case SECCOMP_RET_TRACE:
			/* Skip these calls if there is no tracer. */
			if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
				syscall_set_return_value(current, regs,
							 -ENOSYS, 0);
				goto skip;
			}
			/* Allow the BPF to provide the event message */
			ptrace_event(PTRACE_EVENT_SECCOMP, data);
			/*
			 * The delivery of a fatal signal during event
			 * notification may silently skip tracer notification.
			 * Terminating the task now avoids executing a system
			 * call that may not be intended.
			 */
			if (fatal_signal_pending(current))
				break;
			if (syscall_get_nr(current, regs) < 0)
				goto skip;  /* Explicit request to skip. */

			return 0;
		case SECCOMP_RET_ALLOW:
			return 0;
		case SECCOMP_RET_KILL:
		default:
			break;
		}
		exit_sig = SIGSYS;
		break;
	}
#endif
	default:
		BUG();
	}

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	audit_seccomp(this_syscall, exit_sig, ret);
	do_exit(exit_sig);
#ifdef CONFIG_SECCOMP_FILTER
skip:
	audit_seccomp(this_syscall, exit_sig, ret);
#endif
	return -1;
}

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * This function may be called repeatedly with a @seccomp_mode of
 * SECCOMP_MODE_FILTER to install additional filters. Every filter
 * successfully installed will be evaluated (in reverse order) for each system
 * call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	long ret = -EINVAL;

	if (current->seccomp.mode &&
	    current->seccomp.mode != seccomp_mode)
		goto out;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		ret = 0;
#ifdef TIF_NOTSC
		disable_TSC();
#endif
		break;
#ifdef CONFIG_SECCOMP_FILTER
	case SECCOMP_MODE_FILTER:
		ret = seccomp_attach_user_filter(filter);
		if (ret)
			goto out;
		break;
#endif
	default:
		goto out;
	}

	current->seccomp.mode = seccomp_mode;
	set_thread_flag(TIF_SECCOMP);
out:
	return ret;
}
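
For reference, the prctl_set_seccomp()/seccomp_attach_user_filter() path above is driven from userspace with prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog). Below is a minimal userland sketch of such a caller, not part of seccomp.c: the install_demo_filter() helper and the getpid-blocking policy are purely illustrative, while struct sock_fprog, the BPF_STMT/BPF_JUMP macros, struct seccomp_data, and the SECCOMP_RET_*/PR_SET_* constants come from the kernel uapi headers. A production filter would also check seccomp_data->arch before trusting the syscall number.

#include <errno.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative helper: allow every syscall except getpid(), which fails with EPERM. */
static int install_demo_filter(void)
{
	struct sock_filter insns[] = {
		/* Load the syscall number; this is what seccomp_bpf_load() serves. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
		/* If it is getpid, fall through to ERRNO|EPERM; otherwise skip to ALLOW. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/*
	 * no_new_privs lets an unprivileged task pass the CAP_SYS_ADMIN
	 * check in seccomp_attach_filter() above.
	 */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}

int main(void)
{
	if (install_demo_filter())
		return 1;
	/* Use syscall() directly so a cached glibc getpid() cannot mask the filter. */
	long pid = syscall(__NR_getpid);
	printf("getpid() = %ld, errno = %d (EPERM expected)\n", pid, errno);
	return 0;
}

Each additional prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, ...) call stacks another filter onto current->seccomp.filter; as seccomp_run_filters() shows, every filter in the chain is evaluated and the most restrictive (numerically lowest) action wins.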