/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		/*
		 * If the group stop is completed or in progress,
		 * this thread was already counted as stopped.
		 */
		if (child->signal->flags & SIGNAL_STOP_STOPPED ||
		    child->signal->group_stop_count)
			__set_task_state(child, TASK_STOPPED);
		else
			signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	if (task_is_traced(child))
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if ((cred->uid != tcred->euid ||
	     cred->uid != tcred->suid ||
	     cred->uid != tcred->uid  ||
	     cred->gid != tcred->egid ||
	     cred->gid != tcred->sgid ||
	     cred->gid != tcred->gid) &&
	    !capable(CAP_SYS_PTRACE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (!dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

static int ptrace_attach(struct task_struct *task)
{
	int retval;

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);
	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	return retval;
}
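
/*
 * A minimal userspace sketch (not part of this file) of how a debugger
 * drives the attach path above: PTRACE_ATTACH queues the SIGSTOP sent by
 * ptrace_attach(), so the tracer must waitpid() for that stop before
 * issuing further requests. Guarded out so the kernel build ignores it;
 * compile it as a standalone program. Assumes the glibc ptrace(2) wrapper.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	pid_t pid;
	int status;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atoi(argv[1]);

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
		perror("PTRACE_ATTACH");
		return 1;
	}
	/* Collect the SIGSTOP queued by ptrace_attach(). */
	if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status)) {
		fprintf(stderr, "target did not stop\n");
		return 1;
	}
	printf("attached, target stopped by signal %d\n", WSTOPSIG(status));

	/* ... inspect memory, registers, etc. here ... */

	/* data == 0: detach without delivering a signal (ptrace_detach()). */
	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}
#endif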

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
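
/*
 * A minimal userspace sketch (not part of this file) of the PTRACE_TRACEME
 * side: the child asks its parent to trace it and then execs; the exec
 * delivers a SIGTRAP stop that the parent collects with waitpid() before
 * resuming the child. Guarded out; build separately. Assumes glibc.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();
	int status;

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execlp("ls", "ls", (char *)NULL);	/* stops with SIGTRAP */
		_exit(127);
	}

	waitpid(child, &status, 0);		/* the post-exec trap */
	if (WIFSTOPPED(status))
		printf("child stopped at exec, sig %d\n", WSTOPSIG(status));
	ptrace(PTRACE_CONT, child, NULL, NULL);	/* let it run to completion */
	waitpid(child, &status, 0);
	return 0;
}
#endif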

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in which case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	__ptrace_unlink(p);

	if (p->exit_state == EXIT_ZOMBIE) {
		if (!task_detached(p) && thread_group_empty(p)) {
			if (!same_thread_group(p->real_parent, tracer))
				do_notify_parent(p, p->exit_signal);
			else if (ignoring_children(tracer->sighand)) {
				__wake_up_parent(p, tracer);
				p->exit_signal = -1;
			}
		}
		if (task_detached(p)) {
			/* Mark it as in the process of being reaped. */
			p->exit_state = EXIT_DEAD;
			return true;
		}
	}

	return false;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
		if (!child->exit_state)
			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
	}
	write_unlock_irq(&tasklist_lock);

	if (unlikely(dead))
		release_task(child);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
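
/*
 * A userspace counterpart sketch (not part of this file): without the
 * block-transfer helpers above, a tracer reads remote memory one word at a
 * time with PTRACE_PEEKDATA, mirroring the chunked loop in
 * ptrace_readdata(). PEEKDATA returns the word as the ptrace() return
 * value, so errno must be cleared first to distinguish -1 data from an
 * error. Guarded out; build separately. Assumes glibc.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>
#include <string.h>

/* Read len bytes from the stopped tracee at addr; 0 on success, -1 on error. */
static int peek_remote(pid_t pid, unsigned long addr, void *buf, size_t len)
{
	size_t off, chunk;
	long word;

	for (off = 0; off < len; off += sizeof(long)) {
		errno = 0;
		word = ptrace(PTRACE_PEEKDATA, pid,
			      (void *)(addr + off), NULL);
		if (word == -1 && errno)
			return -1;
		chunk = len - off < sizeof(long) ? len - off : sizeof(long);
		memcpy((char *)buf + off, &word, chunk);
	}
	return 0;
}
#endif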

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
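
/*
 * Userspace sketch (not part of this file): options are set on an already
 * stopped tracee, and unknown bits make the request fail with -EINVAL, as
 * ptrace_setoptions() shows above. PTRACE_O_TRACESYSGOOD makes later
 * syscall stops report WSTOPSIG() == (SIGTRAP | 0x80) so they can be told
 * apart from real SIGTRAPs. Guarded out; build separately. Assumes a libc
 * that defines the PTRACE_O_* constants.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <stdio.h>

static int set_trace_options(pid_t pid)
{
	long opts = PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK;

	if (ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)opts) == -1) {
		perror("PTRACE_SETOPTIONS");
		return -1;
	}
	return 0;
}
#endif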

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
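/*
 * Userspace sketch (not part of this file): at a signal-delivery stop the
 * tracer can fetch the pending siginfo_t that ->last_siginfo points at,
 * e.g. to see which signal and si_code stopped the tracee. Guarded out;
 * build separately. Assumes glibc.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>
#include <stdio.h>

static void show_stop_siginfo(pid_t pid)
{
	siginfo_t si;

	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == -1) {
		perror("PTRACE_GETSIGINFO");
		return;
	}
	printf("stopped by signal %d (si_code %d)\n", si.si_signo, si.si_code);
}
#endif
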
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_process(child);

	return 0;
}
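
/*
 * Userspace sketch (not part of this file): a PTRACE_SYSCALL loop. Each
 * resume request runs the tracee to its next syscall entry or exit stop;
 * with PTRACE_O_TRACESYSGOOD set, those stops show up as SIGTRAP | 0x80.
 * Guarded out; build separately. Assumes glibc.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>

static void trace_syscalls(pid_t pid)
{
	int status;

	for (;;) {
		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
			break;
		if (WIFSTOPPED(status) &&
		    WSTOPSIG(status) == (SIGTRAP | 0x80))
			printf("syscall entry/exit stop\n");
	}
}
#endif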

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

#endif
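
/*
 * Userspace sketch (not part of this file): PTRACE_GETREGSET takes the
 * regset type in addr (an ELF note number such as NT_PRSTATUS) and an
 * iovec in data; the kernel clamps and writes back iov_len, as
 * ptrace_regset() shows above. Guarded out; build separately. Assumes a
 * libc and kernel recent enough to define PTRACE_GETREGSET, and x86's
 * struct user_regs_struct.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>
#include <stdio.h>

static int dump_gp_regs(pid_t pid)
{
	struct user_regs_struct regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == -1) {
		perror("PTRACE_GETREGSET");
		return -1;
	}
	printf("regset returned %zu bytes\n", iov.iov_len);
	return 0;
}
#endif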

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	int ret = -EIO;
	siginfo_t siginfo;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);

out_put_task_struct:
	put_task_struct(child);
out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
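
/*
 * Userspace sketch (not part of this file): PTRACE_POKEDATA writes one
 * word, so patching a single byte means a read-modify-write of the
 * containing word. The 0xcc value assumes an x86 tracee, where it encodes
 * the int3 breakpoint instruction in the low byte. Guarded out; build
 * separately. Assumes glibc.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>

static int plant_breakpoint(pid_t pid, unsigned long addr, long *saved)
{
	long word, patched;

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
	if (word == -1 && errno)
		return -1;
	*saved = word;

	patched = (word & ~0xffL) | 0xcc;	/* int3 in the low byte */
	return ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)patched);
}
#endif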

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

out_put_task_struct:
	put_task_struct(child);
out:
	return ret;
}
#endif	/* CONFIG_COMPAT */