#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H
/* ptrace.h */
/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

#define PTRACE_TRACEME		   0
#define PTRACE_PEEKTEXT		   1
#define PTRACE_PEEKDATA		   2
#define PTRACE_PEEKUSR		   3
#define PTRACE_POKETEXT		   4
#define PTRACE_POKEDATA		   5
#define PTRACE_POKEUSR		   6
#define PTRACE_CONT		   7
#define PTRACE_KILL		   8
#define PTRACE_SINGLESTEP	   9

#define PTRACE_ATTACH		  16
#define PTRACE_DETACH		  17

#define PTRACE_SYSCALL		  24

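/*
 * Example (userspace sketch, not part of this header): the classic
 * tracer/tracee pattern using the basic requests above.  Assumes
 * <sys/ptrace.h>, <sys/wait.h> and <unistd.h>; error handling is
 * omitted and "addr" is a placeholder for a text address of interest
 * in the tracee.
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execl("/bin/true", "true", (char *)NULL);
 *	} else {
 *		int status;
 *		long word;
 *
 *		waitpid(pid, &status, 0);	(tracee stops at exec)
 *		word = ptrace(PTRACE_PEEKTEXT, pid, addr, NULL);
 *		ptrace(PTRACE_CONT, pid, NULL, NULL);
 *	}
 */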
/* 0x4200-0x4300 are reserved for architecture-independent additions.  */
#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201
#define PTRACE_GETSIGINFO	0x4202
#define PTRACE_SETSIGINFO	0x4203

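/*
 * Example (userspace sketch): inspecting the signal that stopped an
 * attached tracee 'pid'.  PTRACE_GETSIGINFO copies the siginfo of the
 * last signal out of the kernel; PTRACE_SETSIGINFO writes it back.
 *
 *	siginfo_t si;
 *
 *	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == 0)
 *		examine si.si_signo, si.si_code, si.si_addr, etc.
 */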
/*
 * Generic ptrace interface that exports the architecture-specific regsets
 * using the corresponding NT_* types (which are also used in the core dump).
 * Please note that the NT_PRSTATUS note type in a core dump contains a full
 * 'struct elf_prstatus'.  But the user_regset for NT_PRSTATUS contains just
 * the elf_gregset_t that is the pr_reg field of 'struct elf_prstatus'.  For
 * all the other user_regset flavors, the user_regset layout and the ELF core
 * dump note payload are exactly the same.
 *
 * Usage of this interface is as follows:
 *	struct iovec iov = { buf, len };
 *
 *	ret = ptrace(PTRACE_GETREGSET/PTRACE_SETREGSET, pid, NT_XXX_TYPE, &iov);
 *
 * On successful completion, the kernel updates iov.iov_len to the number of
 * bytes it has written to or read from the buffer at iov.iov_base.
 */
#define PTRACE_GETREGSET	0x4204
#define PTRACE_SETREGSET	0x4205

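/*
 * Example (userspace sketch): fetching the general-purpose registers
 * with PTRACE_GETREGSET.  Assumes <sys/uio.h> for struct iovec and
 * <elf.h> for NT_PRSTATUS; the buffer layout is the architecture's
 * struct user_regs_struct (from <sys/user.h> on most ports).
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { &regs, sizeof(regs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		iov.iov_len now holds the number of bytes filled in.
 */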
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

#define PTRACE_O_MASK		0x0000007f

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

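/*
 * Example (userspace sketch): tracing forks.  After PTRACE_SETOPTIONS
 * with PTRACE_O_TRACEFORK, a fork in the tracee is reported as a
 * SIGTRAP stop with the event code encoded in the high bits of the
 * wait status, and the new child's pid is fetched with
 * PTRACE_GETEVENTMSG.
 *
 *	int status;
 *	unsigned long new_pid;
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACEFORK);
 *	ptrace(PTRACE_CONT, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8)))
 *		ptrace(PTRACE_GETEVENTMSG, pid, NULL, &new_pid);
 */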
#include <asm/ptrace.h>

#ifdef __KERNEL__
/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace
 * flags, are simple.  When a task is running it owns its own
 * task->ptrace flags.  When the task is stopped the ptracer
 * owns task->ptrace.
 */

#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD	0x00000004
#define PT_PTRACE_CAP	0x00000008	/* ptracer can follow suid-exec */
#define PT_TRACE_FORK	0x00000010
#define PT_TRACE_VFORK	0x00000020
#define PT_TRACE_CLONE	0x00000040
#define PT_TRACE_EXEC	0x00000080
#define PT_TRACE_VFORK_DONE	0x00000100
#define PT_TRACE_EXIT	0x00000200

#define PT_TRACE_MASK	0x000003f4

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)

#include <linux/compiler.h>		/* For unlikely.  */
#include <linux/sched.h>		/* For struct task_struct.  */


extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
extern int ptrace_traceme(void);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);
extern int ptrace_request(struct task_struct *child, long request, long addr, long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ   1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);

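/*
 * Example (kernel-side sketch, hypothetical caller): a procfs-style
 * handler that only exposes another task's state to a caller that
 * would also be allowed to ptrace it:
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *		return -EPERM;
 */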
static inline int ptrace_reparented(struct task_struct *child)
{
	return child->real_parent != child->parent;
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);

/**
 * task_ptrace - return %PT_* flags that apply to a task
 * @task: pointer to &task_struct in question
 *
 * Returns the %PT_* flags that apply to @task.
 */
static inline int task_ptrace(struct task_struct *task)
{
	return task->ptrace;
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @mask: %PT_* bit to check in @current->ptrace
 * @event: %PTRACE_EVENT_* value to report if @mask is set
 * @message: value for %PTRACE_GETEVENTMSG to return
 *
 * This checks the @mask bit to see if ptrace wants stops for this event.
 * If so we stop, reporting @event and @message to the ptrace parent.
 *
 * Returns nonzero if we did a ptrace notification, zero if not.
 *
 * Called without locks.
 */
static inline int ptrace_event(int mask, int event, unsigned long message)
{
	if (mask && likely(!(current->ptrace & mask)))
		return 0;
	current->ptrace_message = message;
	ptrace_notify((event << 8) | SIGTRAP);
	return 1;
}

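/*
 * Example (kernel-side sketch): reporting an exec to the tracer,
 * roughly as the tracehook layer does at this point in history; if no
 * tracer asked for the event, fall back to the traditional post-exec
 * SIGTRAP:
 *
 *	if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
 *	    unlikely(task_ptrace(current) & PT_PTRACED))
 *		send_sig(SIGTRAP, current, 0);
 */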
/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child: new child task
 * @ptrace: true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->parent = child->real_parent;
	child->ptrace = 0;
	if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);
	}
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task: task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the
 * macro is a no-op and the spurious error condition needs to be filtered out
 * by some other means (e.g., at user level, by passing an extra argument to
 * the syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif

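/*
 * Example (sketch, with hypothetical names): a handler whose legitimate
 * result can land in the negative errno range calls the macro right
 * before returning; compute_result() is a placeholder, not a real
 * kernel function.
 *
 *	long hypothetical_syscall(void)
 *	{
 *		long ret = compute_result();	may legitimately be < 0
 *
 *		force_successful_syscall_return();
 *		return ret;
 *	}
 */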
/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()	(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */

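/*
 * Sketch (illustrative only, not a real port): an architecture with a
 * hardware trap flag might provide something like the following, where
 * TRAP_FLAG_FOR_THIS_ARCH is a placeholder for the machine's
 * single-step bit (x86's EFLAGS.TF, for instance):
 *
 *	#define arch_has_single_step()	(1)
 *
 *	void user_enable_single_step(struct task_struct *task)
 *	{
 *		task_pt_regs(task)->flags |= TRAP_FLAG_FOR_THIS_ARCH;
 *	}
 *
 *	void user_disable_single_step(struct task_struct *task)
 *	{
 *		task_pt_regs(task)->flags &= ~TRAP_FLAG_FOR_THIS_ARCH;
 *	}
 */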
#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()	(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif

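/*
 * The caller in the signal code uses the pair roughly like this
 * (a sketch of the pattern, not the exact source):
 *
 *	if (arch_ptrace_stop_needed(exit_code, info)) {
 *		spin_unlock_irq(&current->sighand->siglock);
 *		arch_ptrace_stop(exit_code, info);
 *		spin_lock_irq(&current->sighand->siglock);
 *	}
 */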
#ifndef arch_ptrace_untrace
/*
 * Do machine-specific work before untracing child.
 *
 * This is called for a normal detach as well as from ptrace_exit()
 * when the tracing task dies.
 *
 * Called with write_lock(&tasklist_lock) held.
 */
#define arch_ptrace_untrace(task)		do { } while (0)
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);

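/*
 * Example (kernel-side sketch, in the spirit of /proc/<pid>/syscall):
 * report which system call, if any, a stopped task is blocked in.
 *
 *	long nr;
 *	unsigned long args[6], sp, pc;
 *
 *	if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0)
 *		nr holds the syscall number, or -1 if the task is not
 *		in a system call.
 */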
#endif	/* __KERNEL__ */

#endif	/* _LINUX_PTRACE_H */