/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *	      Add notify_die() hooks.
 */
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>
#include <linux/tracehook.h>

#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/user.h>

#include "entry.h"

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#include "sigframe.h"

void (*ia64_mark_idle)(int);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);
void (*pm_idle) (void);
EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
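
/*
 * Note: pm_idle and pm_power_off above are overridable hooks. cpu_idle()
 * below calls through pm_idle (falling back to default_idle() when it is
 * NULL), and machine_power_off() calls pm_power_off when a platform has
 * installed one.
 */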

void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
	unsigned long ip, sp, bsp;
	char buf[128];	/* don't make it so big that it overflows the stack! */

	printk("\nCall Trace:\n");
	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;

		unw_get_sp(info, &sp);
		unw_get_bsp(info, &bsp);
		snprintf(buf, sizeof(buf),
			 " [<%016lx>] %%s\n"
			 " sp=%016lx bsp=%016lx\n",
			 ip, sp, bsp);
		print_symbol(buf, ip);
	} while (unw_unwind(info) >= 0);
}

void
show_stack (struct task_struct *task, unsigned long *sp)
{
	if (!task)
		unw_init_running(ia64_do_show_stack, NULL);
	else {
		struct unw_frame_info info;

		unw_init_from_blocked_task(&info, task);
		ia64_do_show_stack(&info, NULL);
	}
}

void
dump_stack (void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

void
show_regs (struct pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	print_modules();
	printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
	       smp_processor_id(), current->comm);
	printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
	       init_utsname()->release);
	print_symbol("ip is at %s\n", ip);
	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
	printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
	       regs->f6.u.bits[1], regs->f6.u.bits[0],
	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
	       regs->f8.u.bits[1], regs->f8.u.bits[0],
	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
	       regs->f10.u.bits[1], regs->f10.u.bits[0],
	       regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
	printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);

	if (user_mode(regs)) {
		/* print the stacked registers */
		unsigned long val, *bsp, ndirty;
		int i, sof, is_nat = 0;

		sof = regs->cr_ifs & 0x7f;	/* size of frame */
		ndirty = (regs->loadrs >> 19);
		bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
		for (i = 0; i < sof; ++i) {
			get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
			printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
			       ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
		}
	} else
		show_stack(NULL, NULL);
}

/* local support for deprecated console_print */
void
console_print(const char *s)
{
	printk(KERN_EMERG "%s", s);
}

void
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
	if (fsys_mode(current, &scr->pt)) {
		/*
		 * defer signal-handling etc. until we return to
		 * privilege-level 0.
		 */
		if (!ia64_psr(&scr->pt)->lp)
			ia64_psr(&scr->pt)->lp = 1;
		return;
	}

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_needs_checking)
		/*
		 * Note: pfm_handle_work() allows us to call it with interrupts
		 * disabled, and may enable interrupts within the function.
		 */
		pfm_handle_work();
#endif

	/* deal with pending signal delivery */
	if (test_thread_flag(TIF_SIGPENDING)) {
		local_irq_enable();	/* force interrupt enable */
		ia64_do_signal(scr, in_syscall);
	}

	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(&scr->pt);
		if (current->replacement_session_keyring)
			key_replace_session_keyring();
	}

	/* copy user rbs to kernel rbs */
	if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
		local_irq_enable();	/* force interrupt enable */
		ia64_sync_krbs();
	}

	local_irq_disable();	/* force interrupt disable */
}

static int pal_halt = 1;
static int can_do_pal_halt = 1;

static int __init nohalt_setup(char * str)
{
	pal_halt = can_do_pal_halt = 0;
	return 1;
}
__setup("nohalt", nohalt_setup);
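
/*
 * Passing "nohalt" on the kernel command line clears pal_halt and
 * can_do_pal_halt above, so default_idle() below busy-waits with
 * cpu_relax() instead of halting the CPU via safe_halt().
 */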

void
update_pal_halt_status(int status)
{
	can_do_pal_halt = pal_halt && status;
}

/*
 * We use this if we don't have any better idle routine..
 */
void
default_idle (void)
{
	local_irq_enable();
	while (!need_resched()) {
		if (can_do_pal_halt) {
			local_irq_disable();
			if (!need_resched()) {
				safe_halt();
			}
			local_irq_enable();
		} else
			cpu_relax();
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	unsigned int this_cpu = smp_processor_id();

	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	max_xtp();
	local_irq_disable();
	idle_task_exit();
	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
	/*
	 * The above is a point of no-return, the processor is
	 * expected to be in SAL loop now.
	 */
	BUG();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
 * pm_idle and pick up the new one.  Required while changing the pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call.  The
 * old pm_idle value will not be used by any CPU after this function returns.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
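
/*
 * Minimal usage sketch (illustrative only; "my_idle" is a hypothetical
 * handler, not defined in this file).  A caller installing its own idle
 * routine would do:
 *
 *	pm_idle = my_idle;
 *	cpu_idle_wait();
 *
 * After cpu_idle_wait() returns, no CPU is still executing the previous
 * handler, so its resources may be released.
 */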

void __attribute__((noreturn))
cpu_idle (void)
{
	void (*mark_idle)(int) = ia64_mark_idle;
	int cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		if (can_do_pal_halt) {
			current_thread_info()->status &= ~TS_POLLING;
			/*
			 * TS_POLLING-cleared state must be visible before we
			 * test NEED_RESCHED:
			 */
			smp_mb();
		} else {
			current_thread_info()->status |= TS_POLLING;
		}

		if (!need_resched()) {
			void (*idle)(void);
#ifdef CONFIG_SMP
			min_xtp();
#endif
			rmb();
			if (mark_idle)
				(*mark_idle)(1);

			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			(*idle)();
			if (mark_idle)
				(*mark_idle)(0);
#ifdef CONFIG_SMP
			normal_xtp();
#endif
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		check_pgt_cache();
		if (cpu_is_offline(cpu))
			play_dead();
	}
}

void
ia64_save_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_save_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_save_regs(task);

	info = __get_cpu_var(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 0);
#endif
}

void
ia64_load_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_load_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_load_regs(task);

	info = __get_cpu_var(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 1);
#endif
}

/*
 * Copy the state of an ia-64 thread.
 *
 * We get here through the following call chain:
 *
 *	from user-level:	from kernel:
 *
 *	<clone syscall>		<some kernel call frames>
 *	sys_clone		   :
 *	do_fork			do_fork
 *	copy_thread		copy_thread
 *
 * This means that the stack layout is as follows:
 *
 *	+---------------------+ (highest addr)
 *	|   struct pt_regs    |
 *	+---------------------+
 *	| struct switch_stack |
 *	+---------------------+
 *	|                     |
 *	|    memory stack     |
 *	|                     | <-- sp (lowest addr)
 *	+---------------------+
 *
 * Observe that we copy the unat values that are in pt_regs and switch_stack.  Spilling an
 * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
 * with N=(X & 0x1ff)/8.  Thus, copying the unat value preserves the NaT bits ONLY if the
 * pt_regs structure in the parent is congruent to that of the child, modulo 512.  Since
 * the stack is page aligned and the page size is at least 4KB, this is always the case,
 * so there is nothing to worry about.
 */
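/*
 * Worked example of the formula above (address chosen purely for
 * illustration): spilling a register to X = 0xe000000012345e28 gives
 * X & 0x1ff = 0x28 = 40, and 40/8 = 5, so that register's NaT bit lands
 * in bit 5 of ar.unat.  Because the parent's and child's pt_regs areas
 * are congruent modulo 512, the same spill slot maps to the same unat
 * bit in both tasks, which is why a plain copy of the unat value works.
 */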
int
copy_thread(unsigned long clone_flags,
	    unsigned long user_stack_base, unsigned long user_stack_size,
	    struct task_struct *p, struct pt_regs *regs)
{
	extern char ia64_ret_from_clone;
	struct switch_stack *child_stack, *stack;
	unsigned long rbs, child_rbs, rbs_size;
	struct pt_regs *child_ptregs;
	int retval = 0;

#ifdef CONFIG_SMP
	/*
	 * For SMP idle threads, fork_by_hand() calls do_fork with
	 * NULL regs.
	 */
	if (!regs)
		return 0;
#endif

	stack = ((struct switch_stack *) regs) - 1;

	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
	child_stack = (struct switch_stack *) child_ptregs - 1;

	/* copy parent's switch_stack & pt_regs to child: */
	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));

	rbs = (unsigned long) current + IA64_RBS_OFFSET;
	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
	rbs_size = stack->ar_bspstore - rbs;

	/* copy the parent's register backing store to the child: */
	memcpy((void *) child_rbs, (void *) rbs, rbs_size);

	if (likely(user_mode(child_ptregs))) {
		if (clone_flags & CLONE_SETTLS)
			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
		if (user_stack_base) {
			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
			child_ptregs->ar_bspstore = user_stack_base;
			child_ptregs->ar_rnat = 0;
			child_ptregs->loadrs = 0;
		}
	} else {
		/*
		 * Note: we simply preserve the relative position of
		 * the stack pointer here.  There is no need to
		 * allocate a scratch area here, since that will have
		 * been taken care of by the caller of sys_clone()
		 * already.
		 */
		child_ptregs->r12 = (unsigned long) child_ptregs - 16;	/* kernel sp */
		child_ptregs->r13 = (unsigned long) p;	/* set `current' pointer */
	}
	child_stack->ar_bspstore = child_rbs + rbs_size;
	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

	/* copy parts of thread_struct: */
	p->thread.ksp = (unsigned long) child_stack - 16;

	/* stop some PSR bits from being inherited.
	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
	 * therefore we must specify them explicitly here and not include them in
	 * IA64_PSR_BITS_TO_CLEAR.
	 */
	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

	/*
	 * NOTE: The calling convention considers all floating point
	 * registers in the high partition (fph) to be scratch.  Since
	 * the only way to get to this point is through a system call,
	 * we know that the values in fph are all dead.  Hence, there
	 * is no need to inherit the fph state from the parent to the
	 * child and all we have to do is to make sure that
	 * IA64_THREAD_FPH_VALID is cleared in the child.
	 *
	 * XXX We could push this optimization a bit further by
	 * clearing IA64_THREAD_FPH_VALID on ANY system call.
	 * However, it's not clear this is worth doing.  Also, it
	 * would be a slight deviation from the normal Linux system
	 * call behavior where scratch registers are preserved across
	 * system calls (unless used by the system call itself).
	 */
#	define THREAD_FLAGS_TO_CLEAR	(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
					 | IA64_THREAD_PM_VALID)
#	define THREAD_FLAGS_TO_SET	0
	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
			   | THREAD_FLAGS_TO_SET);
	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_context)
		pfm_inherit(p, child_ptregs);
#endif
	return retval;
}

static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
	unsigned long uninitialized_var(ip);	/* GCC be quiet */
	elf_greg_t *dst = arg;
	struct pt_regs *pt;
	char nat;
	int i;

	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	unw_get_sp(info, &sp);
	pt = (struct pt_regs *) (sp + 16);

	urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);

	if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
		return;

	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
		  &ar_rnat);

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* r0 is zero */
	for (i = 1, mask = (1UL << i); i < 32; ++i) {
		unw_get_gr(info, i, &dst[i], &nat);
		if (nat)
			nat_bits |= mask;
		mask <<= 1;
	}
	dst[32] = nat_bits;
	unw_get_pr(info, &dst[33]);

	for (i = 0; i < 8; ++i)
		unw_get_br(info, i, &dst[34 + i]);

	unw_get_rp(info, &ip);
	dst[42] = ip + ia64_psr(pt)->ri;
	dst[43] = cfm;
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;

	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
	/*
	 * For bsp and bspstore, unw_get_ar() would return the kernel
	 * addresses, but we need the user-level addresses instead:
	 */
	dst[46] = urbs_end;	/* note: by convention PT_AR_BSP points to the end of the urbs! */
	dst[47] = pt->ar_bspstore;
	dst[48] = ar_rnat;
	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
	unw_get_ar(info, UNW_AR_LC, &dst[53]);
	unw_get_ar(info, UNW_AR_EC, &dst[54]);
	unw_get_ar(info, UNW_AR_CSD, &dst[55]);
	unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}

void
do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	elf_fpreg_t *dst = arg;
	int i;

	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */

	if (unw_unwind_to_user(info) < 0)
		return;

	/* f0 is 0.0, f1 is 1.0 */

	for (i = 2; i < 32; ++i)
		unw_get_fr(info, i, dst + i);

	ia64_flush_fph(task);
	if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		memcpy(dst + 32, task->thread.fph, 96*16);
}

void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
	do_copy_task_regs(current, info, arg);
}

void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
	do_dump_task_fpu(current, info, arg);
}

void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
	unw_init_running(do_copy_regs, dst);
}

int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
	unw_init_running(do_dump_fpu, dst);
	return 1;	/* f0-f31 are always valid so we always return 1 */
}

long
sys_execve (const char __user *filename,
	    const char __user *const __user *argv,
	    const char __user *const __user *envp,
	    struct pt_regs *regs)
{
	char *fname;
	int error;

	fname = getname(filename);
	error = PTR_ERR(fname);
	if (IS_ERR(fname))
		goto out;
	error = do_execve(fname, argv, envp, regs);
	putname(fname);
out:
	return error;
}

pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	extern void start_kernel_thread (void);
	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
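	/*
	 * On ia64 a C-level function pointer refers to a function
	 * descriptor, i.e. a pair { entry address, gp }, which is why
	 * helper_fptr[0] and helper_fptr[1] are used below to seed the
	 * new thread's IP and global pointer.
	 */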
	struct {
		struct switch_stack sw;
		struct pt_regs pt;
	} regs;

	memset(&regs, 0, sizeof(regs));
	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
	regs.pt.r1 = helper_fptr[1];		/* set GP */
	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */
	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
	regs.sw.pr = (1 << PRED_KERNEL_STACK);
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
	return (*fn)(arg);
}

/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
	ia64_drop_fpu(current);
}

/*
 * Clean up state associated with current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{

	ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
	/* if needed, stop monitoring and flush state to perfmon context */
	if (current->thread.pfm_context)
		pfm_exit_thread(current);

	/* free debug register resources */
	if (current->thread.flags & IA64_THREAD_DBG_VALID)
		pfm_release_debug_registers(current);
#endif
}

unsigned long
get_wchan (struct task_struct *p)
{
	struct unw_frame_info info;
	unsigned long ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * Note: p may not be a blocked task (it could be current or
	 * another process running on some other CPU).  Rather than
	 * trying to determine if p is really blocked, we just assume
	 * it's blocked and rely on the unwind routines to fail
	 * gracefully if the process wasn't really blocked after all.
	 * --davidm 99/12/15
	 */
	unw_init_from_blocked_task(&info, p);
	do {
		if (p->state == TASK_RUNNING)
			return 0;
		if (unw_unwind(&info) < 0)
			return 0;
		unw_get_ip(&info, &ip);
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < 16);
	return 0;
}

void
cpu_halt (void)
{
	pal_power_mgmt_info_u_t power_info[8];
	unsigned long min_power;
	int i, min_power_state;

	if (ia64_pal_halt_info(power_info) != 0)
		return;

	min_power_state = 0;
	min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
	for (i = 1; i < 8; ++i)
		if (power_info[i].pal_power_mgmt_info_s.im
		    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
			min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
			min_power_state = i;
		}

	while (1)
		ia64_pal_halt(min_power_state);
}

void machine_shutdown(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			cpu_down(cpu);
	}
#endif
#ifdef CONFIG_KEXEC
	kexec_disable_iosapic();
#endif
}

void
machine_restart (char *restart_cmd)
{
	(void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
	(*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}

void
machine_halt (void)
{
	(void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
	cpu_halt();
}

void
machine_power_off (void)
{
	if (pm_power_off)
		pm_power_off();
	machine_halt();
}