/*
 * linux/init/main.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * GK 2/5/95 - Changed to support mounting root fs via NFS
 * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
 * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
 * Simplified starting of init: Michael A. Griffith <grif@acm.org>
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/stackprotector.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/tty.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/rcupdate.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
#include <linux/writeback.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cgroup.h>
#include <linux/efi.h>
#include <linux/tick.h>
#include <linux/interrupt.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
#include <linux/buffer_head.h>
#include <linux/page_cgroup.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
#include <linux/kmemleak.h>
#include <linux/pid_namespace.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/idr.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/kmemcheck.h>
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/bugs.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#endif

static int kernel_init(void *);

extern void init_IRQ(void);
extern void fork_init(unsigned long);
extern void mca_init(void);
extern void sbus_init(void);
extern void prio_tree_init(void);
extern void radix_tree_init(void);
extern void free_initmem(void);
#ifndef CONFIG_DEBUG_RODATA
static inline void mark_rodata_ro(void) { }
#endif

#ifdef CONFIG_TC
extern void tc_init(void);
#endif

enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);
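/*
 * Note: system_state starts out as SYSTEM_BOOTING (the zero value of the
 * enum) and is switched to SYSTEM_RUNNING in init_post() below.
 */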

/*
 * Boot command-line arguments
 */
#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT

extern void time_init(void);
/* Default late time init is NULL. archs can override this later. */
void (*__initdata late_time_init)(void);
extern void softirq_init(void);

/* Untouched command line saved by arch-specific code. */
char __initdata boot_command_line[COMMAND_LINE_SIZE];
/* Untouched saved command line (eg. for /proc) */
char *saved_command_line;
/* Command line for parameter parsing */
static char *static_command_line;

static char *execute_command;
static char *ramdisk_execute_command;

#ifdef CONFIG_SMP
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);
#else
static const unsigned int setup_max_cpus = NR_CPUS;
#endif

/*
 * If set, this is an indication to the drivers to reset the underlying
 * device before going ahead with initialization; otherwise a driver might
 * rely on the BIOS and skip the reset operation.
 *
 * This is useful if the kernel is booting in an unreliable environment,
 * e.g. a kdump situation where the previous kernel has crashed, the BIOS
 * has been skipped and devices are in an unknown state.
 */
unsigned int reset_devices;
EXPORT_SYMBOL(reset_devices);

static int __init set_reset_devices(char *str)
{
        reset_devices = 1;
        return 1;
}

__setup("reset_devices", set_reset_devices);

static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
static const char *panic_later, *panic_param;

extern const struct obs_kernel_param __setup_start[], __setup_end[];

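/*
 * Match a boot option against the old-style __setup() handlers. Returns
 * non-zero when the option has been consumed here (or was already handled
 * as an early param), so unknown_bootoption() will not also pass it on to
 * init's argument or environment lists.
 */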
static int __init obsolete_checksetup(char *line)
{
        const struct obs_kernel_param *p;
        int had_early_param = 0;

        p = __setup_start;
        do {
                int n = strlen(p->str);
                if (!strncmp(line, p->str, n)) {
                        if (p->early) {
                                /* Already done in parse_early_param?
                                 * (Needs exact match on param part).
                                 * Keep iterating, as we can have early
                                 * params and __setups of same names 8( */
                                if (line[n] == '\0' || line[n] == '=')
                                        had_early_param = 1;
                        } else if (!p->setup_func) {
                                printk(KERN_WARNING "Parameter %s is obsolete,"
                                       " ignored\n", p->str);
                                return 1;
                        } else if (p->setup_func(line + n))
                                return 1;
                }
                p++;
        } while (p < __setup_end);

        return had_early_param;
}

/*
 * This should be approx 2 Bo*oMips to start (note initial shift), and will
 * still work even if initially too large; it will just take slightly longer.
 */
unsigned long loops_per_jiffy = (1<<12);

EXPORT_SYMBOL(loops_per_jiffy);

static int __init debug_kernel(char *str)
{
        console_loglevel = 10;
        return 0;
}

static int __init quiet_kernel(char *str)
{
        console_loglevel = 4;
        return 0;
}

early_param("debug", debug_kernel);
early_param("quiet", quiet_kernel);

static int __init loglevel(char *str)
{
        get_option(&str, &console_loglevel);
        return 0;
}

early_param("loglevel", loglevel);

/*
 * Unknown boot options get handed to init, unless they look like
 * unused parameters (modprobe will find them in /proc/cmdline).
 */
static int __init unknown_bootoption(char *param, char *val)
{
        /* Change NUL term back to "=", to make "param" the whole string. */
        if (val) {
                /* param=val or param="val"? */
                if (val == param+strlen(param)+1)
                        val[-1] = '=';
                else if (val == param+strlen(param)+2) {
                        val[-2] = '=';
                        memmove(val-1, val, strlen(val)+1);
                        val--;
                } else
                        BUG();
        }

        /* Handle obsolete-style parameters */
        if (obsolete_checksetup(param))
                return 0;

        /* Unused module parameter. */
        if (strchr(param, '.') && (!val || strchr(param, '.') < val))
                return 0;

        if (panic_later)
                return 0;

        if (val) {
                /* Environment option */
                unsigned int i;
                for (i = 0; envp_init[i]; i++) {
                        if (i == MAX_INIT_ENVS) {
                                panic_later = "Too many boot env vars at `%s'";
                                panic_param = param;
                        }
                        if (!strncmp(param, envp_init[i], val - param))
                                break;
                }
                envp_init[i] = param;
        } else {
                /* Command line option */
                unsigned int i;
                for (i = 0; argv_init[i]; i++) {
                        if (i == MAX_INIT_ARGS) {
                                panic_later = "Too many boot init vars at `%s'";
                                panic_param = param;
                        }
                }
                argv_init[i] = param;
        }
        return 0;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
int __read_mostly debug_pagealloc_enabled = 0;
#endif

static int __init init_setup(char *str)
{
        unsigned int i;

        execute_command = str;
        /*
         * In case LILO is going to boot us with default command line,
         * it prepends "auto" before the whole cmdline which makes
         * the shell think it should execute a script with such name.
         * So we ignore all arguments entered _before_ init=... [MJ]
         */
        for (i = 1; i < MAX_INIT_ARGS; i++)
                argv_init[i] = NULL;
        return 1;
}
__setup("init=", init_setup);

static int __init rdinit_setup(char *str)
{
        unsigned int i;

        ramdisk_execute_command = str;
        /* See "auto" comment in init_setup */
        for (i = 1; i < MAX_INIT_ARGS; i++)
                argv_init[i] = NULL;
        return 1;
}
__setup("rdinit=", rdinit_setup);

#ifndef CONFIG_SMP

#ifdef CONFIG_X86_LOCAL_APIC
static void __init smp_init(void)
{
        APIC_init_uniprocessor();
}
#else
#define smp_init() do { } while (0)
#endif

static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }

#else

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
static void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
        unsigned int cpu;

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        /* Any cleanup work */
        printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
        smp_cpus_done(setup_max_cpus);
}

#endif

/*
 * We need to store the untouched command line for future reference.
 * We also need to store the touched command line since the parameter
 * parsing is performed in place, and we should allow a component to
 * store a reference to a name/value pair for later use.
 */
static void __init setup_command_line(char *command_line)
{
        saved_command_line = alloc_bootmem(strlen (boot_command_line)+1);
        static_command_line = alloc_bootmem(strlen (command_line)+1);
        strcpy (saved_command_line, boot_command_line);
        strcpy (static_command_line, command_line);
}

/*
 * We need to finalize in a non-__init function or else race conditions
 * between the root thread and the init thread may cause start_kernel to
 * be reaped by free_initmem before the root thread has proceeded to
 * cpu_idle.
 *
 * gcc-3.4 accidentally inlines this function, so use noinline.
 */

static __initdata DECLARE_COMPLETION(kthreadd_done);
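/*
 * kthreadd_done is completed by rest_init() once kthreadd is up;
 * kernel_init() waits on it before doing anything that might need to
 * spawn kernel threads.
 */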

static noinline void __init_refok rest_init(void)
{
        int pid;

        rcu_scheduler_starting();
        /*
         * We need to spawn init first so that it obtains pid 1, however
         * the init task will end up wanting to create kthreads, which, if
         * we schedule it before we create kthreadd, will OOPS.
         */
        kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
        numa_default_policy();
        pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
        rcu_read_lock();
        kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
        rcu_read_unlock();
        complete(&kthreadd_done);

        /*
         * The boot idle thread must execute schedule()
         * at least once to get things moving:
         */
        init_idle_bootup_task(current);
        preempt_enable_no_resched();
        schedule();
        preempt_disable();

        /* Call into cpu_idle with preempt disabled */
        cpu_idle();
}

/* Check for early params. */
static int __init do_early_param(char *param, char *val)
{
        const struct obs_kernel_param *p;

        for (p = __setup_start; p < __setup_end; p++) {
                if ((p->early && strcmp(param, p->str) == 0) ||
                    (strcmp(param, "console") == 0 &&
                     strcmp(p->str, "earlycon") == 0)
                ) {
                        if (p->setup_func(val) != 0)
                                printk(KERN_WARNING
                                       "Malformed early option '%s'\n", param);
                }
        }
        /* We accept everything at this stage. */
        return 0;
}

void __init parse_early_options(char *cmdline)
{
        parse_args("early options", cmdline, NULL, 0, do_early_param);
}

/* Arch code calls this early on, or if not, just before other parsing. */
void __init parse_early_param(void)
{
        static __initdata int done = 0;
        static __initdata char tmp_cmdline[COMMAND_LINE_SIZE];

        if (done)
                return;

        /* All fall through to do_early_param. */
        strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_early_options(tmp_cmdline);
        done = 1;
}

/*
 * Activate the first processor.
 */

static void __init boot_cpu_init(void)
{
        int cpu = smp_processor_id();
        /* Mark the boot cpu "present", "online" etc for SMP and UP case */
        set_cpu_online(cpu, true);
        set_cpu_active(cpu, true);
        set_cpu_present(cpu, true);
        set_cpu_possible(cpu, true);
}

void __init __weak smp_setup_processor_id(void)
{
}

void __init __weak thread_info_cache_init(void)
{
}

/*
 * Set up kernel memory allocators
 */
static void __init mm_init(void)
{
        /*
         * page_cgroup requires contiguous pages as memmap
         * and it's bigger than MAX_ORDER unless SPARSEMEM.
         */
        page_cgroup_init_flatmem();
        mem_init();
        kmem_cache_init();
        percpu_init_late();
        pgtable_cache_init();
        vmalloc_init();
}

asmlinkage void __init start_kernel(void)
{
        char * command_line;
        extern const struct kernel_param __start___param[], __stop___param[];

        smp_setup_processor_id();

        /*
         * Need to run as early as possible, to initialize the
         * lockdep hash:
         */
        lockdep_init();
        debug_objects_early_init();

        /*
         * Set up the initial canary ASAP:
         */
        boot_init_stack_canary();

        cgroup_init_early();

        local_irq_disable();
        early_boot_irqs_off();

        /*
         * Interrupts are still disabled. Do necessary setups, then
         * enable them
         */
        tick_init();
        boot_cpu_init();
        page_address_init();
        printk(KERN_NOTICE "%s", linux_banner);
        setup_arch(&command_line);
        mm_init_owner(&init_mm, &init_task);
        setup_command_line(command_line);
        setup_nr_cpu_ids();
        setup_per_cpu_areas();
        smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */

        build_all_zonelists(NULL);
        page_alloc_init();

        printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
        parse_early_param();
        parse_args("Booting kernel", static_command_line, __start___param,
                   __stop___param - __start___param,
                   &unknown_bootoption);
        /*
         * These use large bootmem allocations and must precede
         * kmem_cache_init()
         */
        pidhash_init();
        vfs_caches_init_early();
        sort_main_extable();
        trap_init();
        mm_init();
        /*
         * Set up the scheduler prior starting any interrupts (such as the
         * timer interrupt). Full topology setup happens at smp_init()
         * time - but meanwhile we still have a functioning scheduler.
         */
        sched_init();
        /*
         * Disable preemption - early bootup scheduling is extremely
         * fragile until we cpu_idle() for the first time.
         */
        preempt_disable();
        if (!irqs_disabled()) {
                printk(KERN_WARNING "start_kernel(): bug: interrupts were "
                       "enabled *very* early, fixing it\n");
                local_irq_disable();
        }
        rcu_init();
        radix_tree_init();
        /* init some links before init_ISA_irqs() */
        early_irq_init();
        init_IRQ();
        prio_tree_init();
        init_timers();
        hrtimers_init();
        softirq_init();
        timekeeping_init();
        time_init();
        profile_init();
        if (!irqs_disabled())
                printk(KERN_CRIT "start_kernel(): bug: interrupts were "
                       "enabled early\n");
        early_boot_irqs_on();
        local_irq_enable();

        /* Interrupts are enabled now so all GFP allocations are safe. */
        gfp_allowed_mask = __GFP_BITS_MASK;

        kmem_cache_init_late();

        /*
         * HACK ALERT! This is early. We're enabling the console before
         * we've done PCI setups etc, and console_init() must be aware of
         * this. But we do want output early, in case something goes wrong.
         */
        console_init();
        if (panic_later)
                panic(panic_later, panic_param);

        lockdep_info();

        /*
         * Need to run this when irqs are enabled, because it wants
         * to self-test [hard/soft]-irqs on/off lock inversion bugs
         * too:
         */
        locking_selftest();

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start && !initrd_below_start_ok &&
            page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
                printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
                       "disabling it.\n",
                       page_to_pfn(virt_to_page((void *)initrd_start)),
                       min_low_pfn);
                initrd_start = 0;
        }
#endif
        page_cgroup_init();
        enable_debug_pagealloc();
        kmemleak_init();
        debug_objects_mem_init();
        idr_init_cache();
        setup_per_cpu_pageset();
        numa_policy_init();
        if (late_time_init)
                late_time_init();
        sched_clock_init();
        calibrate_delay();
        pidmap_init();
        anon_vma_init();
#ifdef CONFIG_X86
        if (efi_enabled)
                efi_enter_virtual_mode();
#endif
        thread_info_cache_init();
        cred_init();
        fork_init(totalram_pages);
        proc_caches_init();
        buffer_init();
        key_init();
        security_init();
        dbg_late_init();
        vfs_caches_init(totalram_pages);
        signals_init();
        /* rootfs populating might need page-writeback */
        page_writeback_init();
#ifdef CONFIG_PROC_FS
        proc_root_init();
#endif
        cgroup_init();
        cpuset_init();
        taskstats_init_early();
        delayacct_init();

        check_bugs();

        acpi_early_init(); /* before LAPIC and SMP init */
        sfi_init_late();

        ftrace_init();

        /* Do the rest non-__init'ed, we're now alive */
        rest_init();
}

/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
        ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;

        for (; fn < (ctor_fn_t *) __ctors_end; fn++)
                (*fn)();
#endif
}

int initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
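/*
 * Example: booting with "initcall_debug" makes do_one_initcall() route each
 * initcall through do_one_initcall_debug(), logging the function, its return
 * value and how long it took.
 */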

static char msgbuf[64];

static int __init_or_module do_one_initcall_debug(initcall_t fn)
{
        ktime_t calltime, delta, rettime;
        unsigned long long duration;
        int ret;

        printk(KERN_DEBUG "calling %pF @ %i\n", fn, task_pid_nr(current));
        calltime = ktime_get();
        ret = fn();
        rettime = ktime_get();
        delta = ktime_sub(rettime, calltime);
        duration = (unsigned long long) ktime_to_ns(delta) >> 10;
        printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n", fn,
               ret, duration);

        return ret;
}

int __init_or_module do_one_initcall(initcall_t fn)
{
        int count = preempt_count();
        int ret;

        if (initcall_debug)
                ret = do_one_initcall_debug(fn);
        else
                ret = fn();

        msgbuf[0] = 0;

        if (ret && ret != -ENODEV && initcall_debug)
                sprintf(msgbuf, "error code %d ", ret);

        if (preempt_count() != count) {
                strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
                preempt_count() = count;
        }
        if (irqs_disabled()) {
                strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
                local_irq_enable();
        }
        if (msgbuf[0]) {
                printk("initcall %pF returned with %s\n", fn, msgbuf);
        }

        return ret;
}


extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
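/*
 * Initcalls placed before __early_initcall_end (the early/pre-SMP ones) are
 * run by do_pre_smp_initcalls(); the remainder are run in link order by
 * do_initcalls() from do_basic_setup(), once SMP and the scheduler are up.
 */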

static void __init do_initcalls(void)
{
        initcall_t *fn;

        for (fn = __early_initcall_end; fn < __initcall_end; fn++)
                do_one_initcall(*fn);

        /* Make sure there is no pending stuff from the initcall sequence */
        flush_scheduled_work();
}

/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
        cpuset_init_smp();
        usermodehelper_init();
        init_tmpfs();
        driver_init();
        init_irq_proc();
        do_ctors();
        do_initcalls();
}

static void __init do_pre_smp_initcalls(void)
{
        initcall_t *fn;

        for (fn = __initcall_start; fn < __early_initcall_end; fn++)
                do_one_initcall(*fn);
}

static void run_init_process(const char *init_filename)
{
        argv_init[0] = init_filename;
        kernel_execve(init_filename, argv_init, envp_init);
}

/* This is a non __init function. Force it to be noinline, otherwise gcc
 * inlines it into init() and it becomes part of the init.text section.
 */
static noinline int init_post(void)
{
        /* need to finish all async __init code before freeing the memory */
        async_synchronize_full();
        free_initmem();
        mark_rodata_ro();
        system_state = SYSTEM_RUNNING;
        numa_default_policy();


        current->signal->flags |= SIGNAL_UNKILLABLE;

        if (ramdisk_execute_command) {
                run_init_process(ramdisk_execute_command);
                printk(KERN_WARNING "Failed to execute %s\n",
                       ramdisk_execute_command);
        }

        /*
         * We try each of these until one succeeds.
         *
         * The Bourne shell can be used instead of init if we are
         * trying to recover a really broken machine.
         */
        if (execute_command) {
                run_init_process(execute_command);
                printk(KERN_WARNING "Failed to execute %s. Attempting "
                       "defaults...\n", execute_command);
        }
        run_init_process("/sbin/init");
        run_init_process("/etc/init");
        run_init_process("/bin/init");
        run_init_process("/bin/sh");

        panic("No init found. Try passing init= option to kernel. "
              "See Linux Documentation/init.txt for guidance.");
}

static int __init kernel_init(void * unused)
{
        /*
         * Wait until kthreadd is all set-up.
         */
        wait_for_completion(&kthreadd_done);
        /*
         * init can allocate pages on any node
         */
        set_mems_allowed(node_states[N_HIGH_MEMORY]);
        /*
         * init can run on any cpu.
         */
        set_cpus_allowed_ptr(current, cpu_all_mask);
        /*
         * Tell the world that we're going to be the grim
         * reaper of innocent orphaned children.
         *
         * We don't want people to have to make incorrect
         * assumptions about where in the task array this
         * can be found.
         */
        init_pid_ns.child_reaper = current;

        cad_pid = task_pid(current);

        smp_prepare_cpus(setup_max_cpus);

        do_pre_smp_initcalls();

        smp_init();
        sched_init_smp();

        do_basic_setup();

        /* Open the /dev/console on the rootfs, this should never fail */
        if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
                printk(KERN_WARNING "Warning: unable to open an initial console.\n");

        (void) sys_dup(0);
        (void) sys_dup(0);
        /*
         * check if there is an early userspace init. If yes, let it do all
         * the work
         */

        if (!ramdisk_execute_command)
                ramdisk_execute_command = "/init";

        if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
                ramdisk_execute_command = NULL;
                prepare_namespace();
        }

        /*
         * Ok, we have completed the initial bootup, and
         * we're essentially up and running. Get rid of the
         * initmem segments and start the user-mode stuff..
         */

        init_post();
        return 0;
}