include/asm-generic/vmlinux.lds.h

/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *      . = START;
 *      __init_begin = .;
 *      HEAD_TEXT_SECTION
 *      INIT_TEXT_SECTION(PAGE_SIZE)
 *      INIT_DATA_SECTION(...)
 *      PERCPU(PAGE_SIZE)
 *      __init_end = .;
 *
 *      _stext = .;
 *      TEXT_SECTION = 0
 *      _etext = .;
 *
 *      _sdata = .;
 *      RO_DATA_SECTION(PAGE_SIZE)
 *      RW_DATA_SECTION(...)
 *      _edata = .;
 *
 *      EXCEPTION_TABLE(...)
 *      NOTES
 *
 *      BSS_SECTION(0, 0, 0)
 *      _end = .;
 *
 *      STABS_DEBUG
 *      DWARF_DEBUG
 *
 *      DISCARDS                // must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

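/*
 * Example (an illustrative sketch, not part of the original header):
 * C code consumes the boundary symbols above by declaring them as
 * extern arrays; only their addresses are meaningful. The helper name
 * below is hypothetical.
 *
 *      extern char _stext[], _etext[];
 *
 *      static int addr_in_text(unsigned long addr)
 *      {
 *              return addr >= (unsigned long)_stext &&
 *                     addr <  (unsigned long)_etext;
 *      }
 */
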
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef SYMBOL_PREFIX
#define VMLINUX_SYMBOL(sym) sym
#else
#define PASTE2(x,y) x##y
#define PASTE(x,y) PASTE2(x,y)
#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
#endif

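/*
 * Expansion sketch (illustrative, assuming an arch whose toolchain
 * prefixes C symbols with an underscore and therefore defines
 * SYMBOL_PREFIX as _): the two-level PASTE forces SYMBOL_PREFIX to be
 * macro-expanded before the tokens are pasted, so
 *
 *      VMLINUX_SYMBOL(__bss_start)
 *      -> PASTE(SYMBOL_PREFIX, __bss_start)
 *      -> PASTE2(_, __bss_start)
 *      -> ___bss_start
 *
 * With SYMBOL_PREFIX undefined, the symbol is emitted unchanged.
 */
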
/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGN() . = ALIGN(32)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec) *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec) *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

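/*
 * Expansion sketch (illustrative): with CONFIG_HOTPLUG_CPU=y,
 *
 *      CPU_KEEP(init.text)    -> *(.cpuinit.text)
 *      CPU_DISCARD(init.text) -> (nothing)
 *
 * so .cpuinit.text input sections stay in whatever output section
 * invokes CPU_KEEP(). Without CONFIG_HOTPLUG_CPU they instead match
 * CPU_DISCARD() and land wherever that macro is placed, typically in
 * the init/exit sections that can be freed or discarded.
 */
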
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
            VMLINUX_SYMBOL(__start_mcount_loc) = .; \
            *(__mcount_loc) \
            VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

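/*
 * Consumption sketch (modeled on how ftrace walks this table at boot;
 * the loop is simplified): __mcount_loc holds the address of every
 * recorded mcount call site, bounded by the start/stop symbols.
 *
 *      extern unsigned long __start_mcount_loc[];
 *      extern unsigned long __stop_mcount_loc[];
 *
 *      static void walk_mcount_locs(void)
 *      {
 *              unsigned long *p;
 *
 *              for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *                      register_call_site(*p);
 *      }
 *
 * where register_call_site() is a hypothetical stand-in for whatever
 * consumer is interested in the call sites.
 */
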
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
                *(_ftrace_annotated_branch) \
                VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
                *(_ftrace_branch) \
                VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
            *(_ftrace_events) \
            VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
             *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
             VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
             *(__syscalls_metadata) \
             VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

/* .data section */
#define DATA_DATA \
    *(.data) \
    *(.ref.data) \
    *(.data..shared_aligned) /* percpu related */ \
    DEV_KEEP(init.data) \
    DEV_KEEP(exit.data) \
    CPU_KEEP(init.data) \
    CPU_KEEP(exit.data) \
    MEM_KEEP(init.data) \
    MEM_KEEP(exit.data) \
    . = ALIGN(32); \
    VMLINUX_SYMBOL(__start___tracepoints) = .; \
    *(__tracepoints) \
    VMLINUX_SYMBOL(__stop___tracepoints) = .; \
    /* implement dynamic printk debug */ \
    . = ALIGN(8); \
    VMLINUX_SYMBOL(__start___verbose) = .; \
    *(__verbose) \
    VMLINUX_SYMBOL(__stop___verbose) = .; \
    LIKELY_PROFILE() \
    BRANCH_PROFILE() \
    TRACE_PRINTKS() \
                                    \
    STRUCT_ALIGN(); \
    FTRACE_EVENTS() \
                                    \
    STRUCT_ALIGN(); \
    TRACE_SYSCALLS()

/*
 * Data section helpers
 */
#define NOSAVE_DATA \
    . = ALIGN(PAGE_SIZE); \
    VMLINUX_SYMBOL(__nosave_begin) = .; \
    *(.data..nosave) \
    . = ALIGN(PAGE_SIZE); \
    VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align) \
    . = ALIGN(page_align); \
    *(.data..page_aligned)

#define READ_MOSTLY_DATA(align) \
    . = ALIGN(align); \
    *(.data..read_mostly)

#define CACHELINE_ALIGNED_DATA(align) \
    . = ALIGN(align); \
    *(.data..cacheline_aligned)

#define INIT_TASK_DATA(align) \
    . = ALIGN(align); \
    *(.data..init_task)

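/*
 * Placement sketch (illustrative): C code lands variables in these
 * input sections with a section attribute; in the kernel this is
 * wrapped in markers such as __read_mostly. A hand-rolled equivalent,
 * with the variable name chosen purely for illustration:
 *
 *      static int sampling_rate
 *              __attribute__((__section__(".data..read_mostly"))) = 100;
 *
 * READ_MOSTLY_DATA() above then collects it next to other read-mostly
 * data so that frequently written data does not share its cachelines.
 */
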
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
    . = ALIGN((align)); \
    .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start_rodata) = .; \
        *(.rodata) *(.rodata.*) \
        *(__vermagic) /* Kernel version magic */ \
        *(__markers_strings) /* Markers: strings */ \
        *(__tracepoints_strings)/* Tracepoints: strings */ \
    } \
                                    \
    .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
        *(.rodata1) \
    } \
                                    \
    BUG_TABLE \
                                    \
    JUMP_TABLE \
                                    \
    /* PCI quirks */ \
    .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
        *(.pci_fixup_early) \
        VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
        VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
        *(.pci_fixup_header) \
        VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
        VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
        *(.pci_fixup_final) \
        VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
        VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
        *(.pci_fixup_enable) \
        VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
        VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
        *(.pci_fixup_resume) \
        VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
        VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
        *(.pci_fixup_resume_early) \
        VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
        VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
        *(.pci_fixup_suspend) \
        VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
    } \
                                    \
    /* Built-in firmware blobs */ \
    .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start_builtin_fw) = .; \
        *(.builtin_fw) \
        VMLINUX_SYMBOL(__end_builtin_fw) = .; \
    } \
                                    \
    /* RapidIO route ops */ \
    .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
        *(.rio_switch_ops) \
        VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
    } \
                                    \
    TRACEDATA \
                                    \
    /* Kernel symbol table: Normal symbols */ \
    __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___ksymtab) = .; \
        *(__ksymtab) \
        VMLINUX_SYMBOL(__stop___ksymtab) = .; \
    } \
                                    \
    /* Kernel symbol table: GPL-only symbols */ \
    __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
        *(__ksymtab_gpl) \
        VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
    } \
                                    \
    /* Kernel symbol table: Normal unused symbols */ \
    __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
        *(__ksymtab_unused) \
        VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
    } \
                                    \
    /* Kernel symbol table: GPL-only unused symbols */ \
    __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
        *(__ksymtab_unused_gpl) \
        VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
    } \
                                    \
    /* Kernel symbol table: GPL-future-only symbols */ \
    __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
        *(__ksymtab_gpl_future) \
        VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
    } \
                                    \
    /* Kernel symbol table: Normal symbols */ \
    __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___kcrctab) = .; \
        *(__kcrctab) \
        VMLINUX_SYMBOL(__stop___kcrctab) = .; \
    } \
                                    \
    /* Kernel symbol table: GPL-only symbols */ \
    __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
        *(__kcrctab_gpl) \
        VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
    } \
                                    \
    /* Kernel symbol table: Normal unused symbols */ \
    __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
        *(__kcrctab_unused) \
        VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
    } \
                                    \
    /* Kernel symbol table: GPL-only unused symbols */ \
    __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
        *(__kcrctab_unused_gpl) \
        VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
    } \
                                    \
    /* Kernel symbol table: GPL-future-only symbols */ \
    __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
        *(__kcrctab_gpl_future) \
        VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
    } \
                                    \
    /* Kernel symbol table: strings */ \
    __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
        *(__ksymtab_strings) \
    } \
                                    \
    /* __*init sections */ \
    __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
        *(.ref.rodata) \
        DEV_KEEP(init.rodata) \
        DEV_KEEP(exit.rodata) \
        CPU_KEEP(init.rodata) \
        CPU_KEEP(exit.rodata) \
        MEM_KEEP(init.rodata) \
        MEM_KEEP(exit.rodata) \
    } \
                                    \
    /* Built-in module parameters. */ \
    __param : AT(ADDR(__param) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___param) = .; \
        *(__param) \
        VMLINUX_SYMBOL(__stop___param) = .; \
        . = ALIGN((align)); \
        VMLINUX_SYMBOL(__end_rodata) = .; \
    } \
    . = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA RO_DATA_SECTION(4096)
#define RO_DATA(align) RO_DATA_SECTION(align)

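/*
 * Usage sketch (illustrative, not copied from any particular arch):
 * inside an architecture's vmlinux.lds.S, read-only data typically
 * follows text:
 *
 *      _etext = .;
 *      RO_DATA(PAGE_SIZE)
 *
 * Aligning to PAGE_SIZE lets the arch write-protect everything between
 * __start_rodata and __end_rodata once booting has finished.
 */
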
#define SECURITY_INIT \
    .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__security_initcall_start) = .; \
        *(.security_initcall.init) \
        VMLINUX_SYMBOL(__security_initcall_end) = .; \
    }

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT \
        ALIGN_FUNCTION(); \
        *(.text.hot) \
        *(.text) \
        *(.ref.text) \
    DEV_KEEP(init.text) \
    DEV_KEEP(exit.text) \
    CPU_KEEP(init.text) \
    CPU_KEEP(exit.text) \
    MEM_KEEP(init.text) \
    MEM_KEEP(exit.text) \
        *(.text.unlikely)

/* sched.text is aligned to function alignment to ensure we have the same
 * address even on the second ld pass when generating System.map */
#define SCHED_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__sched_text_start) = .; \
        *(.sched.text) \
        VMLINUX_SYMBOL(__sched_text_end) = .;

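/*
 * Consumption sketch: the scheduler checks whether an address falls in
 * this range when walking stacks (this mirrors the kernel's
 * in_sched_functions() helper; treat the exact body as illustrative):
 *
 *      extern char __sched_text_start[], __sched_text_end[];
 *
 *      static int addr_is_sched_text(unsigned long addr)
 *      {
 *              return addr >= (unsigned long)__sched_text_start &&
 *                     addr <  (unsigned long)__sched_text_end;
 *      }
 */
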
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even on the second ld pass when generating System.map */
#define LOCK_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__lock_text_start) = .; \
        *(.spinlock.text) \
        VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__kprobes_text_start) = .; \
        *(.kprobes.text) \
        VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__irqentry_text_start) = .; \
        *(.irqentry.text) \
        VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)

#define HEAD_TEXT_SECTION \
    .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
        HEAD_TEXT \
    }

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
    . = ALIGN(align); \
    __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___ex_table) = .; \
        *(__ex_table) \
        VMLINUX_SYMBOL(__stop___ex_table) = .; \
    }

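/*
 * Lookup sketch (illustrative; the real kernel sorts this table and
 * binary-searches it through search_exception_tables()). Each entry
 * pairs a potentially faulting instruction address with a fixup
 * address; a linear-scan equivalent with a simplified entry layout
 * assumed here:
 *
 *      struct exception_table_entry { unsigned long insn, fixup; };
 *      extern struct exception_table_entry __start___ex_table[];
 *      extern struct exception_table_entry __stop___ex_table[];
 *
 *      static unsigned long find_fixup(unsigned long faulting_insn)
 *      {
 *              struct exception_table_entry *e;
 *
 *              for (e = __start___ex_table; e < __stop___ex_table; e++)
 *                      if (e->insn == faulting_insn)
 *                              return e->fixup;
 *              return 0;
 *      }
 */
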
/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
    . = ALIGN(align); \
    .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
        INIT_TASK_DATA(align) \
    }

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
            VMLINUX_SYMBOL(__ctors_start) = .; \
            *(.ctors) \
            VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA \
    *(.init.data) \
    DEV_DISCARD(init.data) \
    CPU_DISCARD(init.data) \
    MEM_DISCARD(init.data) \
    KERNEL_CTORS() \
    *(.init.rodata) \
    MCOUNT_REC() \
    DEV_DISCARD(init.rodata) \
    CPU_DISCARD(init.rodata) \
    MEM_DISCARD(init.rodata)

#define INIT_TEXT \
    *(.init.text) \
    DEV_DISCARD(init.text) \
    CPU_DISCARD(init.text) \
    MEM_DISCARD(init.text)

#define EXIT_DATA \
    *(.exit.data) \
    DEV_DISCARD(exit.data) \
    DEV_DISCARD(exit.rodata) \
    CPU_DISCARD(exit.data) \
    CPU_DISCARD(exit.rodata) \
    MEM_DISCARD(exit.data) \
    MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
    *(.exit.text) \
    DEV_DISCARD(exit.text) \
    CPU_DISCARD(exit.text) \
    MEM_DISCARD(exit.text)

#define EXIT_CALL \
    *(.exitcall.exit)

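/*
 * Attribute sketch: code reaches these sections through the
 * __init/__exit markers from <linux/init.h>. The driver functions
 * below are hypothetical examples, not kernel code:
 *
 *      static int __init mydrv_init(void)
 *      {
 *              return 0;
 *      }
 *
 *      static void __exit mydrv_exit(void)
 *      {
 *      }
 *
 * mydrv_init() is emitted into .init.text and mydrv_exit() into
 * .exit.text, so init code can be freed after boot and exit code can
 * be discarded entirely for built-in drivers.
 */
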
/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align) \
    . = ALIGN(sbss_align); \
    .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
        *(.sbss) \
        *(.scommon) \
    }

#define BSS(bss_align) \
    . = ALIGN(bss_align); \
    .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
        *(.bss..page_aligned) \
        *(.dynbss) \
        *(.bss) \
        *(COMMON) \
    }

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
        /* DWARF 1 */ \
        .debug 0 : { *(.debug) } \
        .line 0 : { *(.line) } \
        /* GNU DWARF 1 extensions */ \
        .debug_srcinfo 0 : { *(.debug_srcinfo) } \
        .debug_sfnames 0 : { *(.debug_sfnames) } \
        /* DWARF 1.1 and DWARF 2 */ \
        .debug_aranges 0 : { *(.debug_aranges) } \
        .debug_pubnames 0 : { *(.debug_pubnames) } \
        /* DWARF 2 */ \
        .debug_info 0 : { *(.debug_info \
                .gnu.linkonce.wi.*) } \
        .debug_abbrev 0 : { *(.debug_abbrev) } \
        .debug_line 0 : { *(.debug_line) } \
        .debug_frame 0 : { *(.debug_frame) } \
        .debug_str 0 : { *(.debug_str) } \
        .debug_loc 0 : { *(.debug_loc) } \
        .debug_macinfo 0 : { *(.debug_macinfo) } \
        /* SGI/MIPS DWARF 2 extensions */ \
        .debug_weaknames 0 : { *(.debug_weaknames) } \
        .debug_funcnames 0 : { *(.debug_funcnames) } \
        .debug_typenames 0 : { *(.debug_typenames) } \
        .debug_varnames 0 : { *(.debug_varnames) }

        /* Stabs debugging sections. */
#define STABS_DEBUG \
        .stab 0 : { *(.stab) } \
        .stabstr 0 : { *(.stabstr) } \
        .stab.excl 0 : { *(.stab.excl) } \
        .stab.exclstr 0 : { *(.stab.exclstr) } \
        .stab.index 0 : { *(.stab.index) } \
        .stab.indexstr 0 : { *(.stab.indexstr) } \
        .comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
    . = ALIGN(8); \
    __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___bug_table) = .; \
        *(__bug_table) \
        VMLINUX_SYMBOL(__stop___bug_table) = .; \
    }
#else
#define BUG_TABLE
#endif

#define JUMP_TABLE \
    . = ALIGN(8); \
    __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start___jump_table) = .; \
        *(__jump_table) \
        VMLINUX_SYMBOL(__stop___jump_table) = .; \
    }

#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
    . = ALIGN(4); \
    .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__tracedata_start) = .; \
        *(.tracedata) \
        VMLINUX_SYMBOL(__tracedata_end) = .; \
    }
#else
#define TRACEDATA
#endif

#define NOTES \
    .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__start_notes) = .; \
        *(.note.*) \
        VMLINUX_SYMBOL(__stop_notes) = .; \
    }

#define INIT_SETUP(initsetup_align) \
        . = ALIGN(initsetup_align); \
        VMLINUX_SYMBOL(__setup_start) = .; \
        *(.init.setup) \
        VMLINUX_SYMBOL(__setup_end) = .;

#define INITCALLS \
    *(.initcallearly.init) \
    VMLINUX_SYMBOL(__early_initcall_end) = .; \
    *(.initcall0.init) \
    *(.initcall0s.init) \
    *(.initcall1.init) \
    *(.initcall1s.init) \
    *(.initcall2.init) \
    *(.initcall2s.init) \
    *(.initcall3.init) \
    *(.initcall3s.init) \
    *(.initcall4.init) \
    *(.initcall4s.init) \
    *(.initcall5.init) \
    *(.initcall5s.init) \
    *(.initcallrootfs.init) \
    *(.initcall6.init) \
    *(.initcall6s.init) \
    *(.initcall7.init) \
    *(.initcall7s.init)

#define INIT_CALLS \
        VMLINUX_SYMBOL(__initcall_start) = .; \
        INITCALLS \
        VMLINUX_SYMBOL(__initcall_end) = .;

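/*
 * Iteration sketch (modeled on the kernel's do_initcalls(); the loop
 * here is simplified): initcalls are plain function pointers laid out
 * in level order by INITCALLS, so boot just walks the array.
 *
 *      typedef int (*initcall_t)(void);
 *      extern initcall_t __initcall_start[], __initcall_end[];
 *
 *      static void run_initcalls(void)
 *      {
 *              initcall_t *call;
 *
 *              for (call = __initcall_start; call < __initcall_end; call++)
 *                      (*call)();
 *      }
 */
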
#define CON_INITCALL \
        VMLINUX_SYMBOL(__con_initcall_start) = .; \
        *(.con_initcall.init) \
        VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
        VMLINUX_SYMBOL(__security_initcall_start) = .; \
        *(.security_initcall.init) \
        VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
    . = ALIGN(4); \
    VMLINUX_SYMBOL(__initramfs_start) = .; \
    *(.init.ramfs) \
    . = ALIGN(8); \
    *(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of the output
 * section definitions so that such archs can put those sections in
 * earlier definitions instead.
 */
#define DISCARDS \
    /DISCARD/ : { \
    EXIT_TEXT \
    EXIT_DATA \
    EXIT_CALL \
    *(.discard) \
    *(.discard.*) \
    }

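/*
 * Placement sketch (illustrative): DISCARDS is emitted after every
 * other output section definition, e.g.
 *
 *      SECTIONS
 *      {
 *              ...
 *              STABS_DEBUG
 *              DWARF_DEBUG
 *
 *              DISCARDS
 *      }
 *
 * An arch that must keep EXIT_TEXT until runtime emits it in an
 * earlier output section of its own; the linker assigns each input
 * section to the first matching output statement, so the /DISCARD/
 * rule never sees it.
 */
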
/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area. If @vaddr
 * is not blank, it specifies an explicit base address and all percpu
 * symbols will be offset from the given address. If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * the output PHDR is sticky: if @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr) \
    VMLINUX_SYMBOL(__per_cpu_load) = .; \
    .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
                - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__per_cpu_start) = .; \
        *(.data..percpu..first) \
        . = ALIGN(PAGE_SIZE); \
        *(.data..percpu..page_aligned) \
        *(.data..percpu..readmostly) \
        *(.data..percpu) \
        *(.data..percpu..shared_aligned) \
        VMLINUX_SYMBOL(__per_cpu_end) = .; \
    } phdr \
    . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

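/*
 * Usage sketch (hedged; loosely modeled on how x86_64 of this era
 * placed a zero-based percpu area, details vary by arch and version):
 *
 *      PHDRS {
 *              percpu PT_LOAD FLAGS(6);
 *      }
 *      SECTIONS {
 *              ...
 *              PERCPU_VADDR(0, :percpu)
 *              ...
 *      }
 *
 * Here @vaddr is 0, so percpu symbols become small offsets from zero,
 * and :percpu routes the section into its own program header. Because
 * the PHDR is sticky, the next output section must name a PHDR of its
 * own. The FLAGS value above is illustrative only.
 */
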
/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load
 * and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU(align) \
    . = ALIGN(align); \
    .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(__per_cpu_load) = .; \
        VMLINUX_SYMBOL(__per_cpu_start) = .; \
        *(.data..percpu..first) \
        . = ALIGN(PAGE_SIZE); \
        *(.data..percpu..page_aligned) \
        *(.data..percpu..readmostly) \
        *(.data..percpu) \
        *(.data..percpu..shared_aligned) \
        VMLINUX_SYMBOL(__per_cpu_end) = .; \
    }

/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
    . = ALIGN(PAGE_SIZE); \
    .data : AT(ADDR(.data) - LOAD_OFFSET) { \
        INIT_TASK_DATA(inittask) \
        NOSAVE_DATA \
        PAGE_ALIGNED_DATA(pagealigned) \
        CACHELINE_ALIGNED_DATA(cacheline) \
        READ_MOSTLY_DATA(cacheline) \
        DATA_DATA \
        CONSTRUCTORS \
    }

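/*
 * Usage sketch (illustrative values): a typical arch invocation is
 *
 *      RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-sensitive data aligned to L1_CACHE_BYTES, page-aligned
 * data to PAGE_SIZE, and the init task to THREAD_SIZE. An arch with no
 * page-aligned data would pass 0 as the second argument, per the
 * comment above.
 */
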
#define INIT_TEXT_SECTION(inittext_align) \
    . = ALIGN(inittext_align); \
    .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
        VMLINUX_SYMBOL(_sinittext) = .; \
        INIT_TEXT \
        VMLINUX_SYMBOL(_einittext) = .; \
    }

#define INIT_DATA_SECTION(initsetup_align) \
    .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
        INIT_DATA \
        INIT_SETUP(initsetup_align) \
        INIT_CALLS \
        CON_INITCALL \
        SECURITY_INITCALL \
        INIT_RAM_FS \
    }

#define BSS_SECTION(sbss_align, bss_align, stop_align) \
    . = ALIGN(sbss_align); \
    VMLINUX_SYMBOL(__bss_start) = .; \
    SBSS(sbss_align) \
    BSS(bss_align) \
    . = ALIGN(stop_align); \
    VMLINUX_SYMBOL(__bss_stop) = .;

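/*
 * Startup sketch (illustrative; each arch does this in its own early
 * assembly or C code): the [__bss_start, __bss_stop) range produced by
 * BSS_SECTION() must be zeroed before C code relies on static storage.
 *
 *      extern char __bss_start[], __bss_stop[];
 *
 *      static void clear_bss(void)
 *      {
 *              char *p;
 *
 *              for (p = __bss_start; p < __bss_stop; p++)
 *                      *p = 0;
 *      }
 */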
