arch/s390/kernel/vdso.c

/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *s)
{
    unsigned long val;
    int rc;

    rc = 0;
    if (strncmp(s, "on", 3) == 0)
        vdso_enabled = 1;
    else if (strncmp(s, "off", 4) == 0)
        vdso_enabled = 0;
    else {
        rc = strict_strtoul(s, 0, &val);
        vdso_enabled = rc ? 0 : !!val;
    }
    return !rc;
}
__setup("vdso=", vdso_setup);
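
/*
 * Example kernel command-line usage, per the parser above: "vdso=on"
 * or "vdso=off", or a number such as "vdso=1"/"vdso=0", where any
 * nonzero value enables the vdso and an unparsable value disables it.
 */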

/*
 * The vdso data page. The union pads struct vdso_data out to a full
 * page so that it can be mapped into user space as the final page of
 * the vdso.
 */
static union {
    struct vdso_data data;
    u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Set up the vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
    /* ECTG is usable only if the user address space is not the home
     * space and the extract-CPU-time facility (bit 31) is installed. */
    vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
}

#ifdef CONFIG_64BIT
/*
 * Set up the per-cpu vdso data page.
 */
static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
{
}

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER 2

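/*
 * A rough sketch of what vdso_alloc_per_cpu() builds, as read from
 * the code below (not an authoritative architecture description):
 * each CPU gets a private 4-page segment table (order 2, hence
 * SEGMENT_ORDER) whose first entry points to a 2 KB page table,
 * whose first entry in turn maps a single read-only page frame, the
 * per-cpu vdso data page. The psal/aste words hook this chain into
 * the lowcore "paste" array so that user space can reach the page
 * through access-register mode.
 */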
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
{
    unsigned long segment_table, page_table, page_frame;
    u32 *psal, *aste;
    int i;

    lowcore->vdso_per_cpu_data = __LC_PASTE;

    if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
        return 0;

    segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
    page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
    page_frame = get_zeroed_page(GFP_KERNEL);
    if (!segment_table || !page_table || !page_frame)
        goto out;

    clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
            PAGE_SIZE << SEGMENT_ORDER);
    clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
            256*sizeof(unsigned long));

    *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
    *(unsigned long *) page_table = _PAGE_RO + page_frame;

    psal = (u32 *) (page_table + 256*sizeof(unsigned long));
    aste = psal + 32;

    for (i = 4; i < 32; i += 4)
        psal[i] = 0x80000000;

    lowcore->paste[4] = (u32)(addr_t) psal;
    psal[0] = 0x20000000;
    psal[2] = (u32)(addr_t) aste;
    *(unsigned long *) (aste + 2) = segment_table +
        _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
    aste[4] = (u32)(addr_t) psal;
    lowcore->vdso_per_cpu_data = page_frame;

    vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
    return 0;

out:
    /* free_page()/free_pages() safely ignore a zero address */
    free_page(page_frame);
    free_page(page_table);
    free_pages(segment_table, SEGMENT_ORDER);
    return -ENOMEM;
}
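
/*
 * On SMP, callers outside this file (the cpu setup code) drive
 * vdso_alloc_per_cpu()/vdso_free_per_cpu(); the !CONFIG_SMP case is
 * handled in vdso_init() below. vdso_free_per_cpu() walks the table
 * chain back from the lowcore to undo the allocation.
 */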

void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
{
    unsigned long segment_table, page_table, page_frame;
    u32 *psal, *aste;

    if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
        return;

    psal = (u32 *)(addr_t) lowcore->paste[4];
    aste = (u32 *)(addr_t) psal[2];
    segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
    page_table = *(unsigned long *) segment_table;
    page_frame = *(unsigned long *) page_table;

    free_page(page_frame);
    free_page(page_table);
    free_pages(segment_table, SEGMENT_ORDER);
}

static void __vdso_init_cr5(void *dummy)
{
    unsigned long cr5;

    /* The lowcore is mapped at address zero via prefixing, so the
     * offset of its paste array is also its absolute address. */
    cr5 = offsetof(struct _lowcore, paste);
    __ctl_load(cr5, 5, 5);
}

static void vdso_init_cr5(void)
{
    if (user_mode != HOME_SPACE_MODE && vdso_enabled)
        on_each_cpu(__vdso_init_cr5, NULL, 1);
}
#endif /* CONFIG_64BIT */
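
/*
 * Note: judging from the ectg_available flag set in vdso_init_data(),
 * the per-cpu page set up above is what lets the 64-bit vdso use the
 * ECTG instruction for fast CPU-time reads; the user-space side lives
 * in the vdso assembly sources.
 */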

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
    struct mm_struct *mm = current->mm;
    struct page **vdso_pagelist;
    unsigned long vdso_pages;
    unsigned long vdso_base;
    int rc;

    if (!vdso_enabled)
        return 0;
    /*
     * Only map the vdso for dynamically linked elf binaries.
     */
    if (!uses_interp)
        return 0;

    vdso_base = mm->mmap_base;
#ifdef CONFIG_64BIT
    vdso_pagelist = vdso64_pagelist;
    vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
    if (is_compat_task()) {
        vdso_pagelist = vdso32_pagelist;
        vdso_pages = vdso32_pages;
    }
#endif
#else
    vdso_pagelist = vdso32_pagelist;
    vdso_pages = vdso32_pages;
#endif

    /*
     * The vDSO had a problem and was disabled; just don't "enable"
     * it for this process.
     */
    if (vdso_pages == 0)
        return 0;

    current->mm->context.vdso_base = 0;

    /*
     * Pick a base address for the vDSO in the process address space.
     * We try to put it at vdso_base, which is the "natural" base for
     * it, but we might fail and end up putting it elsewhere.
     */
    down_write(&mm->mmap_sem);
    vdso_base = get_unmapped_area(NULL, vdso_base,
                      vdso_pages << PAGE_SHIFT, 0, 0);
    if (IS_ERR_VALUE(vdso_base)) {
        rc = vdso_base;
        goto out_up;
    }

    /*
     * Put vDSO base into mm struct. We need to do this before calling
     * install_special_mapping or the perf counter mmap tracking code
     * will fail to recognise it as a vDSO (since arch_vma_name fails).
     */
    current->mm->context.vdso_base = vdso_base;

    /*
     * Our vma flags don't have VM_WRITE, so by default the process
     * isn't allowed to write to these pages. gdb can break that via
     * the ptrace interface and thus trigger COW on them, but it is
     * then your responsibility never to do so on the "data" page of
     * the vDSO, or you will stop receiving kernel updates and your
     * nice userland gettimeofday will be totally dead. Setting
     * breakpoints in the vDSO code pages this way is fine, though.
     *
     * Make sure the vDSO gets into every core dump. Dumping its
     * contents makes post-mortem analysis fully interpretable later
     * without having to match up the same kernel and hardware config
     * to see what the PC values meant.
     */
    rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                     VM_READ|VM_EXEC|
                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                     VM_ALWAYSDUMP,
                     vdso_pagelist);
    if (rc)
        current->mm->context.vdso_base = 0;
out_up:
    up_write(&mm->mmap_sem);
    return rc;
}

/*
 * Report the vdso mapping name for /proc/<pid>/maps and friends.
 */
const char *arch_vma_name(struct vm_area_struct *vma)
{
    if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
        return "[vdso]";
    return NULL;
}

static int __init vdso_init(void)
{
    int i;

    if (!vdso_enabled)
        return 0;
    vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
    /* Calculate the size of the 32 bit vDSO; the extra page holds vdso_data */
    vdso32_pages = ((&vdso32_end - &vdso32_start
             + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

    /* Make sure pages are in the correct state */
    vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
                  GFP_KERNEL);
    BUG_ON(vdso32_pagelist == NULL);
    for (i = 0; i < vdso32_pages - 1; i++) {
        struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
        ClearPageReserved(pg);
        get_page(pg);
        vdso32_pagelist[i] = pg;
    }
    vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
    vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
    /* Calculate the size of the 64 bit vDSO; the extra page holds vdso_data */
    vdso64_pages = ((&vdso64_end - &vdso64_start
             + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

    /* Make sure pages are in the correct state */
    vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
                  GFP_KERNEL);
    BUG_ON(vdso64_pagelist == NULL);
    for (i = 0; i < vdso64_pages - 1; i++) {
        struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
        ClearPageReserved(pg);
        get_page(pg);
        vdso64_pagelist[i] = pg;
    }
    vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
    vdso64_pagelist[vdso64_pages] = NULL;
#ifndef CONFIG_SMP
    /* Without SMP the boot CPU's per-cpu area is set up here. */
    if (vdso_alloc_per_cpu(0, &S390_lowcore))
        BUG();
#endif
    vdso_init_cr5();
#endif /* CONFIG_64BIT */

    get_page(virt_to_page(vdso_data));

    /* Make the vdso pagelists and data visible before they are used. */
    smp_wmb();

    return 0;
}
arch_initcall(vdso_init);
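
/*
 * The s390 vdso is an ordinary vma (mapped above), not a gate area,
 * so the generic gate-area hooks below report that none exists.
 */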

int in_gate_area_no_task(unsigned long addr)
{
    return 0;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
    return 0;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
    return NULL;
}