arch/mips/mm/mmap.c

Source at commit 694c7fbe86b8a9c91392e505afcb9fcfc91deccc ("MIPS: JZ4740: Add cpufreq support" by Maarten ter Huurne).
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 * written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

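/*
 * Alignment mask for shared mappings. PAGE_SIZE - 1 is the default for
 * cores whose caches cannot alias; on CPUs with a virtually indexed
 * D-cache that can alias, the cache initialization code is expected to
 * raise this to the alias span, so all user views of a page share a
 * cache colour.
 */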
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

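/*
 * The legacy (bottom-up) layout is used when the process requests it via
 * personality, when the stack rlimit is unlimited (presumably because an
 * unbounded stack leaves no well-defined gap for sizing a top-down mmap
 * base), or when it is selected globally via the legacy_va_layout sysctl.
 */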
static int mmap_is_legacy(void)
{
    if (current->personality & ADDR_COMPAT_LAYOUT)
        return 1;

    if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
        return 1;

    return sysctl_legacy_va_layout;
}

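/*
 * Place the mmap base one stack-sized gap, clamped to [MIN_GAP, MAX_GAP],
 * below the top of the address space, minus the randomization offset.
 * Worked example (illustrative values, not from this file): with a 2 GiB
 * TASK_SIZE and the common 8 MiB stack rlimit, the gap clamps up to
 * MIN_GAP, so mappings start about 128 MiB (plus rnd) below TASK_SIZE
 * and grow downward from there.
 */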
static unsigned long mmap_base(unsigned long rnd)
{
    unsigned long gap = rlimit(RLIMIT_STACK);

    if (gap < MIN_GAP)
        gap = MIN_GAP;
    else if (gap > MAX_GAP)
        gap = MAX_GAP;

    return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

#define COLOUR_ALIGN(addr, pgoff) \
    ((((addr) + shm_align_mask) & ~shm_align_mask) + \
     (((pgoff) << PAGE_SHIFT) & shm_align_mask))
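/*
 * Worked example (assumed values, for illustration only): with 4 KiB
 * pages and a 16 KiB cache colour, shm_align_mask is 0x3fff. Then
 * COLOUR_ALIGN(0x12345, 2) first rounds 0x12345 up to the colour
 * boundary 0x14000, then adds (2 << 12) & 0x3fff = 0x2000, yielding
 * 0x16000 -- an address whose colour bits match those of file offset
 * 2 pages, so both views map to the same cache sets.
 */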

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
    unsigned long addr0, unsigned long len, unsigned long pgoff,
    unsigned long flags, enum mmap_allocation_direction dir)
{
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma;
    unsigned long addr = addr0;
    int do_color_align;
    struct vm_unmapped_area_info info;

    if (unlikely(len > TASK_SIZE))
        return -ENOMEM;

    if (flags & MAP_FIXED) {
        /* Even MAP_FIXED mappings must reside within TASK_SIZE */
        if (TASK_SIZE - len < addr)
            return -EINVAL;

        /*
         * We do not accept a shared mapping if it would violate
         * cache aliasing constraints.
         */
        if ((flags & MAP_SHARED) &&
            ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
            return -EINVAL;
        return addr;
    }

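    /*
     * Colour-align any mapping that may acquire a second view of the
     * same data: file-backed mappings can alias through the page cache,
     * and MAP_SHARED anonymous mappings can alias across processes.
     */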
    do_color_align = 0;
    if (filp || (flags & MAP_SHARED))
        do_color_align = 1;

    /* requesting a specific address */
    if (addr) {
        if (do_color_align)
            addr = COLOUR_ALIGN(addr, pgoff);
        else
            addr = PAGE_ALIGN(addr);

        vma = find_vma(mm, addr);
        if (TASK_SIZE - len >= addr &&
            (!vma || addr + len <= vma->vm_start))
            return addr;
    }

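    /*
     * Ask the generic allocator for a colour-aligned range: the mask
     * keeps only the colour bits above the page offset (0x3000 for an
     * assumed 16 KiB colour with 4 KiB pages), and align_offset steers
     * the search toward addresses whose colour matches the file offset.
     */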
    info.length = len;
    info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
    info.align_offset = pgoff << PAGE_SHIFT;

    if (dir == DOWN) {
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        addr = vm_unmapped_area(&info);

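        /*
         * vm_unmapped_area() hands back either a page-aligned address
         * or a negative errno; an errno always has low-order bits set,
         * so checking the bits below PAGE_MASK distinguishes the two.
         */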
        if (!(addr & ~PAGE_MASK))
            return addr;

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
    }

    info.flags = 0;
    info.low_limit = mm->mmap_base;
    info.high_limit = TASK_SIZE;
    return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
    unsigned long len, unsigned long pgoff, unsigned long flags)
{
    return arch_get_unmapped_area_common(filp,
            addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function as
 * extern, so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
    unsigned long addr0, unsigned long len, unsigned long pgoff,
    unsigned long flags)
{
    return arch_get_unmapped_area_common(filp,
            addr0, len, pgoff, flags, DOWN);
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
    unsigned long random_factor = 0UL;

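    /*
     * The shift by PAGE_SHIFT keeps the offset page aligned; with the
     * masks below, the randomization span works out to 16 MiB on 32-bit
     * and 256 MiB on 64-bit, in page-sized steps.
     */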
    if (current->flags & PF_RANDOMIZE) {
        random_factor = get_random_int();
        random_factor = random_factor << PAGE_SHIFT;
        if (TASK_IS_32BIT_ADDR)
            random_factor &= 0xfffffful;
        else
            random_factor &= 0xffffffful;
    }

    if (mmap_is_legacy()) {
        mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
        mm->get_unmapped_area = arch_get_unmapped_area;
    } else {
        mm->mmap_base = mmap_base(random_factor);
        mm->get_unmapped_area = arch_get_unmapped_area_topdown;
    }
}

static inline unsigned long brk_rnd(void)
{
    unsigned long rnd = get_random_int();

    rnd = rnd << PAGE_SHIFT;
    /* 8MB for 32bit, 256MB for 64bit */
    if (TASK_IS_32BIT_ADDR)
        rnd = rnd & 0x7ffffful;
    else
        rnd = rnd & 0xffffffful;

    return rnd;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
    unsigned long base = mm->brk;
    unsigned long ret;

    ret = PAGE_ALIGN(base + brk_rnd());

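    /*
     * If adding the random offset wrapped around the top of the address
     * space, PAGE_ALIGN() will have produced an address below the
     * current brk; fall back to the unrandomized value in that case.
     */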
    if (ret < mm->brk)
        return mm->brk;

    return ret;
}

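/*
 * A kernel virtual address is considered valid when the physical frame
 * it maps to has a struct page, i.e. it refers to actual RAM rather
 * than an unbacked hole or a stray pointer.
 */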
int __virt_addr_valid(const volatile void *kaddr)
{
    return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);