/*
 * linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
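
/*
 * Worked example (assuming SHMLBA == 4 * PAGE_SIZE == 16KiB and
 * PAGE_SHIFT == 12, the usual ARMv6 values): COLOUR_ALIGN(0x5000, 3)
 * rounds 0x5000 up to the next 16KiB boundary (0x8000) and adds the
 * page's colour offset ((3 << 12) & 0x3fff == 0x3000), returning
 * 0xb000.  Every mapping of page offset 3 thus lands at the same
 * address modulo SHMLBA, so all of its virtual aliases share one
 * cache colour.
 */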

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
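/*
 * Background: a VIPT cache whose way size exceeds PAGE_SIZE indexes
 * with virtual address bits above bit 11, so two virtual mappings of
 * the same physical page can land in different cache lines unless
 * their addresses agree in those bits.  Keeping shared mappings
 * SHMLBA-aligned relative to pgoff makes the index bits match.
 */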
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by the P bits of the cache
	 * type register: bit 11 for the I cache and bit 23 for the D
	 * cache.  The shift by 12 below folds the D-cache bit onto the
	 * I-cache bit position so a single test covers both.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We enforce the MAP_FIXED case: the caller's address is
	 * returned as-is, but a shared mapping whose fixed address
	 * has the wrong cache colour is rejected.
	 */
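	/*
	 * Example (hypothetical values, SHMLBA == 0x4000): a
	 * MAP_SHARED | MAP_FIXED request at addr == 0x1000 with
	 * pgoff == 0 has (0x1000 - 0) & 0x3fff == 0x1000, i.e. the
	 * wrong colour, and fails with -EINVAL; addr == 0x8000 would
	 * be accepted.
	 */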
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
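	/*
	 * mm->cached_hole_size is the largest hole seen below
	 * mm->free_area_cache.  If even that hole cannot fit this
	 * request, nothing below the cached address can, so resume
	 * the search there; otherwise a lower hole may fit, so
	 * restart from TASK_UNMAPPED_BASE.
	 */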
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}
	/* 8 bits of randomness in 20 address space bits */
	if (current->flags & PF_RANDOMIZE)
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
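	/*
	 * That is, with 4KiB pages the search start is shifted by
	 * 0-255 pages (0 to 0xff000 bytes): 8 random bits landing in
	 * address bits 12-19, hence "20 address space bits" above.
	 */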

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
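	/*
	 * RAM starts at PHYS_OFFSET, and __pa(high_memory - 1) + 1 is
	 * the first physical byte past the kernel's direct (lowmem)
	 * mapping; the checks below bound /dev/mem reads and writes
	 * to that range.
	 */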
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
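	/* With 4KiB pages, pfn 0x00100000 is the first frame at 4GiB. */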
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid.  The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions.  This effectively disables read()
 * and write() on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif