#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

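/*
 * Entry points for the huge-PMD (transparent hugepage) counterparts of
 * the ordinary fault, fork-copy, zap, mincore, mremap and mprotect
 * paths.  Callers in mm/ test pmd_trans_huge() and dispatch here instead
 * of descending to the pte level.  A caller might look roughly like the
 * hypothetical sketch below; the real call sites live in mm/memory.c
 * and friends:
 *
 *	if (pmd_trans_huge(*pmd)) {
 *		if (next - addr == HPAGE_PMD_SIZE)
 *			zap_huge_pmd(tlb, vma, pmd, addr);
 *		else
 *			split_huge_page_pmd(vma, addr, pmd);
 *	}
 */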
extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    unsigned char *vec);
extern int move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			   unsigned long addr, pgprot_t newprot,
			   int prot_numa);

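/*
 * Global THP mode bits.  These back the sysfs knobs under
 * /sys/kernel/mm/transparent_hugepage/ ("always" vs. "madvise" for both
 * enabled and defrag, plus use_zero_page).  They are tested as bit
 * masks against transparent_hugepage_flags, e.g.:
 *
 *	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
 *		... THP is enabled system-wide ...
 */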
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

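/*
 * page_check_address_pmd() looks up the pmd mapping @page in @mm at
 * @address; the flag selects whether a pmd currently in the middle of
 * being split qualifies.  Judging from the *ptl parameter, a successful
 * lookup is expected to return with the pmd's page table lock held in
 * *ptl for the caller to drop; a failed lookup returns NULL.
 */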
enum page_check_address_pmd_flag {
	PAGE_CHECK_ADDRESS_PMD_FLAG,
	PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
	PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
};
extern pmd_t *page_check_address_pmd(struct page *page,
				     struct mm_struct *mm,
				     unsigned long address,
				     enum page_check_address_pmd_flag flag,
				     spinlock_t **ptl);

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
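/*
 * Worked example: on x86-64 with 4KiB pages, PAGE_SHIFT is 12 and
 * PMD_SHIFT is 21, so HPAGE_PMD_SIZE is 2MiB, HPAGE_PMD_ORDER is 9 and
 * HPAGE_PMD_NR is 512 base pages per huge page.  (HPAGE_PMD_SHIFT is
 * only defined below, inside CONFIG_TRANSPARENT_HUGEPAGE, but that is
 * fine: macros are expanded at their point of use, not of definition.)
 */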

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

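/*
 * THP is enabled for a vma when the global mode is "always", or when
 * the mode is "madvise" and the vma was marked with MADV_HUGEPAGE
 * (VM_HUGEPAGE); it is vetoed by MADV_NOHUGEPAGE (VM_NOHUGEPAGE) and
 * for the short-lived temporary stack vma set up during exec.
 */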
#define transparent_hugepage_enabled(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||			\
	  (transparent_hugepage_flags &				\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&		\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&		\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&		\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||		\
	 (transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&	\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			  pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
extern void __split_huge_page_pmd(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd);
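/*
 * The wrapper below only calls into __split_huge_page_pmd() after a
 * cheap pmd_trans_huge() check, so it is safe (and inexpensive) to use
 * on any pmd, huge or not.
 */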
#define split_huge_page_pmd(__vma, __address, __pmd)		\
	do {							\
		pmd_t *____pmd = (__pmd);			\
		if (unlikely(pmd_trans_huge(*____pmd)))		\
			__split_huge_page_pmd(__vma, __address,	\
					      ____pmd);		\
	} while (0)
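/*
 * wait_split_huge_page() relies on the splitter holding the anon_vma
 * write lock for the duration of the split: taking and immediately
 * releasing that lock therefore acts as a barrier that waits out any
 * split in progress, after which the pmd must be neither huge nor
 * splitting (hence the BUG_ON).
 */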
#define wait_split_huge_page(__anon_vma, __pmd)			\
	do {							\
		pmd_t *____pmd = (__pmd);			\
		anon_vma_lock_write(__anon_vma);		\
		anon_vma_unlock_write(__anon_vma);		\
		BUG_ON(pmd_trans_splitting(*____pmd) ||		\
		       pmd_trans_huge(*____pmd));		\
	} while (0)
extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
				   pmd_t *pmd);
#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				 spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				      spinlock_t **ptl)
{
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
	if (pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return 0;
}
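/*
 * When pmd_trans_huge_lock() returns 1 the pmd's page table lock is
 * held through *ptl and the caller must drop it.  A typical caller
 * pattern (a sketch, not a verbatim call site) would look like:
 *
 *	spinlock_t *ptl;
 *
 *	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 *		... operate on the stable huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */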
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
	if (!vma->anon_vma || vma->vm_ops)
		return;
	__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
static inline struct page *compound_trans_head(struct page *page)
{
	if (PageTail(page)) {
		struct page *head;
		head = page->first_page;
		smp_rmb();
		/*
		 * head may be a dangling pointer.
		 * __split_huge_page_refcount clears PageTail before
		 * overwriting first_page, so if PageTail is still
		 * there it means the head pointer isn't dangling.
		 */
		if (PageTail(page))
			return head;
	}
	return page;
}

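/* Handles a NUMA-hinting fault taken on a huge pmd. */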
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long addr, pmd_t pmd, pmd_t *pmdp);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
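/*
 * With THP compiled out, these stubs let generic mm code call the THP
 * hooks without #ifdefs: the no-op versions compile away, while the
 * BUILD_BUG() in the HPAGE_PMD_* constants catches any code path that
 * would actually need a huge page size in a !THP build.
 */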
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
#define split_huge_page_pmd(__vma, __address, __pmd)	\
	do { } while (0)
#define wait_split_huge_page(__anon_vma, __pmd)	\
	do { } while (0)
#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
	do { } while (0)
#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
				      spinlock_t **ptl)
{
	return 0;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */