/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
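
/*
 * Usage sketch (illustrative; prep_new_page() in mm/page_alloc.c is a
 * real caller): the buddy allocator keeps free pages at _count == 0
 * and hands out the first, owning reference only at allocation time:
 *
 *	page = __rmqueue(zone, order, migratetype);
 *	set_page_refcounted(page);
 *	return page;
 *
 * After set_page_refcounted(), _count is 1 and the caller owns the
 * reference. Error handling is omitted from the sketch.
 */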

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

static inline void __get_page_tail_foll(struct page *page,
					bool get_page_head)
{
	/*
	 * If we're getting a tail page, the elevated page->_count is
	 * required only in the head page and we will elevate the head
	 * page->_count and tail page->_mapcount.
	 *
	 * We elevate page_tail->_mapcount for tail pages to force
	 * page_tail->_count to be zero at all times to avoid getting
	 * false positives from get_page_unless_zero() with
	 * speculative page access (like in
	 * page_cache_get_speculative()) on tail pages.
	 */
	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	VM_BUG_ON(page_mapcount(page) < 0);
	if (get_page_head)
		atomic_inc(&page->first_page->_count);
	atomic_inc(&page->_mapcount);
}

/*
 * This is meant to be called as the FOLL_GET operation of
 * follow_page(), and must be called with the proper PT lock held,
 * while the pte (or pmd_trans_huge) still maps the page.
 */
static inline void get_page_foll(struct page *page)
{
	if (unlikely(PageTail(page)))
		/*
		 * This is safe only because
		 * __split_huge_page_refcount() can't run under
		 * get_page_foll() because we hold the proper PT lock.
		 */
		__get_page_tail_foll(page, true);
	else {
		/*
		 * Getting a normal page or the head of a compound page
		 * requires an already elevated page->_count.
		 */
		VM_BUG_ON(atomic_read(&page->_count) <= 0);
		atomic_inc(&page->_count);
	}
}
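
/*
 * Illustrative FOLL_GET sequence (hypothetical caller, modelled on the
 * follow_page() pte path): the PT lock pins the mapping while the
 * reference is taken:
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	page = vm_normal_page(vma, address, *pte);
 *	if (page && (flags & FOLL_GET))
 *		get_page_foll(page);
 *	pte_unmap_unlock(pte, ptl);
 */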

extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long start_free_pfn;	/* where we started the search */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */
	bool wrapped;			/* Order > 0 compactions are
					   incremental; once free_pfn and
					   migrate_pfn meet, we restart from
					   the top of the zone and remember
					   that we wrapped around. */

	int order;			/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc. */
	struct zone *zone;
};
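
/*
 * Simplified pseudo-logic of a compaction run (not the real
 * compact_zone() loop; see mm/compaction.c): the two scanners walk
 * towards each other until they meet:
 *
 *	cc->migrate_pfn = zone->zone_start_pfn;
 *	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
 *	while (cc->free_pfn > cc->migrate_pfn) {
 *		isolate movable pages at migrate_pfn onto cc->migratepages;
 *		isolate free pages at free_pfn onto cc->freepages;
 *		migrate_pages(&cc->migratepages, ...);
 *	}
 *
 * The real loop additionally implements the incremental, wrapping
 * behaviour documented at cc->wrapped above.
 */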

unsigned long
isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}
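
/*
 * Typical use under zone->lock (hypothetical snippet): PageBuddy()
 * must be tested first, because page_private() holds the order only
 * while the page sits in the buddy allocator:
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	if (PageBuddy(page))
 *		order = page_order(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */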

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Called only in the fault path, via page_evictable(), for a new page
 * to determine if it's being mapped into a VM_LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
				    struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}
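
/*
 * Illustrative caller pattern (hypothetical; the else branch is a
 * stand-in for the normal LRU insertion path):
 *
 *	if (mlocked_vma_newpage(vma, page))
 *		add_page_to_unevictable_list(page);
 *	else
 *		add_to_regular_lru(page);
 *
 * add_page_to_unevictable_list() is real (mm/swap.c);
 * add_to_regular_lru() is a hypothetical placeholder.
 */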

/*
 * Must be called with the vma's mmap_sem held for read or write,
 * and the page locked.
 */
extern void mlock_vma_page(struct page *page);
extern void munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag and update the statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		/*
		 * __dec/__inc_zone_page_state() are the non-atomic
		 * per-cpu counter updates, so interrupts must be off.
		 */
		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
				 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
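
/*
 * Illustrative walk over every subpage of a gigantic page (modelled on
 * the hugetlb clear/copy helpers in mm/memory.c; process_subpage() is
 * a hypothetical stand-in for the per-page work):
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < pages_per_huge_page;
 *	     i++, p = mem_map_next(p, base, i))
 *		process_subpage(p);
 *
 * mem_map_next() keeps the walk correct even where the mem_map is
 * discontiguous across MAX_ORDER_NR_PAGES boundaries.
 */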

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
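
/*
 * Example invocation, modelled on a real call site in mm/page_alloc.c:
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		"Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		nid, (unsigned long)zone_idx(zone),
 *		zone_start_pfn, zone_start_pfn + size);
 *
 * The message is emitted only when the requested level is below
 * mminit_loglevel, which is set via the mminit_loglevel= boot
 * parameter (see mm/mm_init.c).
 */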

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1
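
/*
 * How callers interpret the zone_reclaim() return codes (sketch based
 * on the switch in get_page_from_freelist(), mm/page_alloc.c):
 *
 *	switch (zone_reclaim(zone, gfp_mask, order)) {
 *	case ZONE_RECLAIM_NOSCAN:	zone was not scanned at all
 *	case ZONE_RECLAIM_FULL:		scanned, but nothing reclaimable
 *		continue with the next zone;
 *	default:
 *		recheck zone_watermark_ok() before allocating here;
 *	}
 */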

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void set_pageblock_order(void);

#endif /* __MM_INTERNAL_H */