/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
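/*
 * Usage sketch (illustrative only): the page allocator keeps free pages
 * with _count == 0 and flips each one to a refcount of one just before
 * handing it out, roughly:
 *
 *	VM_BUG_ON(atomic_read(&page->_count));	// still non-refcounted
 *	set_page_refcounted(page);		// caller now owns one reference
 */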

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
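/*
 * Typical usage (hedged sketch): take a page off its LRU list so it
 * cannot be reclaimed while it is being operated on, then put it back:
 *
 *	if (isolate_lru_page(page) == 0) {	// 0 on success, -EBUSY otherwise
 *		... migrate or otherwise process the page ...
 *		putback_lru_page(page);		// also drops the isolation ref
 *	}
 */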

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
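/*
 * Example (sketch): prep_compound_page() is what the allocator runs on a
 * high-order page when the caller asked for a compound page:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_COMP, order);
 *	// the allocator internally did: prep_compound_page(page, order);
 */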

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already held when this is used, so no atomic
 * page->flags operations are needed.
 */
static inline unsigned long page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}
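/*
 * Usage sketch: callers must hold zone->lock and verify PageBuddy()
 * themselves before trusting the order stored in page_private():
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	if (PageBuddy(page))
 *		order = page_order(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */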

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
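/*
 * (Hedged note: this is used when a VM_LOCKED vma is torn down, e.g. on
 * exit or unmap, so its pages revert to normal LRU treatment.)
 */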

/*
 * Called only in the fault path, via page_evictable(), to determine
 * whether a new page is being mapped into a VM_LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}
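/*
 * Note (hedged): because of the VM_SPECIAL test above, vmas carrying
 * VM_IO, VM_PFNMAP and friends are never treated as mlocked here, even
 * when VM_LOCKED is also set.
 */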

/*
 * Must be called with the vma's mmap_sem held for read or write, and
 * the page locked.
 */
extern void mlock_vma_page(struct page *page);
extern void munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}
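/*
 * Usage sketch: truncation can invoke this unconditionally for every
 * page it is about to remove from the pagecache; it is a no-op unless
 * PageMlocked was actually set:
 *
 *	clear_page_mlock(page);
 *	... delete page from mapping ...
 */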

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}
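/*
 * (Note, hedged: the local_irq_save()/restore() pair is there because
 * the double-underscore zone statistics helpers are not interrupt safe;
 * disabling interrupts lets the NR_MLOCK decrement and increment happen
 * without the stats being disturbed in between.)
 */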

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
				 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}
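/*
 * Example (sketch; 'head' and 'i' are illustrative): fetch the i-th
 * subpage of a gigantic page safely, even when struct pages for
 * different MAX_ORDER blocks are not virtually contiguous:
 *
 *	struct page *p = mem_map_offset(head, i);
 */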

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
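/*
 * Iteration sketch (modeled on the hugetlb clear/copy loops; names are
 * illustrative):
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i)) {
 *		cond_resched();
 *		clear_user_highpage(p, addr + i * PAGE_SIZE);
 *	}
 */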

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
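/*
 * Usage sketch: functions on the paging-init path carry this annotation,
 * e.g. (illustrative prototype):
 *
 *	void __paginginit free_area_init_node(int nid, ...);
 */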

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
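/*
 * Example call (in the style of the callers in mm/mm_init.c; the
 * arguments shown are illustrative):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "node %d zone %lu pfns %lu -> %lu\n",
 *		       nid, zone_id, start_pfn, end_pfn);
 */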

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

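/*
 * Return codes for zone_reclaim() (a hedged summary of how the callers
 * interpret them):
 * ZONE_RECLAIM_NOSCAN:  reclaim was not attempted
 * ZONE_RECLAIM_FULL:    the zone was scanned recently; nothing reclaimable
 * ZONE_RECLAIM_SOME:    some pages were freed, but not enough
 * ZONE_RECLAIM_SUCCESS: enough pages were reclaimed
 */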
#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1

extern int hwpoison_filter(struct page *p);

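/*
 * Filter tunables for hwpoison injection (a hedged summary: these are
 * set via debugfs when hwpoison injection is enabled; see
 * mm/memory-failure.c and mm/hwpoison-inject.c):
 */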
extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

#endif /* __MM_INTERNAL_H */