/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include "internal.h"
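
/*
 * Mark the pageblock containing @page as MIGRATE_ISOLATE if the block
 * holds no unmovable pages, and move its free pages to the
 * MIGRATE_ISOLATE free list. Takes zone->lock internally.
 *
 * Returns 0 on success, -EBUSY if the pageblock cannot be isolated.
 */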
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;
        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or are on the LRU, isolation can continue.
         * Later, for example when the memory hotplug notifier runs, the
         * pages reported as "can be isolated" should be isolated (freed)
         * by the balloon driver through the memory notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: memory hotplug doesn't call shrink_slab() by itself yet,
         * so we only check MOVABLE pages here.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found,
                                 skip_hwpoisoned_pages))
                ret = 0;

        /*
         * "Immobile" means not-on-LRU pages. If there are more immobile
         * pages than removable-by-driver pages reported by the notifier,
         * we'll fail.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int migratetype = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

                __mod_zone_freepage_state(zone, -nr_pages, migratetype);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
        return ret;
}
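
/*
 * Revert an isolated pageblock to @migratetype and move its free pages
 * back to the matching free list. A no-op if the pageblock is not
 * currently MIGRATE_ISOLATE.
 */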
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
        nr_pages = move_freepages_block(zone, page, migratetype);
        __mod_zone_freepage_state(zone, nr_pages, migratetype);
        set_pageblock_migratetype(page, migratetype);
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}
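
/*
 * Return the first page with a valid PFN in [pfn, pfn + nr_pages), or
 * NULL if no PFN in the range is valid.
 */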
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}
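
/*
 * A sketch of the typical caller pattern (real callers, e.g. memory
 * hotplug and alloc_contig_range(), wrap this in retry and error
 * handling; the PFN variables here are illustrative):
 *
 *      if (start_isolate_page_range(start_pfn, end_pfn,
 *                                   MIGRATE_MOVABLE, true))
 *              return -EBUSY;
 *      ... migrate or free the in-use pages in [start_pfn, end_pfn) ...
 *      if (test_pages_isolated(start_pfn, end_pfn, true) == 0)
 *              ... the range is now entirely free and isolated ...
 *      undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */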

/*
 * start_isolate_page_range() -- set the migratetype of a range of
 * pageblocks to MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migratetype to restore during error recovery.
 * @skip_hwpoisoned_pages: if true, HWPoisoned pages do not block isolation.
 *
 * Setting the migratetype to MIGRATE_ISOLATE means that free pages in
 * the range will never be allocated: any pages already free, and any
 * pages freed in the future, stay off the allocation paths.
 *
 * start_pfn/end_pfn must be aligned to pageblock_nr_pages.
 * Returns 0 on success, or -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, bool skip_hwpoisoned_pages)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page &&
                    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}

/*
 * Make isolated pages available again. start_pfn/end_pfn must be
 * aligned to pageblock_nr_pages, as in start_isolate_page_range().
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}

/*
 * Test whether all pages in the range are free (i.e., isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone, and
 * zone->lock must be held by the caller.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        /*
                         * If a race between isolation and allocation
                         * occurs, some free pages could still be on the
                         * MIGRATE_MOVABLE list even though the pageblock's
                         * migratetype is MIGRATE_ISOLATE. Catch them and
                         * move them to the MIGRATE_ISOLATE list.
                         */
                        if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
                                struct page *end_page;

                                end_page = page + (1 << page_order(page)) - 1;
                                move_freepages(page_zone(page), page, end_page,
                                               MIGRATE_ISOLATE);
                        }
                        pfn += 1 << page_order(page);
                } else if (page_count(page) == 0 &&
                           get_freepage_migratetype(page) == MIGRATE_ISOLATE)
                        pfn += 1;
                else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
                        /*
                         * An HWPoisoned page may not be in the buddy
                         * system, and its page_count() may be non-zero.
                         */
                        pfn++;
                        continue;
                } else
                        break;
        }
        if (pfn < end_pfn)
                return 0;
        return 1;
}
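
/*
 * Check that all pages in [start_pfn, end_pfn) are free or marked
 * MIGRATE_ISOLATE; unlike __test_page_isolated_in_pageblock(), the
 * caller does not hold zone->lock. Returns 0 if the whole range is
 * isolated, -EBUSY otherwise.
 */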
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
         * are not necessarily aligned to pageblock_nr_pages. Check each
         * pageblock's migratetype first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
                                                skip_hwpoisoned_pages);
        spin_unlock_irqrestore(&zone->lock, flags);
        return ret ? 0 : -EBUSY;
}
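
/*
 * Allocation callback usable with migrate_pages() when draining an
 * isolated range (memory offlining, for example): allocate a destination
 * page for @page. @private and @resultp follow the migrate_pages()
 * callback convention and are unused here.
 */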
struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                  int **resultp)
{
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

        /*
         * TODO: allocate a destination hugepage from the nearest neighbor
         * node, in accordance with the memory policy of the user process,
         * if possible. For now, as a simple workaround, we use the next
         * node as the destination.
         */
        if (PageHuge(page)) {
                nodemask_t src = nodemask_of_node(page_to_nid(page));
                nodemask_t dst;

                nodes_complement(dst, src);
                return alloc_huge_page_node(page_hstate(compound_head(page)),
                                            next_node(page_to_nid(page), dst));
        }

        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;

        return alloc_page(gfp_mask);
}