/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or on the LRU, isolation can continue.
         * Later, for example, when the memory hotplug notifier runs,
         * these pages reported as "can be isolated" should be isolated
         * (freed) by the balloon driver through the memory notifier
         * chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found,
                                 skip_hwpoisoned_pages))
                ret = 0;

        /*
         * Here, "immobile" means pages that are not on the LRU. If there
         * are more immobile pages than removable-by-driver pages reported
         * by the notifier, we'll fail.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int migratetype = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

                __mod_zone_freepage_state(zone, -nr_pages, migratetype);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
        return ret;
}
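
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * how a balloon driver might hook the memory-isolation notifier chain
 * consulted above. my_balloon_isolate_cb() and my_balloon_pages_in_range()
 * are hypothetical names; MEM_ISOLATE_COUNT, struct memory_isolate_notify
 * and register_memory_isolate_notifier() are declared in <linux/memory.h>.
 */
#if 0   /* example only */
static unsigned long my_balloon_pages_in_range(unsigned long start_pfn,
                                               unsigned long nr_pages);

static int my_balloon_isolate_cb(struct notifier_block *self,
                                 unsigned long action, void *arg)
{
        struct memory_isolate_notify *isolate = arg;

        /*
         * Report how many pages in the range the driver holds and could
         * hand back, so isolation can account for them.
         */
        if (action == MEM_ISOLATE_COUNT)
                isolate->pages_found +=
                        my_balloon_pages_in_range(isolate->start_pfn,
                                                  isolate->nr_pages);
        return NOTIFY_OK;
}

static struct notifier_block my_balloon_isolate_nb = {
        .notifier_call = my_balloon_isolate_cb,
};
/* ... at init: register_memory_isolate_notifier(&my_balloon_isolate_nb); */
#endif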

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
        nr_pages = move_freepages_block(zone, page, migratetype);
        __mod_zone_freepage_state(zone, nr_pages, migratetype);
        set_pageblock_migratetype(page, migratetype);
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}

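/*
 * Return the first page in the nr_pages pages starting at pfn whose PFN
 * is valid, or NULL if no PFN in the range is valid.
 */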
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;
        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Setting the page-allocation-type to MIGRATE_ISOLATE means that free
 * pages in the range will never be allocated. Any free pages, and pages
 * freed in the future, will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, bool skip_hwpoisoned_pages)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page &&
                    set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}
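
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the typical calling pattern, used by callers such as memory offlining
 * and alloc_contig_range(). my_isolate_and_check() and my_empty_range()
 * are hypothetical; my_empty_range() stands in for whatever migrates or
 * reclaims the pages still in use within the range.
 */
#if 0   /* example only */
static int my_isolate_and_check(unsigned long start_pfn,
                                unsigned long end_pfn)
{
        int ret;

        /* Both PFNs must be pageblock-aligned. */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, false);
        if (ret)
                return ret;

        /* Migrate or reclaim everything still allocated in the range. */
        my_empty_range(start_pfn, end_pfn);

        /* Verify that every page is now free and marked isolated. */
        ret = test_pages_isolated(start_pfn, end_pfn, false);

        /* For this demonstration, make the range allocatable again. */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        return ret;
}
#endif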

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;
        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  bool skip_hwpoisoned_pages)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        /*
                         * If a race between isolation and allocation
                         * happens, some free pages could be on the
                         * MIGRATE_MOVABLE list even though the pageblock's
                         * migration type is MIGRATE_ISOLATE. Catch it and
                         * move the pages onto the MIGRATE_ISOLATE list.
                         */
                        if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
                                struct page *end_page;

                                end_page = page + (1 << page_order(page)) - 1;
                                move_freepages(page_zone(page), page, end_page,
                                               MIGRATE_ISOLATE);
                        }
                        pfn += 1 << page_order(page);
                } else if (page_count(page) == 0 &&
                           get_freepage_migratetype(page) == MIGRATE_ISOLATE)
                        pfn += 1;
                else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
                        /*
                         * The HWPoisoned page may not be in the buddy
                         * system, and page_count() is not 0.
                         */
                        pfn++;
                        continue;
                } else
                        break;
        }
        if (pfn < end_pfn)
                return 0;
        return 1;
}

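/*
 * Test whether all pages in [start_pfn, end_pfn) are free and isolated;
 * returns 0 if so and -EBUSY otherwise. Callers such as memory offlining
 * use this after migrating pages away to confirm the range is empty.
 */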
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        bool skip_hwpoisoned_pages)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
         * are not necessarily aligned to pageblock_nr_pages. So just
         * check the pageblock migratetype first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check that all pages are free or marked as ISOLATED. */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
                                                skip_hwpoisoned_pages);
        spin_unlock_irqrestore(&zone->lock, flags);
        return ret ? 0 : -EBUSY;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                  int **resultp)
{
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;

        return alloc_page(gfp_mask);
}
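
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * alloc_migrate_target() is intended as the new_page_t allocation
 * callback for migrate_pages(), e.g. when emptying an isolated range.
 * "pagelist" is a hypothetical list of pages already isolated from the
 * LRU; the exact migrate_pages() signature varies between kernel
 * versions.
 */
#if 0   /* example only */
{
        LIST_HEAD(pagelist);
        int ret;

        /* ... isolate the in-use pages in the range onto &pagelist ... */

        ret = migrate_pages(&pagelist, alloc_migrate_target, 0,
                            MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
        if (ret)
                putback_lru_pages(&pagelist);
}
#endif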