/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>

#include <asm/tlbflush.h>

#include "internal.h"

DEFINE_MUTEX(mem_hotplug_mutex);

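/*
 * Serialize memory hotplug operations against each other and, while one
 * is in progress, against hibernation.
 */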
void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);

	/* for exclusive hibernation if CONFIG_HIBERNATION=y */
	lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
	unlock_system_sleep();
	mutex_unlock(&mem_hotplug_mutex);
}


/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start, (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
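/*
 * Tag a bootmem-allocated page with its hotplug type and take an extra
 * reference so that put_page_bootmem() can hand the page back to the
 * buddy allocator once the last reference is dropped.
 */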
static void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

/* reference to __meminit __free_pages_bootmem is valid
 * so use __ref to tell modpost not to generate a warning */
void __ref put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		__free_pages_bootmem(page, 0);
	}

}

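/*
 * Record bootmem info for one section: tag every page backing the
 * section's memmap and usemap via get_page_bootmem().
 */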
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}

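/*
 * Record bootmem info for a whole node: tag the pages backing the pgdat,
 * the zones' wait tables and each valid section's memmap/usemap.
 */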
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);

}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

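/*
 * Extend the zone's pfn span to cover [start_pfn, end_pfn); the span
 * seqlock keeps readers from seeing a torn start/length pair.
 */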
static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

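/* likewise for the node span; called under the pgdat resize lock */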
static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

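/*
 * Grow a zone by one section: initialize the zone if it is still empty,
 * extend the zone and node spans, and init the new section's memmap.
 */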
static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

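/*
 * Add one sparsemem section: allocate its memmap, grow the zone over it
 * and register the new memory block in sysfs.
 */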
static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	/*
	 * XXX: Freeing the memmap with vmemmap is not implemented yet.
	 * This should be removed later.
	 */
	return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}
#endif

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
		      unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* while initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;

	/*
	 * We can only remove entire sections.
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		release_mem_region(pfn << PAGE_SHIFT,
				   PAGES_PER_SECTION << PAGE_SHIFT);
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

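/*
 * Online a single page: account it as ram (and highmem if applicable),
 * clear its Reserved flag and release it to the buddy allocator.
 */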
void online_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	totalram_pages++;
	if (pfn >= num_physpages)
		num_physpages = pfn + 1;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif

	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

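/*
 * walk_system_ram_range() callback: online every page in the range and
 * accumulate the number of pages onlined in *arg.
 */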
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			online_page(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

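/*
 * Online a range of hot-added pages: notify listeners, free the pages to
 * the buddy allocator, update the present-page counters and rebuild the
 * zonelists if the zone was previously empty.
 */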
int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	lock_memory_hotplug();
	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
				    online_pages_range);
	if (ret) {
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
		       nr_pages, pfn);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}

	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	if (need_zonelists_rebuild)
		build_all_zonelists(zone);
	else
		zone_pcp_update(zone);

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	unlock_memory_hotplug();

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-initialized zonelist, build one here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(NULL);
	mutex_unlock(&zonelists_mutex);

	return pgdat;
}

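/* undo hotadd_new_pgdat(): detach the node data and free it */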
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}


/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	lock_memory_hotplug();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

out:
	unlock_memory_hotplug();
	return ret;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	lock_memory_hotplug();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto out;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created,
		 * CPUs on the node can't be hot-added. There is no
		 * rollback path now, so check with BUG_ON() to catch
		 * it reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

out:
	unlock_memory_hotplug();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in a range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and return the pfn of the first LRU page
 * found, or 0 if there is none.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

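/* page allocation callback for migrate_pages() during offline */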
static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
	/* This should be improved! */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
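/*
 * Isolate up to NR_OFFLINE_AT_ONCE_PAGES LRU pages in [start_pfn, end_pfn)
 * and migrate them off the range being offlined.
 */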
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
			put_page(page);
			/* Because we don't hold the big zone->lock, we should
			   check this again here. */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_lru_pages(&source);
			goto out;
		}
		/* this function returns # of failed pages */
		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
				    true, true);
		if (ret)
			putback_lru_pages(&source);
	}
out:
	return ret;
}

757 | |
758 | /* |
759 | * remove from free_area[] and mark all as Reserved. |
760 | */ |
761 | static int |
762 | offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, |
763 | void *data) |
764 | { |
765 | __offline_isolated_pages(start, start + nr_pages); |
766 | return 0; |
767 | } |
768 | |
769 | static void |
770 | offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) |
771 | { |
772 | walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, |
773 | offline_isolated_pages_cb); |
774 | } |
775 | |
/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

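/*
 * Return the number of isolated pages in [start_pfn, end_pfn), or a
 * negative error if some part of the range is not isolated.
 */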
static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
				    check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

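/*
 * Offline a pageblock-aligned range: isolate it, notify listeners, then
 * repeatedly migrate LRU pages away and drain per-cpu lists until the
 * whole range is isolated or the timeout expires, and finally remove the
 * pages from the free lists and fix up the accounting.
 */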
static int __ref offline_pages(unsigned long start_pfn,
		unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/* This makes hotplug much easier... and readable.
	   We assume this for now. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_memory_hotplug();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have a page on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs, this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages, this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/* OK, all of our target is isolated.
	   We cannot do a rollback at this point. */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migrate type MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	init_per_zone_wmark_min();

	if (!node_present_pages(node)) {
		node_clear_state(node, N_HIGH_MEMORY);
		kswapd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_memory_hotplug();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
	       start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn);

out:
	unlock_memory_hotplug();
	return ret;
}

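/* offline [start, start + size) with a 120 second timeout */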
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;

	start_pfn = PFN_DOWN(start);
	end_pfn = start_pfn + PFN_DOWN(size);
	return offline_pages(start_pfn, end_pfn, 120 * HZ);
}
#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);