
/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a different
 * callback and restore_online_page_callback() to restore the generic one.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
    mutex_lock(&mem_hotplug_mutex);

    /* for exclusive hibernation if CONFIG_HIBERNATION=y */
    lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
    unlock_system_sleep();
    mutex_unlock(&mem_hotplug_mutex);
}
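
/*
 * Editor's note: an illustrative usage sketch, not part of the original
 * file. Callers that read or modify hotplug-mutable state are expected to
 * bracket the work with the pair above:
 *
 *     lock_memory_hotplug();
 *     ... walk or modify sections/zones; memory cannot be hot-added
 *         or removed, and hibernation is held off, while we hold it ...
 *     unlock_memory_hotplug();
 */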

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
    struct resource *res;
    res = kzalloc(sizeof(struct resource), GFP_KERNEL);
    BUG_ON(!res);

    res->name = "System RAM";
    res->start = start;
    res->end = start + size - 1;
    res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
    if (request_resource(&iomem_resource, res) < 0) {
        printk(KERN_ERR "System RAM resource %pR cannot be added\n", res);
        kfree(res);
        res = NULL;
    }
    return res;
}
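
/*
 * Editor's note: a worked example with hypothetical numbers, not part of
 * the original file. Hot-adding 128MiB at physical address 0x100000000
 * produces a "System RAM" entry covering [0x100000000, 0x107ffffff] in
 * /proc/iomem; res->end is inclusive, hence start + size - 1.
 */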

static void release_memory_resource(struct resource *res)
{
    if (!res)
        return;
    release_resource(res);
    kfree(res);
    return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void get_page_bootmem(unsigned long info, struct page *page,
                 unsigned long type)
{
    page->lru.next = (struct list_head *) type;
    SetPagePrivate(page);
    set_page_private(page, info);
    atomic_inc(&page->_count);
}

/*
 * The reference to __meminit __free_pages_bootmem is valid,
 * so use __ref to tell modpost not to generate a warning.
 */
void __ref put_page_bootmem(struct page *page)
{
    unsigned long type;

    type = (unsigned long) page->lru.next;
    BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
           type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

    if (atomic_dec_return(&page->_count) == 1) {
        ClearPagePrivate(page);
        set_page_private(page, 0);
        INIT_LIST_HEAD(&page->lru);
        __free_pages_bootmem(page, 0);
    }
}
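
/*
 * Editor's note: an illustrative pairing, not part of the original file.
 * Each registration above bumps page->_count, so for a bootmem page whose
 * count starts at 1:
 *
 *     get_page_bootmem(section_nr, page, SECTION_INFO);   count: 1 -> 2
 *     ...
 *     put_page_bootmem(page);                             count: 2 -> 1,
 *                       page is handed back via __free_pages_bootmem()
 */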

static void register_page_bootmem_info_section(unsigned long start_pfn)
{
    unsigned long *usemap, mapsize, section_nr, i;
    struct mem_section *ms;
    struct page *page, *memmap;

    if (!pfn_valid(start_pfn))
        return;

    section_nr = pfn_to_section_nr(start_pfn);
    ms = __nr_to_section(section_nr);

    /* Get section's memmap address */
    memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

    /*
     * Get page for the memmap's phys address
     * XXX: need more consideration for sparse_vmemmap...
     */
    page = virt_to_page(memmap);
    mapsize = sizeof(struct page) * PAGES_PER_SECTION;
    mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

    /* remember memmap's page */
    for (i = 0; i < mapsize; i++, page++)
        get_page_bootmem(section_nr, page, SECTION_INFO);

    usemap = __nr_to_section(section_nr)->pageblock_flags;
    page = virt_to_page(usemap);

    mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

    for (i = 0; i < mapsize; i++, page++)
        get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
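
/*
 * Editor's note: a worked example under assumed parameters, not part of
 * the original file. On an x86_64-like config (4KiB pages,
 * PAGES_PER_SECTION == 32768, sizeof(struct page) == 64) the section
 * memmap is 32768 * 64 bytes == 2MiB, so the SECTION_INFO loop above
 * registers 512 memmap pages for one section.
 */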

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
    unsigned long i, pfn, end_pfn, nr_pages;
    int node = pgdat->node_id;
    struct page *page;
    struct zone *zone;

    nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
    page = virt_to_page(pgdat);

    for (i = 0; i < nr_pages; i++, page++)
        get_page_bootmem(node, page, NODE_INFO);

    zone = &pgdat->node_zones[0];
    for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
        if (zone->wait_table) {
            nr_pages = zone->wait_table_hash_nr_entries
                * sizeof(wait_queue_head_t);
            nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
            page = virt_to_page(zone->wait_table);

            for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);
        }
    }

    pfn = pgdat->node_start_pfn;
    end_pfn = pfn + pgdat->node_spanned_pages;

    /* register section info */
    for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
        register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
               unsigned long end_pfn)
{
    unsigned long old_zone_end_pfn;

    zone_span_writelock(zone);

    old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
    if (start_pfn < zone->zone_start_pfn)
        zone->zone_start_pfn = start_pfn;

    zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                zone->zone_start_pfn;

    zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
                unsigned long end_pfn)
{
    unsigned long old_pgdat_end_pfn =
        pgdat->node_start_pfn + pgdat->node_spanned_pages;

    if (start_pfn < pgdat->node_start_pfn)
        pgdat->node_start_pfn = start_pfn;

    pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                    pgdat->node_start_pfn;
}
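
/*
 * Editor's note: a worked example with hypothetical pfns, not part of the
 * original file. If a zone spans [0x10000, 0x20000) and the section
 * [0x20000, 0x28000) is hot-added, start_pfn is not below zone_start_pfn,
 * so only the span grows: spanned_pages becomes
 * max(0x20000, 0x28000) - 0x10000 == 0x18000.
 */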

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
    struct pglist_data *pgdat = zone->zone_pgdat;
    int nr_pages = PAGES_PER_SECTION;
    int nid = pgdat->node_id;
    int zone_type;
    unsigned long flags;

    zone_type = zone - pgdat->node_zones;
    if (!zone->wait_table) {
        int ret;

        ret = init_currently_empty_zone(zone, phys_start_pfn,
                        nr_pages, MEMMAP_HOTPLUG);
        if (ret)
            return ret;
    }
    pgdat_resize_lock(zone->zone_pgdat, &flags);
    grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
    grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
            phys_start_pfn + nr_pages);
    pgdat_resize_unlock(zone->zone_pgdat, &flags);
    memmap_init_zone(nr_pages, nid, zone_type,
             phys_start_pfn, MEMMAP_HOTPLUG);
    return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
                    unsigned long phys_start_pfn)
{
    int nr_pages = PAGES_PER_SECTION;
    int ret;

    if (pfn_valid(phys_start_pfn))
        return -EEXIST;

    ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

    if (ret < 0)
        return ret;

    ret = __add_zone(zone, phys_start_pfn);

    if (ret < 0)
        return ret;

    return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
    /*
     * XXX: Freeing the memmap with vmemmap is not implemented yet.
     * This should be removed later.
     */
    return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
    unsigned long flags;
    struct pglist_data *pgdat = zone->zone_pgdat;
    int ret = -EINVAL;

    if (!valid_section(ms))
        return ret;

    ret = unregister_memory_section(ms);
    if (ret)
        return ret;

    pgdat_resize_lock(pgdat, &flags);
    sparse_remove_one_section(zone, ms);
    pgdat_resize_unlock(pgdat, &flags);
    return 0;
}
#endif

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
            unsigned long nr_pages)
{
    unsigned long i;
    int err = 0;
    int start_sec, end_sec;
    /* when initializing the mem_map, align the hot-added range to sections */
    start_sec = pfn_to_section_nr(phys_start_pfn);
    end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

    for (i = start_sec; i <= end_sec; i++) {
        err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

        /*
         * -EEXIST is finally dealt with by the ioresource collision
         * check; see add_memory() => register_memory_resource().
         * A warning is printed if there is a collision.
         */
        if (err && (err != -EEXIST))
            break;
        err = 0;
    }

    return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
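
/*
 * Editor's note: a worked example with hypothetical values, not part of
 * the original file. With PAGES_PER_SECTION == 32768, calling
 * __add_pages() with phys_start_pfn == 32768 and nr_pages == 65536 gives
 * start_sec == 1 and end_sec == 2, so __add_section() runs for sections
 * 1 and 2, i.e. pfns 1 << PFN_SECTION_SHIFT and 2 << PFN_SECTION_SHIFT.
 */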

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. The caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
         unsigned long nr_pages)
{
    unsigned long i, ret = 0;
    int sections_to_remove;

    /*
     * We can only remove entire sections
     */
    BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
    BUG_ON(nr_pages % PAGES_PER_SECTION);

    sections_to_remove = nr_pages / PAGES_PER_SECTION;
    for (i = 0; i < sections_to_remove; i++) {
        unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
        release_mem_region(pfn << PAGE_SHIFT,
                   PAGES_PER_SECTION << PAGE_SHIFT);
        ret = __remove_section(zone, __pfn_to_section(pfn));
        if (ret)
            break;
    }
    return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

int set_online_page_callback(online_page_callback_t callback)
{
    int rc = -EINVAL;

    lock_memory_hotplug();

    if (online_page_callback == generic_online_page) {
        online_page_callback = callback;
        rc = 0;
    }

    unlock_memory_hotplug();

    return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
    int rc = -EINVAL;

    lock_memory_hotplug();

    if (online_page_callback == callback) {
        online_page_callback = generic_online_page;
        rc = 0;
    }

    unlock_memory_hotplug();

    return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);
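
/*
 * Editor's note: an illustrative registration sketch, not part of the
 * original file. The Xen balloon driver uses this interface in a similar
 * way; my_online_page(), my_driver_init() and my_driver_exit() are
 * made-up names:
 *
 *     static void my_online_page(struct page *page)
 *     {
 *         __online_page_set_limits(page);
 *         ... divert the page to the driver instead of freeing it ...
 *     }
 *
 *     static int __init my_driver_init(void)
 *     {
 *         return set_online_page_callback(&my_online_page);
 *     }
 *
 *     static void __exit my_driver_exit(void)
 *     {
 *         restore_online_page_callback(&my_online_page);
 *     }
 */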

void __online_page_set_limits(struct page *page)
{
    unsigned long pfn = page_to_pfn(page);

    if (pfn >= num_physpages)
        num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
    totalram_pages++;

#ifdef CONFIG_HIGHMEM
    if (PageHighMem(page))
        totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
    ClearPageReserved(page);
    init_page_count(page);
    __free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
    __online_page_set_limits(page);
    __online_page_increment_counters(page);
    __online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
            void *arg)
{
    unsigned long i;
    unsigned long onlined_pages = *(unsigned long *)arg;
    struct page *page;

    if (PageReserved(pfn_to_page(start_pfn)))
        for (i = 0; i < nr_pages; i++) {
            page = pfn_to_page(start_pfn + i);
            (*online_page_callback)(page);
            onlined_pages++;
        }
    *(unsigned long *)arg = onlined_pages;
    return 0;
}

int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
{
    unsigned long onlined_pages = 0;
    struct zone *zone;
    int need_zonelists_rebuild = 0;
    int nid;
    int ret;
    struct memory_notify arg;

    lock_memory_hotplug();
    arg.start_pfn = pfn;
    arg.nr_pages = nr_pages;
    arg.status_change_nid = -1;

    nid = page_to_nid(pfn_to_page(pfn));
    if (node_present_pages(nid) == 0)
        arg.status_change_nid = nid;

    ret = memory_notify(MEM_GOING_ONLINE, &arg);
    ret = notifier_to_errno(ret);
    if (ret) {
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        unlock_memory_hotplug();
        return ret;
    }
    /*
     * This doesn't need a lock to do pfn_to_page().
     * The section can't be removed here because of the
     * memory_block->state_mutex.
     */
    zone = page_zone(pfn_to_page(pfn));
    /*
     * If this zone is not populated, it is not in the zonelists and
     * the page allocator ignores it, so the zonelists must be rebuilt
     * after onlining.
     */
    mutex_lock(&zonelists_mutex);
    if (!populated_zone(zone))
        need_zonelists_rebuild = 1;

    ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
        online_pages_range);
    if (ret) {
        mutex_unlock(&zonelists_mutex);
        printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
               (unsigned long long) pfn << PAGE_SHIFT,
               (((unsigned long long) pfn + nr_pages)
                << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        unlock_memory_hotplug();
        return ret;
    }

    zone->present_pages += onlined_pages;
    zone->zone_pgdat->node_present_pages += onlined_pages;
    if (onlined_pages) {
        node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
        if (need_zonelists_rebuild)
            build_all_zonelists(NULL, zone);
        else
            zone_pcp_update(zone);
    }

    mutex_unlock(&zonelists_mutex);

    init_per_zone_wmark_min();

    if (onlined_pages)
        kswapd_run(zone_to_nid(zone));

    vm_total_pages = nr_free_pagecache_pages();

    writeback_set_ratelimit();

    if (onlined_pages)
        memory_notify(MEM_ONLINE, &arg);
    unlock_memory_hotplug();

    return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
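
/*
 * Editor's note, not part of the original file: online_pages() is not
 * called directly by users; it is reached from the memory sysfs
 * interface, e.g. (illustrative shell session, hypothetical block number):
 *
 *     # echo online > /sys/devices/system/memory/memory32/state
 *
 * which leads the memory block driver to call online_pages() for the
 * block's pfn range.
 */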

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
    struct pglist_data *pgdat;
    unsigned long zones_size[MAX_NR_ZONES] = {0};
    unsigned long zholes_size[MAX_NR_ZONES] = {0};
    unsigned long start_pfn = start >> PAGE_SHIFT;

    pgdat = arch_alloc_nodedata(nid);
    if (!pgdat)
        return NULL;

    arch_refresh_nodedata(nid, pgdat);

    /* we can use NODE_DATA(nid) from here */

    /* init node's zones as empty zones, we don't have any present pages. */
    free_area_init_node(nid, zones_size, start_pfn, zholes_size);

    /*
     * The node we allocated has no zone fallback lists. Build them here
     * to avoid accessing an uninitialized zonelist.
     */
    mutex_lock(&zonelists_mutex);
    build_all_zonelists(pgdat, NULL);
    mutex_unlock(&zonelists_mutex);

    return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
    arch_refresh_nodedata(nid, NULL);
    arch_free_nodedata(pgdat);
    return;
}

/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
    pg_data_t *pgdat;
    int ret;

    lock_memory_hotplug();
    pgdat = hotadd_new_pgdat(nid, 0);
    if (!pgdat) {
        ret = -ENOMEM;
        goto out;
    }
    node_set_online(nid);
    ret = register_one_node(nid);
    BUG_ON(ret);

out:
    unlock_memory_hotplug();
    return ret;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
    pg_data_t *pgdat = NULL;
    int new_pgdat = 0;
    struct resource *res;
    int ret;

    lock_memory_hotplug();

    res = register_memory_resource(start, size);
    ret = -EEXIST;
    if (!res)
        goto out;

    if (!node_online(nid)) {
        pgdat = hotadd_new_pgdat(nid, start);
        ret = -ENOMEM;
        if (!pgdat)
            goto error;
        new_pgdat = 1;
    }

    /* call arch's memory hotadd */
    ret = arch_add_memory(nid, start, size);

    if (ret < 0)
        goto error;

    /* we online the node here. we can't roll back from here. */
    node_set_online(nid);

    if (new_pgdat) {
        ret = register_one_node(nid);
        /*
         * If the sysfs file for the new node cannot be created, CPUs
         * on the node cannot be hot-added. There is no way to roll
         * back now, so catch the failure with BUG_ON(), reluctantly.
         */
        BUG_ON(ret);
    }

    /* create new memmap entry */
    firmware_map_add_hotplug(start, start + size, "System RAM");

    goto out;

error:
    /* roll back the pgdat allocation and the rest */
    if (new_pgdat)
        rollback_node_hotadd(nid, pgdat);
    if (res)
        release_memory_resource(res);

out:
    unlock_memory_hotplug();
    return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
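
/*
 * Editor's note: an illustrative caller sketch, not part of the original
 * file. ACPI memory device hot-add follows roughly this shape (field
 * names abbreviated):
 *
 *     nid = acpi_get_node(handle);
 *     ret = add_memory(nid, info->start_addr, info->length);
 *     if (ret)
 *         ... the device cannot be enabled ...
 */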

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock
 * will be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
    return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
    /* Ensure the starting page is pageblock-aligned */
    BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

    /* If the entire pageblock is free, move to the end of the free page */
    if (pageblock_free(page)) {
        int order;
        /* be careful. we don't have locks, page_order can change. */
        order = page_order(page);
        if ((order < MAX_ORDER) && (order >= pageblock_order))
            return page + (1 << order);
    }

    return page + pageblock_nr_pages;
}
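
/*
 * Editor's note: a worked example under an assumed config, not part of
 * the original file. Assuming pageblock_order == 9 (pageblock_nr_pages
 * == 512): if the pageblock starts with a free order-10 buddy page,
 * next_active_pageblock() skips 1 << 10 == 1024 pfns (two pageblocks)
 * at once; otherwise it advances by 512.
 */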

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
    struct page *page = pfn_to_page(start_pfn);
    struct page *end_page = page + nr_pages;

    /* Check the starting page of each pageblock within the range */
    for (; page < end_page; page = next_active_pageblock(page)) {
        if (!is_pageblock_removable_nolock(page))
            return 0;
        cond_resched();
    }

    /* All pageblocks in the memory block are likely to be hot-removable */
    return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
    unsigned long pfn;
    struct zone *zone = NULL;
    struct page *page;
    int i;

    for (pfn = start_pfn;
         pfn < end_pfn;
         pfn += MAX_ORDER_NR_PAGES) {
        i = 0;
        /* This is just a CONFIG_HOLES_IN_ZONE check. */
        while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
            i++;
        if (i == MAX_ORDER_NR_PAGES)
            continue;
        page = pfn_to_page(pfn + i);
        if (zone && page_zone(page) != zone)
            return 0;
        zone = page_zone(page);
    }
    return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and return the pfn of the first LRU page
 * found, or 0 if there is none.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
    unsigned long pfn;
    struct page *page;

    for (pfn = start; pfn < end; pfn++) {
        if (pfn_valid(pfn)) {
            page = pfn_to_page(pfn);
            if (PageLRU(page))
                return pfn;
        }
    }
    return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
    /* This should be improooooved!! */
    return alloc_page(GFP_HIGHUSER_MOVABLE);
}

#define NR_OFFLINE_AT_ONCE_PAGES (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
    unsigned long pfn;
    struct page *page;
    int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
    int not_managed = 0;
    int ret = 0;
    LIST_HEAD(source);

    for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
        if (!pfn_valid(pfn))
            continue;
        page = pfn_to_page(pfn);
        if (!get_page_unless_zero(page))
            continue;
        /*
         * We can skip free pages. And we can only deal with pages on
         * the LRU.
         */
        ret = isolate_lru_page(page);
        if (!ret) { /* Success */
            put_page(page);
            list_add_tail(&page->lru, &source);
            move_pages--;
            inc_zone_page_state(page, NR_ISOLATED_ANON +
                        page_is_file_cache(page));

        } else {
#ifdef CONFIG_DEBUG_VM
            printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
                   pfn);
            dump_page(page);
#endif
            put_page(page);
            /*
             * Because we don't hold the big zone->lock, we have
             * to check the page count again here.
             */
            if (page_count(page)) {
                not_managed++;
                ret = -EBUSY;
                break;
            }
        }
    }
    if (!list_empty(&source)) {
        if (not_managed) {
            putback_lru_pages(&source);
            goto out;
        }
        /* this function returns the number of failed pages */
        ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
                            true, MIGRATE_SYNC);
        if (ret)
            putback_lru_pages(&source);
    }
out:
    return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
            void *data)
{
    __offline_isolated_pages(start, start + nr_pages);
    return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
    walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
                offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
            void *data)
{
    int ret;
    long offlined = *(long *)data;

    ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
    offlined = nr_pages;
    if (!ret)
        *(long *)data += offlined;
    return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
    long offlined = 0;
    int ret;

    ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
            check_pages_isolated_cb);
    if (ret < 0)
        offlined = (long)ret;
    return offlined;
}

static int __ref offline_pages(unsigned long start_pfn,
          unsigned long end_pfn, unsigned long timeout)
{
    unsigned long pfn, nr_pages, expire;
    long offlined_pages;
    int ret, drain, retry_max, node;
    struct zone *zone;
    struct memory_notify arg;

    BUG_ON(start_pfn >= end_pfn);
    /* at least, alignment against pageblock is necessary */
    if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
        return -EINVAL;
    if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
        return -EINVAL;
    /*
     * This makes hotplug much easier... and readable.
     * We assume this for now.
     */
    if (!test_pages_in_a_zone(start_pfn, end_pfn))
        return -EINVAL;

    lock_memory_hotplug();

    zone = page_zone(pfn_to_page(start_pfn));
    node = zone_to_nid(zone);
    nr_pages = end_pfn - start_pfn;

    /* set the above range as isolated */
    ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
    if (ret)
        goto out;

    arg.start_pfn = start_pfn;
    arg.nr_pages = nr_pages;
    arg.status_change_nid = -1;
    if (nr_pages >= node_present_pages(node))
        arg.status_change_nid = node;

    ret = memory_notify(MEM_GOING_OFFLINE, &arg);
    ret = notifier_to_errno(ret);
    if (ret)
        goto failed_removal;

    pfn = start_pfn;
    expire = jiffies + timeout;
    drain = 0;
    retry_max = 5;
repeat:
    /* start memory hot removal */
    ret = -EAGAIN;
    if (time_after(jiffies, expire))
        goto failed_removal;
    ret = -EINTR;
    if (signal_pending(current))
        goto failed_removal;
    ret = 0;
    if (drain) {
        lru_add_drain_all();
        cond_resched();
        drain_all_pages();
    }

    pfn = scan_lru_pages(start_pfn, end_pfn);
    if (pfn) { /* We have a page on the LRU */
        ret = do_migrate_range(pfn, end_pfn);
        if (!ret) {
            drain = 1;
            goto repeat;
        } else {
            if (ret < 0)
                if (--retry_max == 0)
                    goto failed_removal;
            yield();
            drain = 1;
            goto repeat;
        }
    }
    /* drain all zones' LRU pagevecs; this is asynchronous... */
    lru_add_drain_all();
    yield();
    /* drain pcp pages; this is synchronous. */
    drain_all_pages();
    /* check again */
    offlined_pages = check_pages_isolated(start_pfn, end_pfn);
    if (offlined_pages < 0) {
        ret = -EBUSY;
        goto failed_removal;
    }
    printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
    /*
     * OK, all of our target range is isolated.
     * We cannot do a rollback from this point.
     */
    offline_isolated_pages(start_pfn, end_pfn);
    /* reset pagetype flags and make the migratetype MOVABLE again */
    undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
    /* removal success */
    zone->present_pages -= offlined_pages;
    zone->zone_pgdat->node_present_pages -= offlined_pages;
    totalram_pages -= offlined_pages;

    init_per_zone_wmark_min();

    if (!populated_zone(zone))
        zone_pcp_reset(zone);

    if (!node_present_pages(node)) {
        node_clear_state(node, N_HIGH_MEMORY);
        kswapd_stop(node);
    }

    vm_total_pages = nr_free_pagecache_pages();
    writeback_set_ratelimit();

    memory_notify(MEM_OFFLINE, &arg);
    unlock_memory_hotplug();
    return 0;

failed_removal:
    printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
           (unsigned long long) start_pfn << PAGE_SHIFT,
           ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
    memory_notify(MEM_CANCEL_OFFLINE, &arg);
    /* push back to the free area */
    undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
    unlock_memory_hotplug();
    return ret;
}

int remove_memory(u64 start, u64 size)
{
    unsigned long start_pfn, end_pfn;

    start_pfn = PFN_DOWN(start);
    end_pfn = start_pfn + PFN_DOWN(size);
    return offline_pages(start_pfn, end_pfn, 120 * HZ);
}
#else
int remove_memory(u64 start, u64 size)
{
    return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);
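
/*
 * Editor's note: an illustrative caller sketch, not part of the original
 * file. The ACPI memory driver's hot-remove path is of roughly this shape:
 *
 *     ret = remove_memory(info->start_addr, info->length);
 *     if (ret)
 *         ... offlining failed, the range stays online ...
 *
 * Note that in this version remove_memory() only offlines the range (via
 * offline_pages() with a 120 * HZ timeout); it does not tear down the
 * mappings or the memmap.
 */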
