mm/sparse.c

/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
    ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
    ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
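/*
 * With CONFIG_SPARSEMEM_EXTREME the table above is a two-level tree:
 * each root is a lazily allocated array of SECTIONS_PER_ROOT
 * mem_sections (see sparse_index_init() below), and a section number
 * splits into root = SECTION_NR_TO_ROOT(nr) and
 * offset = nr % SECTIONS_PER_ROOT.  The flat variant simply indexes
 * the static 2-D array directly.
 */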

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
    return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
    section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
    struct mem_section *section = NULL;
    unsigned long array_size = SECTIONS_PER_ROOT *
                   sizeof(struct mem_section);

    if (slab_is_available()) {
        if (node_state(nid, N_HIGH_MEMORY))
            section = kzalloc_node(array_size, GFP_KERNEL, nid);
        else
            section = kzalloc(array_size, GFP_KERNEL);
    } else {
        section = alloc_bootmem_node(NODE_DATA(nid), array_size);
    }

    return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
    unsigned long root = SECTION_NR_TO_ROOT(section_nr);
    struct mem_section *section;

    if (mem_section[root])
        return -EEXIST;

    section = sparse_index_alloc(nid);
    if (!section)
        return -ENOMEM;

    mem_section[root] = section;

    return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
    return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
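/*
 * Reverse-map a mem_section pointer back to its section number with a
 * linear scan over the roots; this is only used on slow paths such as
 * memory hot-remove.
 */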
int __section_nr(struct mem_section* ms)
{
    unsigned long root_nr;
    struct mem_section* root;

    for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
        root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
        if (!root)
            continue;

        if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
            break;
    }

    VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

    return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
    return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
    return (section->section_mem_map >> SECTION_NID_SHIFT);
}
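
/*
 * The node id is shifted above the section flag bits, so
 * SECTION_MARKED_PRESENT can be OR'd in alongside it (see
 * memory_present() below).
 */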

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                        unsigned long *end_pfn)
{
    unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

    /*
     * Sanity checks - do not allow an architecture to pass
     * in larger pfns than the maximum scope of sparsemem:
     */
    if (*start_pfn > max_sparsemem_pfn) {
        mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
            "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
            *start_pfn, *end_pfn, max_sparsemem_pfn);
        WARN_ON_ONCE(1);
        *start_pfn = max_sparsemem_pfn;
        *end_pfn = max_sparsemem_pfn;
    } else if (*end_pfn > max_sparsemem_pfn) {
        mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
            "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
            *start_pfn, *end_pfn, max_sparsemem_pfn);
        WARN_ON_ONCE(1);
        *end_pfn = max_sparsemem_pfn;
    }
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
    unsigned long pfn;

    start &= PAGE_SECTION_MASK;
    mminit_validate_memmodel_limits(&start, &end);
    for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
        unsigned long section = pfn_to_section_nr(pfn);
        struct mem_section *ms;

        sparse_index_init(section, nid);
        set_section_nid(section, nid);

        ms = __nr_to_section(section);
        if (!ms->section_mem_map)
            ms->section_mem_map = sparse_encode_early_nid(nid) |
                            SECTION_MARKED_PRESENT;
    }
}
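
/*
 * Typical caller sketch (arch boot code; assumes the arch records its
 * memory layout in memblock, as e.g. arm and sparc do):
 *
 *    for_each_memblock(memory, reg)
 *        memory_present(memblock_get_region_node(reg),
 *                       memblock_region_memory_base_pfn(reg),
 *                       memblock_region_memory_end_pfn(reg));
 */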

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                             unsigned long end_pfn)
{
    unsigned long pfn;
    unsigned long nr_pages = 0;

    mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
    for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
        if (nid != early_pfn_to_nid(pfn))
            continue;

        if (pfn_present(pfn))
            nr_pages += PAGES_PER_SECTION;
    }

    return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that, for any
 * page in the section, page - section_mem_map yields the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
    return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}
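
/*
 * With this encoding, sparsemem's pfn_to_page() reduces to
 * __section_mem_map_addr(ms) + pfn with no per-section offset
 * subtraction (see include/asm-generic/memory_model.h).
 */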

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
    /* mask off the extra low bits of information */
    coded_mem_map &= SECTION_MAP_MASK;
    return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
        unsigned long pnum, struct page *mem_map,
        unsigned long *pageblock_bitmap)
{
    if (!present_section(ms))
        return -EINVAL;

    ms->section_mem_map &= ~SECTION_MAP_MASK;
    ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                            SECTION_HAS_MEM_MAP;
    ms->pageblock_flags = pageblock_bitmap;

    return 1;
}

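/*
 * One usemap carries the pageblock flags (SECTION_BLOCKFLAGS_BITS bits)
 * for an entire section, rounded up to unsigned long granularity so
 * per-section bitmaps can be packed back to back.
 */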
unsigned long usemap_size(void)
{
    unsigned long size_bytes;
    size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
    size_bytes = roundup(size_bytes, sizeof(unsigned long));
    return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
    return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                     unsigned long size)
{
    unsigned long goal, limit;
    unsigned long *p;
    int nid;
    /*
     * A page may contain usemaps for other sections preventing the
     * page being freed and making a section unremovable while
     * other sections referencing the usemap remain active. Similarly,
     * a pgdat can prevent a section being removed. If section A
     * contains a pgdat and section B contains the usemap, both
     * sections become inter-dependent. This allocates usemaps
     * from the same section as the pgdat where possible to avoid
     * this problem.
     */
    goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
    limit = goal + (1UL << PA_SECTION_SHIFT);
    nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
    p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
                      SMP_CACHE_BYTES, goal, limit);
    if (!p && limit) {
        limit = 0;
        goto again;
    }
    return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
    unsigned long usemap_snr, pgdat_snr;
    static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
    static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
    struct pglist_data *pgdat = NODE_DATA(nid);
    int usemap_nid;

    usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
    pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
    if (usemap_snr == pgdat_snr)
        return;

    if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
        /* skip redundant message */
        return;

    old_usemap_snr = usemap_snr;
    old_pgdat_snr = pgdat_snr;

    usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
    if (usemap_nid != nid) {
        printk(KERN_INFO
               "node %d must be removed before removing section %ld\n",
               nid, usemap_snr);
        return;
    }
    /*
     * There is a circular dependency.
     * Some platforms allow un-removable sections because they will just
     * gather other removable sections for dynamic partitioning.
     * Just report the un-removable section's number here.
     */
    printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
           pgdat_snr, nid);
    printk(KERN_CONT
           " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                     unsigned long size)
{
    return alloc_bootmem_node_nopanic(pgdat, size);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
                 unsigned long pnum_begin,
                 unsigned long pnum_end,
                 unsigned long usemap_count, int nodeid)
{
    void *usemap;
    unsigned long pnum;
    unsigned long **usemap_map = (unsigned long **)data;
    int size = usemap_size();

    usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                              size * usemap_count);
    if (!usemap) {
        printk(KERN_WARNING "%s: allocation failed\n", __func__);
        return;
    }

    for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
        if (!present_section_nr(pnum))
            continue;
        usemap_map[pnum] = usemap;
        usemap += size;
        check_usemap_section_nr(nodeid, usemap_map[pnum]);
    }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
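/*
 * Allocate the struct page array backing one section's mem_map,
 * preferring any arch-provided remap area (alloc_remap()) and falling
 * back to bootmem above MAX_DMA_ADDRESS.
 */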
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
    struct page *map;
    unsigned long size;

    map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
    if (map)
        return map;

    size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
    map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
                     PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
    return map;
}
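
/*
 * Batched variant: try the remap area, then one large bootmem block
 * covering map_count sections, and finally fall back to populating
 * each section on its own.
 */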
void __init sparse_mem_maps_populate_node(struct page **map_map,
                      unsigned long pnum_begin,
                      unsigned long pnum_end,
                      unsigned long map_count, int nodeid)
{
    void *map;
    unsigned long pnum;
    unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

    map = alloc_remap(nodeid, size * map_count);
    if (map) {
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
            if (!present_section_nr(pnum))
                continue;
            map_map[pnum] = map;
            map += size;
        }
        return;
    }

    size = PAGE_ALIGN(size);
    map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
                     PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
    if (map) {
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
            if (!present_section_nr(pnum))
                continue;
            map_map[pnum] = map;
            map += size;
        }
        return;
    }

    /* fallback */
    for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
        struct mem_section *ms;

        if (!present_section_nr(pnum))
            continue;
        map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
        if (map_map[pnum])
            continue;
        ms = __nr_to_section(pnum);
        printk(KERN_ERR "%s: sparsemem memory map backing failed, "
            "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
    }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
                 unsigned long pnum_begin,
                 unsigned long pnum_end,
                 unsigned long map_count, int nodeid)
{
    struct page **map_map = (struct page **)data;
    sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                     map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
    struct page *map;
    struct mem_section *ms = __nr_to_section(pnum);
    int nid = sparse_early_nid(ms);

    map = sparse_mem_map_populate(pnum, nid);
    if (map)
        return map;

    printk(KERN_ERR "%s: sparsemem memory map backing failed, "
            "some memory will not be available.\n", __func__);
    ms->section_mem_map = 0;
    return NULL;
}
#endif

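/*
 * Weak stub; architectures that batch their vmemmap population
 * messages (e.g. x86-64) override this to print the last one.
 */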
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function to allocate usemap or memmap for a run of sections
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
                    (void *, unsigned long, unsigned long,
                    unsigned long, int), void *data)
{
    unsigned long pnum;
    unsigned long map_count;
    int nodeid_begin = 0;
    unsigned long pnum_begin = 0;

    for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
        struct mem_section *ms;

        if (!present_section_nr(pnum))
            continue;
        ms = __nr_to_section(pnum);
        nodeid_begin = sparse_early_nid(ms);
        pnum_begin = pnum;
        break;
    }
    map_count = 1;
    for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
        struct mem_section *ms;
        int nodeid;

        if (!present_section_nr(pnum))
            continue;
        ms = __nr_to_section(pnum);
        nodeid = sparse_early_nid(ms);
        if (nodeid == nodeid_begin) {
            map_count++;
            continue;
        }
        /* ok, we need to take care of sections from pnum_begin to pnum - 1 */
        alloc_func(data, pnum_begin, pnum,
                        map_count, nodeid_begin);
        /* new start, update count etc. */
        nodeid_begin = nodeid;
        pnum_begin = pnum;
        map_count = 1;
    }
    /* ok, last chunk */
    alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
                        map_count, nodeid_begin);
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
    unsigned long pnum;
    struct page *map;
    unsigned long *usemap;
    unsigned long **usemap_map;
    int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
    int size2;
    struct page **map_map;
#endif

    /* see include/linux/mmzone.h 'struct mem_section' definition */
    BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

    /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
    set_pageblock_order();

    /*
     * Each mem_map uses a large page (e.g. 2M on 64-bit x86), while a
     * usemap is far smaller than a page (e.g. 24 bytes).  Alternating
     * a 2M-aligned mem_map allocation with a tiny usemap allocation
     * would push every following mem_map to the next 2M boundary,
     * leaving the memory of a big system full of holes.  So batch the
     * usemap allocations first, which lets the 2M pages be allocated
     * contiguously.
     *
     * powerpc needs to call sparse_init_one_section right after each
     * sparse_early_mem_map_alloc, so allocate usemap_map at first.
     */
    size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
    usemap_map = alloc_bootmem(size);
    if (!usemap_map)
        panic("can not allocate usemap_map\n");
    alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
                            (void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
    size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
    map_map = alloc_bootmem(size2);
    if (!map_map)
        panic("can not allocate map_map\n");
    alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
                            (void *)map_map);
#endif

    for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
        if (!present_section_nr(pnum))
            continue;

        usemap = usemap_map[pnum];
        if (!usemap)
            continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        map = map_map[pnum];
#else
        map = sparse_early_mem_map_alloc(pnum);
#endif
        if (!map)
            continue;

        sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                usemap);
    }

    vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
    free_bootmem(__pa(map_map), size2);
#endif
    free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                         unsigned long nr_pages)
{
    /* This will make the necessary allocations eventually. */
    return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
    unsigned long start = (unsigned long)memmap;
    unsigned long end = (unsigned long)(memmap + nr_pages);

    vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
    unsigned long start = (unsigned long)memmap;
    unsigned long end = (unsigned long)(memmap + nr_pages);

    vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
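/*
 * Without vmemmap, the hot-added memmap only needs to be virtually
 * contiguous: try physically contiguous pages first and fall back to
 * vmalloc().
 */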
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
    struct page *page, *ret;
    unsigned long memmap_size = sizeof(struct page) * nr_pages;

    page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
    if (page)
        goto got_map_page;

    ret = vmalloc(memmap_size);
    if (ret)
        goto got_map_ptr;

    return NULL;
got_map_page:
    ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

    return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                          unsigned long nr_pages)
{
    return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
    if (is_vmalloc_addr(memmap))
        vfree(memmap);
    else
        free_pages((unsigned long)memmap,
               get_order(sizeof(struct page) * nr_pages));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
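/*
 * The memmap here came from bootmem: each backing page was registered
 * via get_page_bootmem() with the owning section number stored in
 * page->private, so release the pages with put_page_bootmem().
 */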
static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
{
    unsigned long maps_section_nr, removing_section_nr, i;
    unsigned long magic;
    struct page *page = virt_to_page(memmap);

    for (i = 0; i < nr_pages; i++, page++) {
        magic = (unsigned long) page->lru.next;

        BUG_ON(magic == NODE_INFO);

        maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
        removing_section_nr = page->private;

        /*
         * When this function is called, the section being removed is
         * already logically offline, so all of its pages are isolated
         * from the page allocator. If the section's memmap is placed
         * on the section itself, it must not be freed; otherwise the
         * page allocator could hand it out again even though it will
         * be removed physically soon.
         */
        if (maps_section_nr != removing_section_nr)
            put_page_bootmem(page);
    }
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
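/*
 * Typical call chain during memory hotplug (see mm/memory_hotplug.c):
 * add_memory() -> arch_add_memory() -> __add_pages() ->
 * __add_section() -> sparse_add_one_section().
 */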
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
               int nr_pages)
{
    unsigned long section_nr = pfn_to_section_nr(start_pfn);
    struct pglist_data *pgdat = zone->zone_pgdat;
    struct mem_section *ms;
    struct page *memmap;
    unsigned long *usemap;
    unsigned long flags;
    int ret;

    /*
     * No locking needed here: sparse_index_init() does its own
     * checking, and it kmallocs, so it could not run under the
     * pgdat resize lock anyway.
     */
    ret = sparse_index_init(section_nr, pgdat->node_id);
    if (ret < 0 && ret != -EEXIST)
        return ret;
    memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
    if (!memmap)
        return -ENOMEM;
    usemap = __kmalloc_section_usemap();
    if (!usemap) {
        __kfree_section_memmap(memmap, nr_pages);
        return -ENOMEM;
    }

    pgdat_resize_lock(pgdat, &flags);

    ms = __pfn_to_section(start_pfn);
    if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
        ret = -EEXIST;
        goto out;
    }

    memset(memmap, 0, sizeof(struct page) * nr_pages);

    ms->section_mem_map |= SECTION_MARKED_PRESENT;

    ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
    pgdat_resize_unlock(pgdat, &flags);
    if (ret <= 0) {
        kfree(usemap);
        __kfree_section_memmap(memmap, nr_pages);
    }
    return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
    int i;

    if (!memmap)
        return;

    for (i = 0; i < PAGES_PER_SECTION; i++) {
        if (PageHWPoison(&memmap[i])) {
            atomic_long_sub(1, &num_poisoned_pages);
            ClearPageHWPoison(&memmap[i]);
        }
    }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
    struct page *usemap_page;
    unsigned long nr_pages;

    if (!usemap)
        return;

    usemap_page = virt_to_page(usemap);
    /*
     * Check to see if allocation came from hot-plug-add
     */
    if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
        kfree(usemap);
        if (memmap)
            __kfree_section_memmap(memmap, PAGES_PER_SECTION);
        return;
    }

    /*
     * The usemap came from bootmem. It is packed with the other usemaps
     * on the section that held the pgdat at boot time; just leave it
     * as is for now.
     */

    if (memmap) {
        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
            >> PAGE_SHIFT;

        free_map_bootmem(memmap, nr_pages);
    }
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
    struct page *memmap = NULL;
    unsigned long *usemap = NULL, flags;
    struct pglist_data *pgdat = zone->zone_pgdat;

    pgdat_resize_lock(pgdat, &flags);
    if (ms->section_mem_map) {
        usemap = ms->pageblock_flags;
        memmap = sparse_decode_mem_map(ms->section_mem_map,
                        __section_nr(ms));
        ms->section_mem_map = 0;
        ms->pageblock_flags = NULL;
    }
    pgdat_resize_unlock(pgdat, &flags);

    clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION);
    free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */