mm/sparse-vmemmap.c

/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
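/*
 * With vmemmap in place, the PFN conversions reduce to simple arithmetic.
 * Illustrative sketch, after the SPARSEMEM_VMEMMAP case in
 * include/asm-generic/memory_model.h:
 *
 *    #define __pfn_to_page(pfn)    (vmemmap + (pfn))
 *    #define __page_to_pfn(page)   (unsigned long)((page) - vmemmap)
 */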
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __init_refok __earlyonly_bootmem_alloc(int node,
                unsigned long size,
                unsigned long align,
                unsigned long goal)
{
    return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
}

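/*
 * Optional bootmem buffer set up by sparse_mem_maps_populate_node();
 * while it lasts, early per-section allocations are carved out of it.
 */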
static void *vmemmap_buf;
static void *vmemmap_buf_end;

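/*
 * Allocate a zeroed block to back the memory map or its page tables,
 * preferring memory local to @node once the page allocator is running.
 */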
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
    /* If the main allocator is up, use that; otherwise fall back to bootmem. */
    if (slab_is_available()) {
        struct page *page;

        if (node_state(node, N_HIGH_MEMORY))
            page = alloc_pages_node(
                node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
                get_order(size));
        else
            page = alloc_pages(
                GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
                get_order(size));
        if (page)
            return page_address(page);
        return NULL;
    } else
        return __earlyonly_bootmem_alloc(node, size, size,
                __pa(MAX_DMA_ADDRESS));
}

/*
 * Allocate from the early-boot buffer when one is available. All callers
 * must request the same @size during the early stage, since the carve-out
 * below relies on aligning the buffer cursor to @size.
 */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
    void *ptr;

    if (!vmemmap_buf)
        return vmemmap_alloc_block(size, node);

    /* carve the allocation out of the buffer */
    ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
    if (ptr + size > vmemmap_buf_end)
        return vmemmap_alloc_block(size, node);

    vmemmap_buf = ptr + size;

    return ptr;
}

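/*
 * Warn if the page structs backing [start, end) landed farther than
 * LOCAL_DISTANCE from the node they describe.
 */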
void __meminit vmemmap_verify(pte_t *pte, int node,
                unsigned long start, unsigned long end)
{
    unsigned long pfn = pte_pfn(*pte);
    int actual_node = early_pfn_to_nid(pfn);

    if (node_distance(actual_node, node) > LOCAL_DISTANCE)
        printk(KERN_WARNING "[%lx-%lx] potential offnode "
            "page_structs\n", start, end - 1);
}

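/*
 * Instantiate the PTE for one vmemmap page, backing it with a page from
 * the early buffer or the page allocator.
 */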
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
    pte_t *pte = pte_offset_kernel(pmd, addr);
    if (pte_none(*pte)) {
        pte_t entry;
        void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
        if (!p)
            return NULL;
        entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
        set_pte_at(&init_mm, addr, pte, entry);
    }
    return pte;
}

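/*
 * The helpers below fill in the upper page-table levels on demand, each
 * allocating a page-table page via vmemmap_alloc_block() when the entry
 * is still empty.
 */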
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
    pmd_t *pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd)) {
        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
        if (!p)
            return NULL;
        pmd_populate_kernel(&init_mm, pmd, p);
    }
    return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
    pud_t *pud = pud_offset(pgd, addr);
    if (pud_none(*pud)) {
        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
        if (!p)
            return NULL;
        pud_populate(&init_mm, pud, p);
    }
    return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
    pgd_t *pgd = pgd_offset_k(addr);
    if (pgd_none(*pgd)) {
        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
        if (!p)
            return NULL;
        pgd_populate(&init_mm, pgd, p);
    }
    return pgd;
}

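/*
 * Populate the vmemmap range [start, end) with individual base pages,
 * walking (and creating where needed) every page-table level.
 */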
int __meminit vmemmap_populate_basepages(unsigned long start,
                     unsigned long end, int node)
{
    unsigned long addr = start;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    for (; addr < end; addr += PAGE_SIZE) {
        pgd = vmemmap_pgd_populate(addr, node);
        if (!pgd)
            return -ENOMEM;
        pud = vmemmap_pud_populate(pgd, addr, node);
        if (!pud)
            return -ENOMEM;
        pmd = vmemmap_pmd_populate(pud, addr, node);
        if (!pmd)
            return -ENOMEM;
        pte = vmemmap_pte_populate(pmd, addr, node);
        if (!pte)
            return -ENOMEM;
        vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
    }

    return 0;
}

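/*
 * Return the memory map for section @pnum, asking the architecture's
 * vmemmap_populate() to back the corresponding virtual range; returns
 * NULL if the backing could not be instantiated.
 */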
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
    unsigned long start;
    unsigned long end;
    struct page *map;

    map = pfn_to_page(pnum * PAGES_PER_SECTION);
    start = (unsigned long)map;
    end = (unsigned long)(map + PAGES_PER_SECTION);

    if (vmemmap_populate(start, end, nid))
        return NULL;

    return map;
}

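/*
 * Populate the memory maps for sections [pnum_begin, pnum_end) on node
 * @nodeid. A single PMD-aligned bootmem buffer sized for all @map_count
 * maps is staged up front so the per-section allocations come from one
 * contiguous, node-local block; whatever remains is freed at the end.
 */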
void __init sparse_mem_maps_populate_node(struct page **map_map,
                      unsigned long pnum_begin,
                      unsigned long pnum_end,
                      unsigned long map_count, int nodeid)
{
    unsigned long pnum;
    unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
    void *vmemmap_buf_start;

    size = ALIGN(size, PMD_SIZE);
    vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
             PMD_SIZE, __pa(MAX_DMA_ADDRESS));

    if (vmemmap_buf_start) {
        vmemmap_buf = vmemmap_buf_start;
        vmemmap_buf_end = vmemmap_buf_start + size * map_count;
    }

    for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
        struct mem_section *ms;

        if (!present_section_nr(pnum))
            continue;

        map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
        if (map_map[pnum])
            continue;
        ms = __nr_to_section(pnum);
        printk(KERN_ERR "%s: sparsemem memory map backing failed, "
            "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
    }

    if (vmemmap_buf_start) {
        /* free whatever is left of the buffer */
        free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
        vmemmap_buf = NULL;
        vmemmap_buf_end = NULL;
    }
}
