/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kmalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kmalloc(array_size, GFP_KERNEL);
        } else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);

        if (section)
                memset(section, 0, array_size);

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;
        /*
         * This lock keeps two different sections from
         * reallocating for the same index
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
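
/*
 * Rough sketch of the forward lookup that __section_nr() inverts,
 * assuming the SECTION_NR_TO_ROOT()/SECTION_ROOT_MASK definitions from
 * mmzone.h: a section number is split into a root index and an offset
 * within that root, roughly
 *
 *      root   = section_nr / SECTIONS_PER_ROOT;
 *      offset = section_nr & SECTION_ROOT_MASK;
 *      ms     = &mem_section[root][offset];
 *
 * __section_nr() walks the roots looking for the one whose array
 * contains the given mem_section and reverses that arithmetic.
 */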

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
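
/*
 * Rough example of the round trip above: for a section on node 3,
 * memory_present() below stores
 *
 *      ms->section_mem_map = (3 << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT;
 *
 * and sparse_early_nid() recovers 3 by shifting right again, which
 * works because the SECTION_* flag bits sit below SECTION_NID_SHIFT.
 */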

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                               unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}
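
/*
 * Worked example with made-up numbers: if MAX_PHYSMEM_BITS were 44 and
 * PAGE_SHIFT 12, max_sparsemem_pfn would be 1UL << 32.  A caller passing
 * start_pfn = 0 and end_pfn = 1UL << 33 takes the second branch and only
 * end_pfn is clamped down to 1UL << 32; the first branch clamps both
 * ends when even the start of the range is out of scope.
 */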

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                            unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
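
/*
 * Sketch of the arithmetic, assuming section_nr_to_pfn(pnum) is the
 * first pfn of section pnum: the encoded value is
 *
 *      coded = mem_map - section_nr_to_pfn(pnum);
 *
 * so for any pfn inside the section, (struct page *)coded + pfn is the
 * corresponding struct page, and page - (struct page *)coded gives back
 * the pfn, which is the identity the comment above relies on.  The low
 * SECTION_* flag bits stored alongside are masked off on decode.
 */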

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}
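
/*
 * Example with made-up numbers: if SECTION_BLOCKFLAGS_BITS were 128,
 * the first roundup yields 128 bits -> 16 bytes, already a multiple of
 * sizeof(unsigned long) on a 64-bit build.  An odd count such as 100
 * bits would round up to 13 bytes and then to 16.
 */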

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long count)
{
        unsigned long section_nr;

        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        return alloc_bootmem_section(usemap_size() * count, section_nr);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                printk(KERN_INFO
                       "node %d must be removed before removing section %ld\n",
                       nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow an un-removable section because they will
         * just gather other removable sections for dynamic partitioning.
         * Just note the un-removable section's number here.
         */
        printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
               pgdat_snr, nid);
        printk(KERN_CONT
               " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long count)
{
        return NULL;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
                                unsigned long pnum_begin,
                                unsigned long pnum_end,
                                unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          usemap_count);
        if (usemap) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        usemap_map[pnum] = usemap;
                        usemap += size;
                }
                return;
        }

        usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
        if (usemap) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        usemap_map[pnum] = usemap;
                        usemap += size;
                        check_usemap_section_nr(nodeid, usemap_map[pnum]);
                }
                return;
        }

        printk(KERN_WARNING "%s: allocation failed\n", __func__);
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_pages_node(NODE_DATA(nid),
                        PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
        return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                        "some memory will not be available.\n", __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
                                unsigned long pnum_begin,
                                unsigned long pnum_end,
                                unsigned long map_count, int nodeid)
{
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                      map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;
        unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        unsigned long map_count;
        int size2;
        struct page **map_map;
#endif

        /*
         * The mem_map is allocated with big pages (e.g. 2M on x86-64),
         * while a usemap is much less than one page (around 24 bytes).
         * Allocating a 2M-aligned 2M map and then a 24-byte usemap in
         * turn would push each following 2M allocation to the next 2M
         * boundary, so a big system would end up with a lot of holes.
         * Instead, try to allocate the 2M mem_map pages contiguously.
         *
         * powerpc needs to call sparse_init_one_section right after each
         * sparse_early_mem_map_alloc, so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = alloc_bootmem(size);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        usemap_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        usemap_count++;
                        continue;
                }
                /* ok, we need to take care of the range from pnum_begin to pnum - 1 */
                sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
                                                usemap_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                usemap_count = 1;
        }
        /* ok, last chunk */
        sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
                                        usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = alloc_bootmem(size2);
        if (!map_map)
                panic("can not allocate map_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of the range from pnum_begin to pnum - 1 */
                sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
                                                 map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
                                         map_count, nodeid_begin);
#endif

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                        usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        free_bootmem(__pa(map_map), size2);
#endif
        free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * nr_pages;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        memset(ret, 0, memmap_size);

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        int magic;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = atomic_read(&page->_mapcount);

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page->private;

                /*
                 * When this function is called, the removing section is in a
                 * logically offlined state. This means all of its pages are
                 * isolated from the page allocator. If the removing section's
                 * memmap is placed on the same section, it must not be freed:
                 * if it were freed, the page allocator could hand it out again
                 * even though the memory is about to be removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;
        unsigned long nr_pages;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
                return;
        }

        /*
         * The usemap came from bootmem. It is packed with other usemaps
         * on the section that holds the pgdat at boot time, so just keep
         * it as is for now.
         */

        if (memmap) {
                struct page *memmap_page;
                memmap_page = virt_to_page(memmap);

                nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                        >> PAGE_SHIFT;

                free_map_bootmem(memmap_page, nr_pages);
        }
}

/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                                     int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking needed here: sparse_index_init() does its own
         * locking, and besides it may need to kmalloc.
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap, nr_pages);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, nr_pages);
        }
        return ret;
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL;

        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                               __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }

        free_section_usemap(memmap, usemap);
}
#endif