mm/page_alloc.c

/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list; the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
    [N_POSSIBLE] = NODE_MASK_ALL,
    [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
    [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
    [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
    [N_CPU] = { { [0] = 1UL } },
#endif /* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */
void set_gfp_allowed_mask(gfp_t mask)
{
    WARN_ON(!mutex_is_locked(&pm_mutex));
    gfp_allowed_mask = mask;
}

gfp_t clear_gfp_allowed_mask(gfp_t mask)
{
    gfp_t ret = gfp_allowed_mask;

    WARN_ON(!mutex_is_locked(&pm_mutex));
    gfp_allowed_mask &= ~mask;
    return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 * 1G machine -> (16M dma, 784M normal, 224M high)
 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
     256,
#endif
#ifdef CONFIG_ZONE_DMA32
     256,
#endif
#ifdef CONFIG_HIGHMEM
     32,
#endif
     32,
};

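/*
 * Worked example (illustrative): with the default ratio of 256 for
 * ZONE_DMA, a NORMAL allocation on the 1G machine above leaves about
 * 784M/256 = ~3M of ZONE_DMA untouched, and with ratio 32 for
 * ZONE_NORMAL a HIGHMEM allocation leaves 224M/32 = 7M of ZONE_NORMAL
 * untouched. Lowering a ratio via this sysctl enlarges the reservation.
 */
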
EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
     "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
     "DMA32",
#endif
     "Normal",
#ifdef CONFIG_HIGHMEM
     "HighMem",
#endif
     "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
    if (unlikely(page_group_by_mobility_disabled))
        migratetype = MIGRATE_UNMOVABLE;

    set_pageblock_flags_group(page, (unsigned long)migratetype,
                    PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
    int ret = 0;
    unsigned seq;
    unsigned long pfn = page_to_pfn(page);

    do {
        seq = zone_span_seqbegin(zone);
        if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
            ret = 1;
        else if (pfn < zone->zone_start_pfn)
            ret = 1;
    } while (zone_span_seqretry(zone, seq));

    return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
    if (!pfn_valid_within(page_to_pfn(page)))
        return 0;
    if (zone != page_zone(page))
        return 0;

    return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
    if (page_outside_zone_boundaries(zone, page))
        return 1;
    if (!page_is_consistent(zone, page))
        return 1;

    return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
    return 0;
}
#endif

static void bad_page(struct page *page)
{
    static unsigned long resume;
    static unsigned long nr_shown;
    static unsigned long nr_unshown;

    /* Don't complain about poisoned pages */
    if (PageHWPoison(page)) {
        __ClearPageBuddy(page);
        return;
    }

    /*
     * Allow a burst of 60 reports, then keep quiet for that minute;
     * or allow a steady drip of one report per second.
     */
    if (nr_shown == 60) {
        if (time_before(jiffies, resume)) {
            nr_unshown++;
            goto out;
        }
        if (nr_unshown) {
            printk(KERN_ALERT
                  "BUG: Bad page state: %lu messages suppressed\n",
                nr_unshown);
            nr_unshown = 0;
        }
        nr_shown = 0;
    }
    if (nr_shown++ == 0)
        resume = jiffies + 60 * HZ;

    printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
        current->comm, page_to_pfn(page));
    dump_page(page);

    dump_stack();
out:
    /* Leave bad fields for debug, except PageBuddy could make trouble */
    __ClearPageBuddy(page);
    add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set. All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function. Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
    __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
    int i;
    int nr_pages = 1 << order;

    set_compound_page_dtor(page, free_compound_page);
    set_compound_order(page, order);
    __SetPageHead(page);
    for (i = 1; i < nr_pages; i++) {
        struct page *p = page + i;

        __SetPageTail(p);
        p->first_page = page;
    }
}

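/*
 * For example (illustrative): prep_compound_page(page, 2) covers four
 * pages: page[0] becomes the head (__SetPageHead()) with its destructor
 * set to free_compound_page() and its order set to 2, while page[1..3]
 * become tails (__SetPageTail()) whose ->first_page points at page[0].
 */
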
static int destroy_compound_page(struct page *page, unsigned long order)
{
    int i;
    int nr_pages = 1 << order;
    int bad = 0;

    if (unlikely(compound_order(page) != order) ||
        unlikely(!PageHead(page))) {
        bad_page(page);
        bad++;
    }

    __ClearPageHead(page);

    for (i = 1; i < nr_pages; i++) {
        struct page *p = page + i;

        if (unlikely(!PageTail(p) || (p->first_page != page))) {
            bad_page(page);
            bad++;
        }
        __ClearPageTail(p);
    }

    return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
    int i;

    /*
     * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
     * and __GFP_HIGHMEM from hard or soft interrupt context.
     */
    VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
    for (i = 0; i < (1 << order); i++)
        clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
    set_page_private(page, order);
    __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
    __ClearPageBuddy(page);
    set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 * B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8, its order-1
 * buddy (B2) is #10:
 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 * P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
    unsigned long buddy_idx = page_idx ^ (1 << order);

    return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
    return (page_idx & ~(1 << order));
}

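/*
 * Continuing the example above (illustrative): merging buddies #8 and
 * #10 at order 1 gives the order-2 parent at P = 8 & ~(1 << 1) = 8,
 * so a merged pair always lands on the lower of the two buddy indices.
 */
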
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                int order)
{
    if (!pfn_valid_within(page_to_pfn(buddy)))
        return 0;

    if (page_zone_id(page) != page_zone_id(buddy))
        return 0;

    if (PageBuddy(buddy) && page_order(buddy) == order) {
        VM_BUG_ON(page_count(buddy) != 0);
        return 1;
    }
    return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy. A page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
        struct zone *zone, unsigned int order,
        int migratetype)
{
    unsigned long page_idx;
    unsigned long combined_idx;
    struct page *buddy;

    if (unlikely(PageCompound(page)))
        if (unlikely(destroy_compound_page(page, order)))
            return;

    VM_BUG_ON(migratetype == -1);

    page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

    VM_BUG_ON(page_idx & ((1 << order) - 1));
    VM_BUG_ON(bad_range(zone, page));

    while (order < MAX_ORDER-1) {
        buddy = __page_find_buddy(page, page_idx, order);
        if (!page_is_buddy(page, buddy, order))
            break;

        /* Our buddy is free, merge with it and move up one order. */
        list_del(&buddy->lru);
        zone->free_area[order].nr_free--;
        rmv_page_order(buddy);
        combined_idx = __find_combined_index(page_idx, order);
        page = page + (combined_idx - page_idx);
        page_idx = combined_idx;
        order++;
    }
    set_page_order(page, order);

    /*
     * If this is not the largest possible page, check if the buddy
     * of the next-highest order is free. If it is, it's possible
     * that pages are being freed that will coalesce soon. In case
     * that is happening, add the free page to the tail of the list
     * so it's less likely to be used soon and more likely to be merged
     * as a higher order page
     */
    if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
        struct page *higher_page, *higher_buddy;
        combined_idx = __find_combined_index(page_idx, order);
        higher_page = page + combined_idx - page_idx;
        higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
        if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
            list_add_tail(&page->lru,
                &zone->free_area[order].free_list[migratetype]);
            goto out;
        }
    }

    list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
    zone->free_area[order].nr_free++;
}

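/*
 * A sample merge walk (illustrative): freeing order-0 page #13 checks
 * buddy #12 (13 ^ 1); if #12 is free, the pair combines at index
 * 13 & ~1 = 12 as an order-1 block whose buddy is #14 (12 ^ 2); if
 * that buddy is free as well, the blocks combine at index 12, order 2,
 * and so on until a buddy is busy or order MAX_ORDER-1 is reached.
 */
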
/*
 * free_page_mlock() -- clean up attempts to free an mlocked page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
    __dec_zone_page_state(page, NR_MLOCK);
    __count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
    if (unlikely(page_mapcount(page) |
        (page->mapping != NULL) |
        (atomic_read(&page->_count) != 0) |
        (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
        bad_page(page);
        return 1;
    }
    if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
        page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
    return 0;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone, and of the same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                    struct per_cpu_pages *pcp)
{
    int migratetype = 0;
    int batch_free = 0;

    spin_lock(&zone->lock);
    zone->all_unreclaimable = 0;
    zone->pages_scanned = 0;

    __mod_zone_page_state(zone, NR_FREE_PAGES, count);
    while (count) {
        struct page *page;
        struct list_head *list;

        /*
         * Remove pages from lists in a round-robin fashion. A
         * batch_free count is maintained that is incremented when an
         * empty list is encountered. This is so more pages are freed
         * off fuller lists instead of spinning excessively around empty
         * lists.
         */
        do {
            batch_free++;
            if (++migratetype == MIGRATE_PCPTYPES)
                migratetype = 0;
            list = &pcp->lists[migratetype];
        } while (list_empty(list));

        do {
            page = list_entry(list->prev, struct page, lru);
            /* must delete as __free_one_page list manipulates */
            list_del(&page->lru);
            /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
            __free_one_page(page, zone, 0, page_private(page));
            trace_mm_page_pcpu_drain(page, 0, page_private(page));
        } while (--count && --batch_free && !list_empty(list));
    }
    spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
                int migratetype)
{
    spin_lock(&zone->lock);
    zone->all_unreclaimable = 0;
    zone->pages_scanned = 0;

    __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
    __free_one_page(page, zone, order, migratetype);
    spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
    int i;
    int bad = 0;

    trace_mm_page_free_direct(page, order);
    kmemcheck_free_shadow(page, order);

    for (i = 0; i < (1 << order); i++) {
        struct page *pg = page + i;

        if (PageAnon(pg))
            pg->mapping = NULL;
        bad += free_pages_check(pg);
    }
    if (bad)
        return false;

    if (!PageHighMem(page)) {
        debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
        debug_check_no_obj_freed(page_address(page),
                       PAGE_SIZE << order);
    }
    arch_free_page(page, order);
    kernel_map_pages(page, 1 << order, 0);

    return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
    unsigned long flags;
    int wasMlocked = __TestClearPageMlocked(page);

    if (!free_pages_prepare(page, order))
        return;

    local_irq_save(flags);
    if (unlikely(wasMlocked))
        free_page_mlock(page);
    __count_vm_events(PGFREE, 1 << order);
    free_one_page(page_zone(page), page, order,
                    get_pageblock_migratetype(page));
    local_irq_restore(flags);
}

/*
 * Permit the bootmem allocator to evade page validation on high-order frees.
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
    if (order == 0) {
        __ClearPageReserved(page);
        set_page_count(page, 0);
        set_page_refcounted(page);
        __free_page(page);
    } else {
        int loop;

        prefetchw(page);
        for (loop = 0; loop < BITS_PER_LONG; loop++) {
            struct page *p = &page[loop];

            if (loop + 1 < BITS_PER_LONG)
                prefetchw(p + 1);
            __ClearPageReserved(p);
            set_page_count(p, 0);
        }

        set_page_refcounted(page);
        __free_pages(page, order);
    }
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
    int low, int high, struct free_area *area,
    int migratetype)
{
    unsigned long size = 1 << high;

    while (high > low) {
        area--;
        high--;
        size >>= 1;
        VM_BUG_ON(bad_range(zone, &page[size]));
        list_add(&page[size].lru, &area->free_list[migratetype]);
        area->nr_free++;
        set_page_order(&page[size], high);
    }
}

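/*
 * For example (illustrative): serving an order-0 request from an
 * order-3 block of eight pages, expand() peels off the upper halves in
 * turn: pages 4-7 go to the order-2 free list, pages 2-3 to order-1,
 * page 1 to order-0, and page 0 is left for the caller.
 */
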
/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
    if (unlikely(page_mapcount(page) |
        (page->mapping != NULL) |
        (atomic_read(&page->_count) != 0) |
        (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
        bad_page(page);
        return 1;
    }
    return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
    int i;

    for (i = 0; i < (1 << order); i++) {
        struct page *p = page + i;
        if (unlikely(check_new_page(p)))
            return 1;
    }

    set_page_private(page, 0);
    set_page_refcounted(page);

    arch_alloc_page(page, order);
    kernel_map_pages(page, 1 << order, 1);

    if (gfp_flags & __GFP_ZERO)
        prep_zero_page(page, order, gfp_flags);

    if (order && (gfp_flags & __GFP_COMP))
        prep_compound_page(page, order);

    return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                        int migratetype)
{
    unsigned int current_order;
    struct free_area *area;
    struct page *page;

    /* Find a page of the appropriate size in the preferred list */
    for (current_order = order; current_order < MAX_ORDER; ++current_order) {
        area = &(zone->free_area[current_order]);
        if (list_empty(&area->free_list[migratetype]))
            continue;

        page = list_entry(area->free_list[migratetype].next,
                            struct page, lru);
        list_del(&page->lru);
        rmv_page_order(page);
        area->nr_free--;
        expand(zone, page, order, current_order, area, migratetype);
        return page;
    }

    return NULL;
}

/*
 * This array describes the order in which free lists are fallen back on
 * when the free lists for the desired migratetype are depleted.
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
    [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
    [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
    [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
    [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
              struct page *start_page, struct page *end_page,
              int migratetype)
{
    struct page *page;
    unsigned long order;
    int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
    /*
     * page_zone is not safe to call in this context when
     * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
     * anyway as we check zone boundaries in move_freepages_block().
     * Remove at a later date when no bug reports exist related to
     * grouping pages by mobility
     */
    BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

    for (page = start_page; page <= end_page;) {
        /* Make sure we are not inadvertently changing nodes */
        VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

        if (!pfn_valid_within(page_to_pfn(page))) {
            page++;
            continue;
        }

        if (!PageBuddy(page)) {
            page++;
            continue;
        }

        order = page_order(page);
        list_del(&page->lru);
        list_add(&page->lru,
            &zone->free_area[order].free_list[migratetype]);
        page += 1 << order;
        pages_moved += 1 << order;
    }

    return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
                int migratetype)
{
    unsigned long start_pfn, end_pfn;
    struct page *start_page, *end_page;

    start_pfn = page_to_pfn(page);
    start_pfn = start_pfn & ~(pageblock_nr_pages-1);
    start_page = pfn_to_page(start_pfn);
    end_page = start_page + pageblock_nr_pages - 1;
    end_pfn = start_pfn + pageblock_nr_pages - 1;

    /* Do not cross zone boundaries */
    if (start_pfn < zone->zone_start_pfn)
        start_page = page;
    if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
        return 0;

    return move_freepages(zone, start_page, end_page, migratetype);
}

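/*
 * For example (illustrative): with pageblock_nr_pages == 1024, a page
 * at pfn 5000 rounds down to start_pfn 4096 and the block spans pfns
 * 4096-5119. The boundary checks above then start from the passed-in
 * page if the block begins before the zone, and give up entirely if
 * the block runs past the end of the zone.
 */
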
static void change_pageblock_range(struct page *pageblock_page,
                    int start_order, int migratetype)
{
    int nr_pageblocks = 1 << (start_order - pageblock_order);

    while (nr_pageblocks--) {
        set_pageblock_migratetype(pageblock_page, migratetype);
        pageblock_page += pageblock_nr_pages;
    }
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
    struct free_area *area;
    int current_order;
    struct page *page;
    int migratetype, i;

    /* Find the largest possible block of pages in the other list */
    for (current_order = MAX_ORDER-1; current_order >= order;
                        --current_order) {
        for (i = 0; i < MIGRATE_TYPES - 1; i++) {
            migratetype = fallbacks[start_migratetype][i];

            /* MIGRATE_RESERVE handled later if necessary */
            if (migratetype == MIGRATE_RESERVE)
                continue;

            area = &(zone->free_area[current_order]);
            if (list_empty(&area->free_list[migratetype]))
                continue;

            page = list_entry(area->free_list[migratetype].next,
                    struct page, lru);
            area->nr_free--;

            /*
             * If breaking a large block of pages, move all free
             * pages to the preferred allocation list. If falling
             * back for a reclaimable kernel allocation, be more
             * aggressive about taking ownership of free pages
             */
            if (unlikely(current_order >= (pageblock_order >> 1)) ||
                    start_migratetype == MIGRATE_RECLAIMABLE ||
                    page_group_by_mobility_disabled) {
                unsigned long pages;
                pages = move_freepages_block(zone, page,
                                start_migratetype);

                /* Claim the whole block if over half of it is free */
                if (pages >= (1 << (pageblock_order-1)) ||
                        page_group_by_mobility_disabled)
                    set_pageblock_migratetype(page,
                                start_migratetype);

                migratetype = start_migratetype;
            }

            /* Remove the page from the freelists */
            list_del(&page->lru);
            rmv_page_order(page);

            /* Take ownership for orders >= pageblock_order */
            if (current_order >= pageblock_order)
                change_pageblock_range(page, current_order,
                            start_migratetype);

            expand(zone, page, order, current_order, area, migratetype);

            trace_mm_page_alloc_extfrag(page, order, current_order,
                start_migratetype, migratetype);

            return page;
        }
    }

    return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                        int migratetype)
{
    struct page *page;

retry_reserve:
    page = __rmqueue_smallest(zone, order, migratetype);

    if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
        page = __rmqueue_fallback(zone, order, migratetype);

        /*
         * Use MIGRATE_RESERVE rather than fail an allocation. goto
         * is used because __rmqueue_smallest is an inline function
         * and we want just one call site
         */
        if (!page) {
            migratetype = MIGRATE_RESERVE;
            goto retry_reserve;
        }
    }

    trace_mm_page_alloc_zone_locked(page, order, migratetype);
    return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
            unsigned long count, struct list_head *list,
            int migratetype, int cold)
{
    int i;

    spin_lock(&zone->lock);
    for (i = 0; i < count; ++i) {
        struct page *page = __rmqueue(zone, order, migratetype);
        if (unlikely(page == NULL))
            break;

        /*
         * Split buddy pages returned by expand() are received here
         * in physical page order. The page is added to the caller's
         * list and the list head then moves forward. From the caller's
         * perspective, the linked list is ordered by page number in
         * some conditions. This is useful for IO devices that can
         * merge IO requests if the physical pages are ordered
         * properly.
         */
        if (likely(cold == 0))
            list_add(&page->lru, list);
        else
            list_add_tail(&page->lru, list);
        set_page_private(page, migratetype);
        list = &page->lru;
    }
    __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
    spin_unlock(&zone->lock);
    return i;
}

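/*
 * For example (illustrative, hot case): pulling physically ordered
 * pages P0, P1 and P2 with cold == 0 leaves the pcp list as P0, P1,
 * P2, because "list" advances to the page just added; hot allocations
 * then come off the head in physical order, which helps IO merging.
 */
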
#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
    unsigned long flags;
    int to_drain;

    local_irq_save(flags);
    if (pcp->count >= pcp->batch)
        to_drain = pcp->batch;
    else
        to_drain = pcp->count;
    free_pcppages_bulk(zone, to_drain, pcp);
    pcp->count -= to_drain;
    local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
    unsigned long flags;
    struct zone *zone;

    for_each_populated_zone(zone) {
        struct per_cpu_pageset *pset;
        struct per_cpu_pages *pcp;

        local_irq_save(flags);
        pset = per_cpu_ptr(zone->pageset, cpu);

        pcp = &pset->pcp;
        free_pcppages_bulk(zone, pcp->count, pcp);
        pcp->count = 0;
        local_irq_restore(flags);
    }
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
    drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
    on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
    unsigned long pfn, max_zone_pfn;
    unsigned long flags;
    int order, t;
    struct list_head *curr;

    if (!zone->spanned_pages)
        return;

    spin_lock_irqsave(&zone->lock, flags);

    max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
    for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
        if (pfn_valid(pfn)) {
            struct page *page = pfn_to_page(pfn);

            if (!swsusp_page_is_forbidden(page))
                swsusp_unset_page_free(page);
        }

    for_each_migratetype_order(order, t) {
        list_for_each(curr, &zone->free_area[order].free_list[t]) {
            unsigned long i;

            pfn = page_to_pfn(list_entry(curr, struct page, lru));
            for (i = 0; i < (1UL << order); i++)
                swsusp_set_page_free(pfn_to_page(pfn + i));
        }
    }
    spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
    struct zone *zone = page_zone(page);
    struct per_cpu_pages *pcp;
    unsigned long flags;
    int migratetype;
    int wasMlocked = __TestClearPageMlocked(page);

    if (!free_pages_prepare(page, 0))
        return;

    migratetype = get_pageblock_migratetype(page);
    set_page_private(page, migratetype);
    local_irq_save(flags);
    if (unlikely(wasMlocked))
        free_page_mlock(page);
    __count_vm_event(PGFREE);

    /*
     * We only track unmovable, reclaimable and movable on pcp lists.
     * Free ISOLATE pages back to the allocator because they are being
     * offlined, but treat RESERVE as movable pages so we can get those
     * areas back if necessary. Otherwise, we may have to free
     * excessively into the page allocator
     */
    if (migratetype >= MIGRATE_PCPTYPES) {
        if (unlikely(migratetype == MIGRATE_ISOLATE)) {
            free_one_page(zone, page, 0, migratetype);
            goto out;
        }
        migratetype = MIGRATE_MOVABLE;
    }

    pcp = &this_cpu_ptr(zone->pageset)->pcp;
    if (cold)
        list_add_tail(&page->lru, &pcp->lists[migratetype]);
    else
        list_add(&page->lru, &pcp->lists[migratetype]);
    pcp->count++;
    if (pcp->count >= pcp->high) {
        free_pcppages_bulk(zone, pcp->batch, pcp);
        pcp->count -= pcp->batch;
    }

out:
    local_irq_restore(flags);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1 << order) sub-pages: page[0] .. page[n - 1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
    int i;

    VM_BUG_ON(PageCompound(page));
    VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
    /*
     * Split shadow pages too, because free(page[0]) would
     * otherwise free the whole shadow.
     */
    if (kmemcheck_page_is_tracked(page))
        split_page(virt_to_page(page[0].shadow), order);
#endif

    for (i = 1; i < (1 << order); i++)
        set_page_refcounted(page + i);
}

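/*
 * A minimal usage sketch (illustrative only, not part of this file):
 *
 *    struct page *page = alloc_pages(GFP_KERNEL, 2);
 *    if (page) {
 *        int i;
 *
 *        split_page(page, 2);
 *        for (i = 0; i < 4; i++)
 *            __free_page(page + i);
 *    }
 *
 * GFP_KERNEL lacks __GFP_COMP, so the order-2 page is non-compound and
 * satisfies the VM_BUG_ON above.
 */
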
/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
    unsigned int order;
    unsigned long watermark;
    struct zone *zone;

    BUG_ON(!PageBuddy(page));

    zone = page_zone(page);
    order = page_order(page);

    /* Obey watermarks as if the page was being allocated */
    watermark = low_wmark_pages(zone) + (1 << order);
    if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
        return 0;

    /* Remove page from free list */
    list_del(&page->lru);
    zone->free_area[order].nr_free--;
    rmv_page_order(page);
    __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));

    /* Split into individual pages */
    set_page_refcounted(page);
    split_page(page, order);

    if (order >= pageblock_order - 1) {
        struct page *endpage = page + (1 << order) - 1;
        for (; page < endpage; page += pageblock_nr_pages)
            set_pageblock_migratetype(page, MIGRATE_MOVABLE);
    }

    return 1 << order;
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
 * we cheat by calling it from here, in the order > 0 path. Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
            struct zone *zone, int order, gfp_t gfp_flags,
            int migratetype)
{
    unsigned long flags;
    struct page *page;
    int cold = !!(gfp_flags & __GFP_COLD);

again:
    if (likely(order == 0)) {
        struct per_cpu_pages *pcp;
        struct list_head *list;

        local_irq_save(flags);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        list = &pcp->lists[migratetype];
        if (list_empty(list)) {
            pcp->count += rmqueue_bulk(zone, 0,
                    pcp->batch, list,
                    migratetype, cold);
            if (unlikely(list_empty(list)))
                goto failed;
        }

        if (cold)
            page = list_entry(list->prev, struct page, lru);
        else
            page = list_entry(list->next, struct page, lru);

        list_del(&page->lru);
        pcp->count--;
    } else {
        if (unlikely(gfp_flags & __GFP_NOFAIL)) {
            /*
             * __GFP_NOFAIL is not to be used in new code.
             *
             * All __GFP_NOFAIL callers should be fixed so that they
             * properly detect and handle allocation failures.
             *
             * We most definitely don't want callers attempting to
             * allocate greater than order-1 page units with
             * __GFP_NOFAIL.
             */
            WARN_ON_ONCE(order > 1);
        }
        spin_lock_irqsave(&zone->lock, flags);
        page = __rmqueue(zone, order, migratetype);
        spin_unlock(&zone->lock);
        if (!page)
            goto failed;
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
    }

    __count_zone_vm_events(PGALLOC, zone, 1 << order);
    zone_statistics(preferred_zone, zone);
    local_irq_restore(flags);

    VM_BUG_ON(bad_range(zone, page));
    if (prep_new_page(page, order, gfp_flags))
        goto again;
    return page;

failed:
    local_irq_restore(flags);
    return NULL;
}

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN     WMARK_MIN
#define ALLOC_WMARK_LOW     WMARK_LOW
#define ALLOC_WMARK_HIGH    WMARK_HIGH
#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK    (ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER        0x10 /* try to alloc harder */
#define ALLOC_HIGH          0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET        0x40 /* check for correct cpuset */

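/*
 * Note (illustrative): ALLOC_NO_WATERMARKS is 0x04, so ALLOC_WMARK_MASK
 * is 0x03, which is wide enough to carry the WMARK_MIN, WMARK_LOW and
 * WMARK_HIGH index values while staying disjoint from the 0x10-0x40
 * flag bits above.
 */
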
#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
    struct fault_attr attr;

    u32 ignore_gfp_highmem;
    u32 ignore_gfp_wait;
    u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

    struct dentry *ignore_gfp_highmem_file;
    struct dentry *ignore_gfp_wait_file;
    struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
    .attr = FAULT_ATTR_INITIALIZER,
    .ignore_gfp_wait = 1,
    .ignore_gfp_highmem = 1,
    .min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
    return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
    if (order < fail_page_alloc.min_order)
        return 0;
    if (gfp_mask & __GFP_NOFAIL)
        return 0;
    if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
        return 0;
    if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
        return 0;

    return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
    mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
    struct dentry *dir;
    int err;

    err = init_fault_attr_dentries(&fail_page_alloc.attr,
                       "fail_page_alloc");
    if (err)
        return err;
    dir = fail_page_alloc.attr.dentries.dir;

    fail_page_alloc.ignore_gfp_wait_file =
        debugfs_create_bool("ignore-gfp-wait", mode, dir,
                      &fail_page_alloc.ignore_gfp_wait);

    fail_page_alloc.ignore_gfp_highmem_file =
        debugfs_create_bool("ignore-gfp-highmem", mode, dir,
                      &fail_page_alloc.ignore_gfp_highmem);
    fail_page_alloc.min_order_file =
        debugfs_create_u32("min-order", mode, dir,
                   &fail_page_alloc.min_order);

    if (!fail_page_alloc.ignore_gfp_wait_file ||
            !fail_page_alloc.ignore_gfp_highmem_file ||
            !fail_page_alloc.min_order_file) {
        err = -ENOMEM;
        debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
        debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
        debugfs_remove(fail_page_alloc.min_order_file);
        cleanup_fault_attr_dentries(&fail_page_alloc.attr);
    }

    return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
    return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
              int classzone_idx, int alloc_flags)
{
    /* free_pages may go negative - that's OK */
    long min = mark;
    long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
    int o;

    if (alloc_flags & ALLOC_HIGH)
        min -= min / 2;
    if (alloc_flags & ALLOC_HARDER)
        min -= min / 4;

    if (free_pages <= min + z->lowmem_reserve[classzone_idx])
        return 0;
    for (o = 0; o < order; o++) {
        /* At the next order, this order's pages become unavailable */
        free_pages -= z->free_area[o].nr_free << o;

        /* Require fewer higher order pages to be free */
        min >>= 1;

        if (free_pages <= min)
            return 0;
    }
    return 1;
}

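/*
 * Worked example (illustrative): an order-2 request against mark = 64
 * with lowmem_reserve 0, 80 pages free of which 40 are order-0. The
 * base check sees 80 - 4 + 1 = 77 > 64; at o = 0 the order-0 pages are
 * discounted and the bar halves, 77 - 40 = 37 > 32; the same then
 * repeats with the order-1 pages against min = 16 before the request
 * is approved.
 */
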
#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full. See further
 * comments in mmzone.h. Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed in zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
    struct zonelist_cache *zlc; /* cached zonelist speedup info */
    nodemask_t *allowednodes;   /* zonelist_cache approximation */

    zlc = zonelist->zlcache_ptr;
    if (!zlc)
        return NULL;

    if (time_after(jiffies, zlc->last_full_zap + HZ)) {
        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
        zlc->last_full_zap = jiffies;
    }

    allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
                    &cpuset_current_mems_allowed :
                    &node_states[N_HIGH_MEMORY];
    return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 * bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                        nodemask_t *allowednodes)
{
    struct zonelist_cache *zlc; /* cached zonelist speedup info */
    int i;              /* index of *z in zonelist zones */
    int n;              /* node that zone *z is on */

    zlc = zonelist->zlcache_ptr;
    if (!zlc)
        return 1;

    i = z - zonelist->_zonerefs;
    n = zlc->z_to_n[i];

    /* This zone is worth trying if it is allowed but not full */
    return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
    struct zonelist_cache *zlc; /* cached zonelist speedup info */
    int i;              /* index of *z in zonelist zones */

    zlc = zonelist->zlcache_ptr;
    if (!zlc)
        return;

    i = z - zonelist->_zonerefs;

    set_bit(i, zlc->fullzones);
}

#else /* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
    return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                nodemask_t *allowednodes)
{
    return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}
#endif /* CONFIG_NUMA */

1607/*
1608 * get_page_from_freelist goes through the zonelist trying to allocate
1609 * a page.
1610 */
1611static struct page *
1612get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1613        struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1614        struct zone *preferred_zone, int migratetype)
1615{
1616    struct zoneref *z;
1617    struct page *page = NULL;
1618    int classzone_idx;
1619    struct zone *zone;
1620    nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1621    int zlc_active = 0; /* set if using zonelist_cache */
1622    int did_zlc_setup = 0; /* just call zlc_setup() one time */
1623
1624    classzone_idx = zone_idx(preferred_zone);
1625zonelist_scan:
1626    /*
1627     * Scan zonelist, looking for a zone with enough free.
1628     * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1629     */
1630    for_each_zone_zonelist_nodemask(zone, z, zonelist,
1631                        high_zoneidx, nodemask) {
1632        if (NUMA_BUILD && zlc_active &&
1633            !zlc_zone_worth_trying(zonelist, z, allowednodes))
1634                continue;
1635        if ((alloc_flags & ALLOC_CPUSET) &&
1636            !cpuset_zone_allowed_softwall(zone, gfp_mask))
1637                goto try_next_zone;
1638
1639        BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1640        if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1641            unsigned long mark;
1642            int ret;
1643
1644            mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1645            if (zone_watermark_ok(zone, order, mark,
1646                    classzone_idx, alloc_flags))
1647                goto try_this_zone;
1648
1649            if (zone_reclaim_mode == 0)
1650                goto this_zone_full;
1651
1652            ret = zone_reclaim(zone, gfp_mask, order);
1653            switch (ret) {
1654            case ZONE_RECLAIM_NOSCAN:
1655                /* did not scan */
1656                goto try_next_zone;
1657            case ZONE_RECLAIM_FULL:
1658                /* scanned but unreclaimable */
1659                goto this_zone_full;
1660            default:
1661                /* did we reclaim enough */
1662                if (!zone_watermark_ok(zone, order, mark,
1663                        classzone_idx, alloc_flags))
1664                    goto this_zone_full;
1665            }
1666        }
1667
1668try_this_zone:
1669        page = buffered_rmqueue(preferred_zone, zone, order,
1670                        gfp_mask, migratetype);
1671        if (page)
1672            break;
1673this_zone_full:
1674        if (NUMA_BUILD)
1675            zlc_mark_zone_full(zonelist, z);
1676try_next_zone:
1677        if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1678            /*
1679             * we do zlc_setup after the first zone is tried but only
1680             * if there are multiple nodes make it worthwhile
1681             */
1682            allowednodes = zlc_setup(zonelist, alloc_flags);
1683            zlc_active = 1;
1684            did_zlc_setup = 1;
1685        }
1686    }
1687
1688    if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1689        /* Disable zlc cache for second zonelist scan */
1690        zlc_active = 0;
1691        goto zonelist_scan;
1692    }
1693    return page;
1694}
1695
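/*
 * Flow summary (editor's annotation, not part of the original source): the
 * first scan honours the zonelist_cache and skips zones recently found
 * full; if that scan fails while zlc was active, the cache is disabled and
 * the zonelist is scanned once more in full before giving up.
 */
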
1696static inline int
1697should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1698                unsigned long pages_reclaimed)
1699{
1700    /* Do not loop if specifically requested */
1701    if (gfp_mask & __GFP_NORETRY)
1702        return 0;
1703
1704    /*
1705     * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER is
1706     * treated as if __GFP_NOFAIL were set, but that may not be true
1707     * in other implementations.
1708     */
1709    if (order <= PAGE_ALLOC_COSTLY_ORDER)
1710        return 1;
1711
1712    /*
1713     * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1714     * specified, then we retry until we no longer reclaim any pages
1715     * (above), or we've reclaimed an order of pages at least as
1716     * large as the allocation's order. In both cases, if the
1717     * allocation still fails, we stop retrying.
1718     */
1719    if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1720        return 1;
1721
1722    /*
1723     * Don't let big-order allocations loop unless the caller
1724     * explicitly requests that.
1725     */
1726    if (gfp_mask & __GFP_NOFAIL)
1727        return 1;
1728
1729    return 0;
1730}
1731
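/*
 * Illustrative results (editor's annotation, not part of the original
 * source) for should_alloc_retry(gfp_mask, order, pages_reclaimed):
 *
 *	(GFP_KERNEL, 0, n)                 -> 1, order below the costly limit
 *	(GFP_KERNEL | __GFP_NORETRY, 0, n) -> 0, caller opted out of looping
 *	(GFP_KERNEL | __GFP_REPEAT, 5, 8)  -> 1, 8 < (1 << 5) pages reclaimed
 *	(GFP_KERNEL | __GFP_REPEAT, 5, 40) -> 0, reclaimed a full order already
 *	(GFP_KERNEL | __GFP_NOFAIL, 5, 40) -> 1, never allowed to fail
 */
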
1732static inline struct page *
1733__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1734    struct zonelist *zonelist, enum zone_type high_zoneidx,
1735    nodemask_t *nodemask, struct zone *preferred_zone,
1736    int migratetype)
1737{
1738    struct page *page;
1739
1740    /* Acquire the OOM killer lock for the zones in zonelist */
1741    if (!try_set_zone_oom(zonelist, gfp_mask)) {
1742        schedule_timeout_uninterruptible(1);
1743        return NULL;
1744    }
1745
1746    /*
1747     * Go through the zonelist yet one more time, keeping a very high
1748     * watermark here. This is only to catch a parallel OOM killing; we
1749     * must fail if we're still under heavy pressure.
1750     */
1751    page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1752        order, zonelist, high_zoneidx,
1753        ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1754        preferred_zone, migratetype);
1755    if (page)
1756        goto out;
1757
1758    if (!(gfp_mask & __GFP_NOFAIL)) {
1759        /* The OOM killer will not help higher order allocs */
1760        if (order > PAGE_ALLOC_COSTLY_ORDER)
1761            goto out;
1762        /*
1763         * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1764         * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1765         * The caller should handle page allocation failure by itself if
1766         * it specifies __GFP_THISNODE.
1767         * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1768         */
1769        if (gfp_mask & __GFP_THISNODE)
1770            goto out;
1771    }
1772    /* Exhausted what can be done so it's blamo time */
1773    out_of_memory(zonelist, gfp_mask, order, nodemask);
1774
1775out:
1776    clear_zonelist_oom(zonelist, gfp_mask);
1777    return page;
1778}
1779
1780#ifdef CONFIG_COMPACTION
1781/* Try memory compaction for high-order allocations before reclaim */
1782static struct page *
1783__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1784    struct zonelist *zonelist, enum zone_type high_zoneidx,
1785    nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1786    int migratetype, unsigned long *did_some_progress)
1787{
1788    struct page *page;
1789
1790    if (!order || compaction_deferred(preferred_zone))
1791        return NULL;
1792
1793    *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
1794                                nodemask);
1795    if (*did_some_progress != COMPACT_SKIPPED) {
1796
1797        /* Page migration frees to the PCP lists but we want merging */
1798        drain_pages(get_cpu());
1799        put_cpu();
1800
1801        page = get_page_from_freelist(gfp_mask, nodemask,
1802                order, zonelist, high_zoneidx,
1803                alloc_flags, preferred_zone,
1804                migratetype);
1805        if (page) {
1806            preferred_zone->compact_considered = 0;
1807            preferred_zone->compact_defer_shift = 0;
1808            count_vm_event(COMPACTSUCCESS);
1809            return page;
1810        }
1811
1812        /*
1813         * It's bad if a compaction run occurs and fails. The most
1814         * likely reason is that pages exist, but not enough of them
1815         * to satisfy the watermarks.
1816         */
1817        count_vm_event(COMPACTFAIL);
1818        defer_compaction(preferred_zone);
1819
1820        cond_resched();
1821    }
1822
1823    return NULL;
1824}
1825#else
1826static inline struct page *
1827__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1828    struct zonelist *zonelist, enum zone_type high_zoneidx,
1829    nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1830    int migratetype, unsigned long *did_some_progress)
1831{
1832    return NULL;
1833}
1834#endif /* CONFIG_COMPACTION */
1835
1836/* The really slow allocator path where we enter direct reclaim */
1837static inline struct page *
1838__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
1839    struct zonelist *zonelist, enum zone_type high_zoneidx,
1840    nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1841    int migratetype, unsigned long *did_some_progress)
1842{
1843    struct page *page = NULL;
1844    struct reclaim_state reclaim_state;
1845    struct task_struct *p = current;
1846
1847    cond_resched();
1848
1849    /* We now go into synchronous reclaim */
1850    cpuset_memory_pressure_bump();
1851    p->flags |= PF_MEMALLOC;
1852    lockdep_set_current_reclaim_state(gfp_mask);
1853    reclaim_state.reclaimed_slab = 0;
1854    p->reclaim_state = &reclaim_state;
1855
1856    *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
1857
1858    p->reclaim_state = NULL;
1859    lockdep_clear_current_reclaim_state();
1860    p->flags &= ~PF_MEMALLOC;
1861
1862    cond_resched();
1863
1864    if (order != 0)
1865        drain_all_pages();
1866
1867    if (likely(*did_some_progress))
1868        page = get_page_from_freelist(gfp_mask, nodemask, order,
1869                    zonelist, high_zoneidx,
1870                    alloc_flags, preferred_zone,
1871                    migratetype);
1872    return page;
1873}
1874
1875/*
1876 * This is called in the allocator slow-path if the allocation request is of
1877 * sufficient urgency to ignore watermarks and take other desperate measures.
1878 */
1879static inline struct page *
1880__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
1881    struct zonelist *zonelist, enum zone_type high_zoneidx,
1882    nodemask_t *nodemask, struct zone *preferred_zone,
1883    int migratetype)
1884{
1885    struct page *page;
1886
1887    do {
1888        page = get_page_from_freelist(gfp_mask, nodemask, order,
1889            zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
1890            preferred_zone, migratetype);
1891
1892        if (!page && gfp_mask & __GFP_NOFAIL)
1893            congestion_wait(BLK_RW_ASYNC, HZ/50);
1894    } while (!page && (gfp_mask & __GFP_NOFAIL));
1895
1896    return page;
1897}
1898
1899static inline
1900void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
1901                        enum zone_type high_zoneidx)
1902{
1903    struct zoneref *z;
1904    struct zone *zone;
1905
1906    for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1907        wakeup_kswapd(zone, order);
1908}
1909
1910static inline int
1911gfp_to_alloc_flags(gfp_t gfp_mask)
1912{
1913    struct task_struct *p = current;
1914    int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
1915    const gfp_t wait = gfp_mask & __GFP_WAIT;
1916
1917    /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
1918    BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
1919
1920    /*
1921     * The caller may dip into page reserves a bit more if the caller
1922     * cannot run direct reclaim, or if the caller has realtime scheduling
1923     * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
1924     * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1925     */
1926    alloc_flags |= (gfp_mask & __GFP_HIGH);
1927
1928    if (!wait) {
1929        alloc_flags |= ALLOC_HARDER;
1930        /*
1931         * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1932         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1933         */
1934        alloc_flags &= ~ALLOC_CPUSET;
1935    } else if (unlikely(rt_task(p)) && !in_interrupt())
1936        alloc_flags |= ALLOC_HARDER;
1937
1938    if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
1939        if (!in_interrupt() &&
1940            ((p->flags & PF_MEMALLOC) ||
1941             unlikely(test_thread_flag(TIF_MEMDIE))))
1942            alloc_flags |= ALLOC_NO_WATERMARKS;
1943    }
1944
1945    return alloc_flags;
1946}
1947
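/*
 * Worked example (editor's annotation): GFP_ATOMIC is __GFP_HIGH without
 * __GFP_WAIT, so a GFP_ATOMIC request yields
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER with ALLOC_CPUSET cleared:
 * it may dip further into the reserves and ignores cpuset constraints
 * rather than fail from atomic context.
 */
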
1948static inline struct page *
1949__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1950    struct zonelist *zonelist, enum zone_type high_zoneidx,
1951    nodemask_t *nodemask, struct zone *preferred_zone,
1952    int migratetype)
1953{
1954    const gfp_t wait = gfp_mask & __GFP_WAIT;
1955    struct page *page = NULL;
1956    int alloc_flags;
1957    unsigned long pages_reclaimed = 0;
1958    unsigned long did_some_progress;
1959    struct task_struct *p = current;
1960
1961    /*
1962     * In the slowpath, we sanity check order to avoid ever trying to
1963     * reclaim >= MAX_ORDER areas which will never succeed. Callers may
1964     * be using allocators in order of preference for an area that is
1965     * too large.
1966     */
1967    if (order >= MAX_ORDER) {
1968        WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
1969        return NULL;
1970    }
1971
1972    /*
1973     * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1974     * __GFP_NOWARN set) should not cause reclaim since the subsystem
1975     * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
1976     * using a larger set of nodes after it has established that the
1977     * allowed per node queues are empty and that nodes are
1978     * over-allocated.
1979     */
1980    if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1981        goto nopage;
1982
1983restart:
1984    wake_all_kswapd(order, zonelist, high_zoneidx);
1985
1986    /*
1987     * OK, we're below the kswapd watermark and have kicked background
1988     * reclaim. Now things get more complex, so set up alloc_flags according
1989     * to how we want to proceed.
1990     */
1991    alloc_flags = gfp_to_alloc_flags(gfp_mask);
1992
1993    /* This is the last chance, in general, before the goto nopage. */
1994    page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1995            high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
1996            preferred_zone, migratetype);
1997    if (page)
1998        goto got_pg;
1999
2000rebalance:
2001    /* Allocate without watermarks if the context allows */
2002    if (alloc_flags & ALLOC_NO_WATERMARKS) {
2003        page = __alloc_pages_high_priority(gfp_mask, order,
2004                zonelist, high_zoneidx, nodemask,
2005                preferred_zone, migratetype);
2006        if (page)
2007            goto got_pg;
2008    }
2009
2010    /* Atomic allocations - we can't balance anything */
2011    if (!wait)
2012        goto nopage;
2013
2014    /* Avoid recursion of direct reclaim */
2015    if (p->flags & PF_MEMALLOC)
2016        goto nopage;
2017
2018    /* Avoid allocations with no watermarks from looping endlessly */
2019    if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2020        goto nopage;
2021
2022    /* Try direct compaction */
2023    page = __alloc_pages_direct_compact(gfp_mask, order,
2024                    zonelist, high_zoneidx,
2025                    nodemask,
2026                    alloc_flags, preferred_zone,
2027                    migratetype, &did_some_progress);
2028    if (page)
2029        goto got_pg;
2030
2031    /* Try direct reclaim and then allocating */
2032    page = __alloc_pages_direct_reclaim(gfp_mask, order,
2033                    zonelist, high_zoneidx,
2034                    nodemask,
2035                    alloc_flags, preferred_zone,
2036                    migratetype, &did_some_progress);
2037    if (page)
2038        goto got_pg;
2039
2040    /*
2041     * If we failed to make any progress reclaiming, then we are
2042     * running out of options and have to consider going OOM
2043     */
2044    if (!did_some_progress) {
2045        if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2046            if (oom_killer_disabled)
2047                goto nopage;
2048            page = __alloc_pages_may_oom(gfp_mask, order,
2049                    zonelist, high_zoneidx,
2050                    nodemask, preferred_zone,
2051                    migratetype);
2052            if (page)
2053                goto got_pg;
2054
2055            /*
2056             * The OOM killer does not trigger for high-order
2057             * allocations without __GFP_NOFAIL, so if no progress is
2058             * being made, there are no other options and retrying is
2059             * unlikely to help.
2060             */
2061            if (order > PAGE_ALLOC_COSTLY_ORDER &&
2062                        !(gfp_mask & __GFP_NOFAIL))
2063                goto nopage;
2064
2065            goto restart;
2066        }
2067    }
2068
2069    /* Check if we should retry the allocation */
2070    pages_reclaimed += did_some_progress;
2071    if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
2072        /* Wait for some write requests to complete then retry */
2073        congestion_wait(BLK_RW_ASYNC, HZ/50);
2074        goto rebalance;
2075    }
2076
2077nopage:
2078    if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
2079        printk(KERN_WARNING "%s: page allocation failure."
2080            " order:%d, mode:0x%x\n",
2081            p->comm, order, gfp_mask);
2082        dump_stack();
2083        show_mem();
2084    }
2085    return page;
2086got_pg:
2087    if (kmemcheck_enabled)
2088        kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2089    return page;
2090
2091}
2092
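/*
 * Flow summary (editor's annotation): the slowpath above kicks kswapd,
 * retries with adjusted watermarks, then tries in order: no-watermark
 * allocation (if permitted), direct compaction, direct reclaim, and
 * finally the OOM killer, looping via should_alloc_retry() with
 * congestion waits in between.
 */
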
2093/*
2094 * This is the 'heart' of the zoned buddy allocator.
2095 */
2096struct page *
2097__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2098            struct zonelist *zonelist, nodemask_t *nodemask)
2099{
2100    enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2101    struct zone *preferred_zone;
2102    struct page *page;
2103    int migratetype = allocflags_to_migratetype(gfp_mask);
2104
2105    gfp_mask &= gfp_allowed_mask;
2106
2107    lockdep_trace_alloc(gfp_mask);
2108
2109    might_sleep_if(gfp_mask & __GFP_WAIT);
2110
2111    if (should_fail_alloc_page(gfp_mask, order))
2112        return NULL;
2113
2114    /*
2115     * Check the zones suitable for the gfp_mask contain at least one
2116     * valid zone. It's possible to have an empty zonelist as a result
2117     * of GFP_THISNODE and a memoryless node.
2118     */
2119    if (unlikely(!zonelist->_zonerefs->zone))
2120        return NULL;
2121
2122    get_mems_allowed();
2123    /* The preferred zone is used for statistics later */
2124    first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
2125    if (!preferred_zone) {
2126        put_mems_allowed();
2127        return NULL;
2128    }
2129
2130    /* First allocation attempt */
2131    page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2132            zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2133            preferred_zone, migratetype);
2134    if (unlikely(!page))
2135        page = __alloc_pages_slowpath(gfp_mask, order,
2136                zonelist, high_zoneidx, nodemask,
2137                preferred_zone, migratetype);
2138    put_mems_allowed();
2139
2140    trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2141    return page;
2142}
2143EXPORT_SYMBOL(__alloc_pages_nodemask);
2144
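/*
 * Usage sketch (editor's annotation, not part of the original file):
 * callers normally reach this entry point through the alloc_pages()
 * wrappers in <linux/gfp.h> rather than invoking it directly, e.g.:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	(4 contiguous pages)
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 */
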
2145/*
2146 * Common helper functions.
2147 */
2148unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2149{
2150    struct page *page;
2151
2152    /*
2153     * __get_free_pages() returns a 32-bit address, which cannot represent
2154     * a highmem page
2155     */
2156    VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2157
2158    page = alloc_pages(gfp_mask, order);
2159    if (!page)
2160        return 0;
2161    return (unsigned long) page_address(page);
2162}
2163EXPORT_SYMBOL(__get_free_pages);
2164
2165unsigned long get_zeroed_page(gfp_t gfp_mask)
2166{
2167    return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2168}
2169EXPORT_SYMBOL(get_zeroed_page);
2170
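/*
 * Usage sketch (editor's annotation): these helpers deal in kernel
 * virtual addresses rather than struct pages, and pair with free_pages()
 * and free_page() respectively, e.g.:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);
 *	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);
 *	...
 *	free_pages(buf, 1);
 *	free_page(zeroed);
 */
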
2171void __pagevec_free(struct pagevec *pvec)
2172{
2173    int i = pagevec_count(pvec);
2174
2175    while (--i >= 0) {
2176        trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
2177        free_hot_cold_page(pvec->pages[i], pvec->cold);
2178    }
2179}
2180
2181void __free_pages(struct page *page, unsigned int order)
2182{
2183    if (put_page_testzero(page)) {
2184        if (order == 0)
2185            free_hot_cold_page(page, 0);
2186        else
2187            __free_pages_ok(page, order);
2188    }
2189}
2190
2191EXPORT_SYMBOL(__free_pages);
2192
2193void free_pages(unsigned long addr, unsigned int order)
2194{
2195    if (addr != 0) {
2196        VM_BUG_ON(!virt_addr_valid((void *)addr));
2197        __free_pages(virt_to_page((void *)addr), order);
2198    }
2199}
2200
2201EXPORT_SYMBOL(free_pages);
2202
2203/**
2204 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2205 * @size: the number of bytes to allocate
2206 * @gfp_mask: GFP flags for the allocation
2207 *
2208 * This function is similar to alloc_pages(), except that it allocates the
2209 * minimum number of pages to satisfy the request. alloc_pages() can only
2210 * allocate memory in power-of-two pages.
2211 *
2212 * This function is also limited by MAX_ORDER.
2213 *
2214 * Memory allocated by this function must be released by free_pages_exact().
2215 */
2216void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2217{
2218    unsigned int order = get_order(size);
2219    unsigned long addr;
2220
2221    addr = __get_free_pages(gfp_mask, order);
2222    if (addr) {
2223        unsigned long alloc_end = addr + (PAGE_SIZE << order);
2224        unsigned long used = addr + PAGE_ALIGN(size);
2225
2226        split_page(virt_to_page((void *)addr), order);
2227        while (used < alloc_end) {
2228            free_page(used);
2229            used += PAGE_SIZE;
2230        }
2231    }
2232
2233    return (void *)addr;
2234}
2235EXPORT_SYMBOL(alloc_pages_exact);
2236
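/*
 * Worked example (editor's annotation, assuming 4K pages):
 * alloc_pages_exact(20 * 1024, GFP_KERNEL) computes order = 3 (32K),
 * splits that high-order allocation into order-0 pages and frees the
 * trailing three, leaving exactly the five pages needed for 20K:
 *
 *	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 20 * 1024);
 */
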
2237/**
2238 * free_pages_exact - release memory allocated via alloc_pages_exact()
2239 * @virt: the value returned by alloc_pages_exact.
2240 * @size: size of allocation, same value as passed to alloc_pages_exact().
2241 *
2242 * Release the memory allocated by a previous call to alloc_pages_exact.
2243 */
2244void free_pages_exact(void *virt, size_t size)
2245{
2246    unsigned long addr = (unsigned long)virt;
2247    unsigned long end = addr + PAGE_ALIGN(size);
2248
2249    while (addr < end) {
2250        free_page(addr);
2251        addr += PAGE_SIZE;
2252    }
2253}
2254EXPORT_SYMBOL(free_pages_exact);
2255
2256static unsigned int nr_free_zone_pages(int offset)
2257{
2258    struct zoneref *z;
2259    struct zone *zone;
2260
2261    /* Just pick one node, since fallback list is circular */
2262    unsigned int sum = 0;
2263
2264    struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2265
2266    for_each_zone_zonelist(zone, z, zonelist, offset) {
2267        unsigned long size = zone->present_pages;
2268        unsigned long high = high_wmark_pages(zone);
2269        if (size > high)
2270            sum += size - high;
2271    }
2272
2273    return sum;
2274}
2275
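/*
 * Worked example (editor's annotation): a zone with present_pages = 262144
 * and a high watermark of 4096 pages contributes 262144 - 4096 = 258048
 * pages to the sum above; zones at or below their high watermark
 * contribute nothing.
 */
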
2276/*
2277 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2278 */
2279unsigned int nr_free_buffer_pages(void)
2280{
2281    return nr_free_zone_pages(gfp_zone(GFP_USER));
2282}
2283EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2284
2285/*
2286 * Amount of free RAM allocatable within all zones
2287 */
2288unsigned int nr_free_pagecache_pages(void)
2289{
2290    return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2291}
2292
2293static inline void show_node(struct zone *zone)
2294{
2295    if (NUMA_BUILD)
2296        printk("Node %d ", zone_to_nid(zone));
2297}
2298
2299void si_meminfo(struct sysinfo *val)
2300{
2301    val->totalram = totalram_pages;
2302    val->sharedram = 0;
2303    val->freeram = global_page_state(NR_FREE_PAGES);
2304    val->bufferram = nr_blockdev_pages();
2305    val->totalhigh = totalhigh_pages;
2306    val->freehigh = nr_free_highpages();
2307    val->mem_unit = PAGE_SIZE;
2308}
2309
2310EXPORT_SYMBOL(si_meminfo);
2311
2312#ifdef CONFIG_NUMA
2313void si_meminfo_node(struct sysinfo *val, int nid)
2314{
2315    pg_data_t *pgdat = NODE_DATA(nid);
2316
2317    val->totalram = pgdat->node_present_pages;
2318    val->freeram = node_page_state(nid, NR_FREE_PAGES);
2319#ifdef CONFIG_HIGHMEM
2320    val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2321    val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2322            NR_FREE_PAGES);
2323#else
2324    val->totalhigh = 0;
2325    val->freehigh = 0;
2326#endif
2327    val->mem_unit = PAGE_SIZE;
2328}
2329#endif
2330
2331#define K(x) ((x) << (PAGE_SHIFT-10))	/* convert pages to KB */
2332
2333/*
2334 * Show free area list (used inside shift_scroll-lock stuff)
2335 * We also calculate the percentage fragmentation. We do this by counting the
2336 * memory on each free list with the exception of the first item on the list.
2337 */
2338void show_free_areas(void)
2339{
2340    int cpu;
2341    struct zone *zone;
2342
2343    for_each_populated_zone(zone) {
2344        show_node(zone);
2345        printk("%s per-cpu:\n", zone->name);
2346
2347        for_each_online_cpu(cpu) {
2348            struct per_cpu_pageset *pageset;
2349
2350            pageset = per_cpu_ptr(zone->pageset, cpu);
2351
2352            printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2353                   cpu, pageset->pcp.high,
2354                   pageset->pcp.batch, pageset->pcp.count);
2355        }
2356    }
2357
2358    printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2359        " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2360        " unevictable:%lu"
2361        " dirty:%lu writeback:%lu unstable:%lu\n"
2362        " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2363        " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2364        global_page_state(NR_ACTIVE_ANON),
2365        global_page_state(NR_INACTIVE_ANON),
2366        global_page_state(NR_ISOLATED_ANON),
2367        global_page_state(NR_ACTIVE_FILE),
2368        global_page_state(NR_INACTIVE_FILE),
2369        global_page_state(NR_ISOLATED_FILE),
2370        global_page_state(NR_UNEVICTABLE),
2371        global_page_state(NR_FILE_DIRTY),
2372        global_page_state(NR_WRITEBACK),
2373        global_page_state(NR_UNSTABLE_NFS),
2374        global_page_state(NR_FREE_PAGES),
2375        global_page_state(NR_SLAB_RECLAIMABLE),
2376        global_page_state(NR_SLAB_UNRECLAIMABLE),
2377        global_page_state(NR_FILE_MAPPED),
2378        global_page_state(NR_SHMEM),
2379        global_page_state(NR_PAGETABLE),
2380        global_page_state(NR_BOUNCE));
2381
2382    for_each_populated_zone(zone) {
2383        int i;
2384
2385        show_node(zone);
2386        printk("%s"
2387            " free:%lukB"
2388            " min:%lukB"
2389            " low:%lukB"
2390            " high:%lukB"
2391            " active_anon:%lukB"
2392            " inactive_anon:%lukB"
2393            " active_file:%lukB"
2394            " inactive_file:%lukB"
2395            " unevictable:%lukB"
2396            " isolated(anon):%lukB"
2397            " isolated(file):%lukB"
2398            " present:%lukB"
2399            " mlocked:%lukB"
2400            " dirty:%lukB"
2401            " writeback:%lukB"
2402            " mapped:%lukB"
2403            " shmem:%lukB"
2404            " slab_reclaimable:%lukB"
2405            " slab_unreclaimable:%lukB"
2406            " kernel_stack:%lukB"
2407            " pagetables:%lukB"
2408            " unstable:%lukB"
2409            " bounce:%lukB"
2410            " writeback_tmp:%lukB"
2411            " pages_scanned:%lu"
2412            " all_unreclaimable? %s"
2413            "\n",
2414            zone->name,
2415            K(zone_page_state(zone, NR_FREE_PAGES)),
2416            K(min_wmark_pages(zone)),
2417            K(low_wmark_pages(zone)),
2418            K(high_wmark_pages(zone)),
2419            K(zone_page_state(zone, NR_ACTIVE_ANON)),
2420            K(zone_page_state(zone, NR_INACTIVE_ANON)),
2421            K(zone_page_state(zone, NR_ACTIVE_FILE)),
2422            K(zone_page_state(zone, NR_INACTIVE_FILE)),
2423            K(zone_page_state(zone, NR_UNEVICTABLE)),
2424            K(zone_page_state(zone, NR_ISOLATED_ANON)),
2425            K(zone_page_state(zone, NR_ISOLATED_FILE)),
2426            K(zone->present_pages),
2427            K(zone_page_state(zone, NR_MLOCK)),
2428            K(zone_page_state(zone, NR_FILE_DIRTY)),
2429            K(zone_page_state(zone, NR_WRITEBACK)),
2430            K(zone_page_state(zone, NR_FILE_MAPPED)),
2431            K(zone_page_state(zone, NR_SHMEM)),
2432            K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2433            K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2434            zone_page_state(zone, NR_KERNEL_STACK) *
2435                THREAD_SIZE / 1024,
2436            K(zone_page_state(zone, NR_PAGETABLE)),
2437            K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2438            K(zone_page_state(zone, NR_BOUNCE)),
2439            K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2440            zone->pages_scanned,
2441            (zone->all_unreclaimable ? "yes" : "no")
2442            );
2443        printk("lowmem_reserve[]:");
2444        for (i = 0; i < MAX_NR_ZONES; i++)
2445            printk(" %lu", zone->lowmem_reserve[i]);
2446        printk("\n");
2447    }
2448
2449    for_each_populated_zone(zone) {
2450        unsigned long nr[MAX_ORDER], flags, order, total = 0;
2451
2452        show_node(zone);
2453        printk("%s: ", zone->name);
2454
2455        spin_lock_irqsave(&zone->lock, flags);
2456        for (order = 0; order < MAX_ORDER; order++) {
2457            nr[order] = zone->free_area[order].nr_free;
2458            total += nr[order] << order;
2459        }
2460        spin_unlock_irqrestore(&zone->lock, flags);
2461        for (order = 0; order < MAX_ORDER; order++)
2462            printk("%lu*%lukB ", nr[order], K(1UL) << order);
2463        printk("= %lukB\n", K(total));
2464    }
2465
2466    printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2467
2468    show_swap_cache_info();
2469}
2470
2471static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2472{
2473    zoneref->zone = zone;
2474    zoneref->zone_idx = zone_idx(zone);
2475}
2476
2477/*
2478 * Builds allocation fallback zone lists.
2479 *
2480 * Add all populated zones of a node to the zonelist.
2481 */
2482static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2483                int nr_zones, enum zone_type zone_type)
2484{
2485    struct zone *zone;
2486
2487    BUG_ON(zone_type >= MAX_NR_ZONES);
2488    zone_type++;
2489
2490    do {
2491        zone_type--;
2492        zone = pgdat->node_zones + zone_type;
2493        if (populated_zone(zone)) {
2494            zoneref_set_zone(zone,
2495                &zonelist->_zonerefs[nr_zones++]);
2496            check_highest_zone(zone_type);
2497        }
2498
2499    } while (zone_type);
2500    return nr_zones;
2501}
2502
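/*
 * Illustrative result (editor's annotation): for a node with populated
 * ZONE_DMA and ZONE_NORMAL, the loop above walks zone_type downwards and
 * appends _zonerefs in the order { NORMAL, DMA }, so fallback tries the
 * highest suitable zone first and reaches ZONE_DMA last.
 */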
2503
2504/*
2505 * zonelist_order:
2506 * 0 = automatic detection of better ordering.
2507 * 1 = order by ([node] distance, -zonetype)
2508 * 2 = order by (-zonetype, [node] distance)
2509 *
2510 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2511 * the same zonelist. So only NUMA can configure this param.
2512 */
2513#define ZONELIST_ORDER_DEFAULT 0
2514#define ZONELIST_ORDER_NODE 1
2515#define ZONELIST_ORDER_ZONE 2
2516
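/*
 * Illustrative comparison (editor's annotation): on a 2-node machine where
 * each node has DMA and Normal zones, node 0's fallback list would be
 * roughly:
 *
 *	node order: N0-Normal, N0-DMA, N1-Normal, N1-DMA
 *	zone order: N0-Normal, N1-Normal, N0-DMA, N1-DMA
 */
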
2517/* zonelist order in the kernel.
2518 * set_zonelist_order() will set this to NODE or ZONE.
2519 */
2520static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2521static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2522
2523
2524#ifdef CONFIG_NUMA
2525/* The value the user specified, as set via boot option or sysctl */
2526static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2527/* string for sysctl */
2528#define NUMA_ZONELIST_ORDER_LEN 16
2529char numa_zonelist_order[16] = "default";
2530
2531/*
2532 * Interface for configuring zonelist ordering.
2533 * Command line option "numa_zonelist_order"
2534 *  = "[dD]efault" - default, automatic configuration
2535 *  = "[nN]ode"    - order by node locality, then by zone within node
2536 *  = "[zZ]one"    - order by zone, then by locality within zone
2537 */
2538
2539static int __parse_numa_zonelist_order(char *s)
2540{
2541    if (*s == 'd' || *s == 'D') {
2542        user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2543    } else if (*s == 'n' || *s == 'N') {
2544        user_zonelist_order = ZONELIST_ORDER_NODE;
2545    } else if (*s == 'z' || *s == 'Z') {
2546        user_zonelist_order = ZONELIST_ORDER_ZONE;
2547    } else {
2548        printk(KERN_WARNING
2549            "Ignoring invalid numa_zonelist_order value: "
2550            "%s\n", s);
2551        return -EINVAL;
2552    }
2553    return 0;
2554}
2555
2556static __init int setup_numa_zonelist_order(char *s)
2557{
2558    if (s)
2559        return __parse_numa_zonelist_order(s);
2560    return 0;
2561}
2562early_param("numa_zonelist_order", setup_numa_zonelist_order);
2563
2564/*
2565 * sysctl handler for numa_zonelist_order
2566 */
2567int numa_zonelist_order_handler(ctl_table *table, int write,
2568        void __user *buffer, size_t *length,
2569        loff_t *ppos)
2570{
2571    char saved_string[NUMA_ZONELIST_ORDER_LEN];
2572    int ret;
2573    static DEFINE_MUTEX(zl_order_mutex);
2574
2575    mutex_lock(&zl_order_mutex);
2576    if (write)
2577        strcpy(saved_string, (char*)table->data);
2578    ret = proc_dostring(table, write, buffer, length, ppos);
2579    if (ret)
2580        goto out;
2581    if (write) {
2582        int oldval = user_zonelist_order;
2583        if (__parse_numa_zonelist_order((char*)table->data)) {
2584            /*
2585             * bogus value. restore saved string
2586             */
2587            strncpy((char*)table->data, saved_string,
2588                NUMA_ZONELIST_ORDER_LEN);
2589            user_zonelist_order = oldval;
2590        } else if (oldval != user_zonelist_order) {
2591            mutex_lock(&zonelists_mutex);
2592            build_all_zonelists(NULL);
2593            mutex_unlock(&zonelists_mutex);
2594        }
2595    }
2596out:
2597    mutex_unlock(&zl_order_mutex);
2598    return ret;
2599}
2600
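/*
 * Usage sketch (editor's annotation): the ordering can be chosen at boot
 * with the "numa_zonelist_order=" parameter or at runtime via
 * /proc/sys/vm/numa_zonelist_order, e.g.:
 *
 *	echo zone > /proc/sys/vm/numa_zonelist_order
 *
 * Writes go through the handler above, which re-parses the string and, on
 * a successful change, rebuilds all zonelists under zonelists_mutex.
 */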
2601
2602#define MAX_NODE_LOAD (nr_online_nodes)
2603static int node_load[MAX_NUMNODES];
2604
2605/**
2606 * find_next_best_node - find the next node that should appear in a given node's fallback list
2607 * @node: node whose fallback list we're appending
2608 * @used_node_mask: nodemask_t of already used nodes
2609 *
2610 * We use a number of factors to determine which is the next node that should
2611 * appear on a given node's fallback list. The node should not have appeared
2612 * already in @node's fallback list, and it should be the next closest node
2613 * according to the distance array (which contains arbitrary distance values
2614 * from each node to each node in the system), and we also prefer nodes
2615 * with no CPUs, since presumably they'll have very little allocation pressure
2616 * on them otherwise.
2617 * It returns -1 if no node is found.
2618 */
2619static int find_next_best_node(int node, nodemask_t *used_node_mask)
2620{
2621    int n, val;
2622    int min_val = INT_MAX;
2623    int best_node = -1;
2624    const struct cpumask *tmp = cpumask_of_node(0);
2625
2626    /* Use the local node if we haven't already */
2627    if (!node_isset(node, *used_node_mask)) {
2628        node_set(node, *used_node_mask);
2629        return node;
2630    }
2631
2632    for_each_node_state(n, N_HIGH_MEMORY) {
2633
2634        /* Don't want a node to appear more than once */
2635        if (node_isset(n, *used_node_mask))
2636            continue;
2637
2638        /* Use the distance array to find the distance */
2639        val = node_distance(node, n);
2640
2641        /* Penalize nodes under us ("prefer the next node") */
2642        val += (n < node);
2643
2644        /* Give preference to headless and unused nodes */
2645        tmp = cpumask_of_node(n);
2646        if (!cpumask_empty(tmp))
2647            val += PENALTY_FOR_NODE_WITH_CPUS;
2648
2649        /* Slight preference for less loaded node */
2650        val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2651        val += node_load[n];
2652
2653        if (val < min_val) {
2654            min_val = val;
2655            best_node = n;
2656        }
2657    }
2658
2659    if (best_node >= 0)
2660        node_set(best_node, *used_node_mask);
2661
2662    return best_node;
2663}
2664
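/*
 * Worked example (editor's annotation): suppose node 0 is choosing between
 * a CPU-less memory node 1 and a CPU-bearing node 2, both at distance 20,
 * with equal node_load (PENALTY_FOR_NODE_WITH_CPUS defaults to 1):
 *
 *	node 1: val = 20      -> scaled by MAX_NODE_LOAD * MAX_NUMNODES
 *	node 2: val = 20 + 1  -> scaled likewise
 *
 * Node 1 scores lower and wins, matching the stated preference for
 * headless nodes.
 */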
2665
2666/*
2667 * Build zonelists ordered by node and zones within node.
2668 * This results in maximum locality--normal zone overflows into local
2669 * DMA zone, if any--but risks exhausting DMA zone.
2670 */
2671static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2672{
2673    int j;
2674    struct zonelist *zonelist;
2675
2676    zonelist = &pgdat->node_zonelists[0];
2677    for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2678        ;
2679    j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2680                            MAX_NR_ZONES - 1);
2681    zonelist->_zonerefs[j].zone = NULL;
2682    zonelist->_zonerefs[j].zone_idx = 0;
2683}
2684
2685/*
2686 * Build gfp_thisnode zonelists
2687 */
2688static void build_thisnode_zonelists(pg_data_t *pgdat)
2689{
2690    int j;
2691    struct zonelist *zonelist;
2692
2693    zonelist = &pgdat->node_zonelists[1];
2694    j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2695    zonelist->_zonerefs[j].zone = NULL;
2696    zonelist->_zonerefs[j].zone_idx = 0;
2697}
2698
2699/*
2700 * Build zonelists ordered by zone and nodes within zones.
2701 * This results in conserving DMA zone[s] until all Normal memory is
2702 * exhausted, but results in overflowing to remote node while memory
2703 * may still exist in local DMA zone.
2704 */
2705static int node_order[MAX_NUMNODES];
2706
2707static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2708{
2709    int pos, j, node;
2710    int zone_type; /* needs to be signed */
2711    struct zone *z;
2712    struct zonelist *zonelist;
2713
2714    zonelist = &pgdat->node_zonelists[0];
2715    pos = 0;
2716    for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2717        for (j = 0; j < nr_nodes; j++) {
2718            node = node_order[j];
2719            z = &NODE_DATA(node)->node_zones[zone_type];
2720            if (populated_zone(z)) {
2721                zoneref_set_zone(z,
2722                    &zonelist->_zonerefs[pos++]);
2723                check_highest_zone(zone_type);
2724            }
2725        }
2726    }
2727    zonelist->_zonerefs[pos].zone = NULL;
2728    zonelist->_zonerefs[pos].zone_idx = 0;
2729}
2730
2731static int default_zonelist_order(void)
2732{
2733    int nid, zone_type;
2734    unsigned long low_kmem_size, total_size;
2735    struct zone *z;
2736    int average_size;
2737    /*
2738     * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2739     * If they are really small and used heavily, the system can fall
2740     * into OOM very easily.
2741     * This function detects ZONE_DMA/DMA32 size and configures zone order.
2742     */
2743    /* Is there a ZONE_NORMAL? (e.g. ppc has only the DMA zone..) */
2744    low_kmem_size = 0;
2745    total_size = 0;
2746    for_each_online_node(nid) {
2747        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2748            z = &NODE_DATA(nid)->node_zones[zone_type];
2749            if (populated_zone(z)) {
2750                if (zone_type < ZONE_NORMAL)
2751                    low_kmem_size += z->present_pages;
2752                total_size += z->present_pages;
2753            } else if (zone_type == ZONE_NORMAL) {
2754                /*
2755                 * If any node has only lowmem, then node order
2756                 * is preferred to allow kernel allocations
2757                 * locally; otherwise, they can easily infringe
2758                 * on other nodes when there is an abundance of
2759                 * lowmem available to allocate from.
2760                 */
2761                return ZONELIST_ORDER_NODE;
2762            }
2763        }
2764    }
2765    if (!low_kmem_size || /* there is no DMA area. */
2766        low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2767        return ZONELIST_ORDER_NODE;
2768    /*
2769     * Look into each node's config.
2770     * If there is a node whose DMA/DMA32 memory makes up a very large
2771     * share of its local memory, NODE_ORDER may be suitable.
2772     */
2773    average_size = total_size /
2774                (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2775    for_each_online_node(nid) {
2776        low_kmem_size = 0;
2777        total_size = 0;
2778        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2779            z = &NODE_DATA(nid)->node_zones[zone_type];
2780            if (populated_zone(z)) {
2781                if (zone_type < ZONE_NORMAL)
2782                    low_kmem_size += z->present_pages;
2783                total_size += z->present_pages;
2784            }
2785        }
2786        if (low_kmem_size &&
2787            total_size > average_size && /* ignore small node */
2788            low_kmem_size > total_size * 70/100)
2789            return ZONELIST_ORDER_NODE;
2790    }
2791    return ZONELIST_ORDER_ZONE;
2792}
2793
2794static void set_zonelist_order(void)
2795{
2796    if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2797        current_zonelist_order = default_zonelist_order();
2798    else
2799        current_zonelist_order = user_zonelist_order;
2800}
2801
2802static void build_zonelists(pg_data_t *pgdat)
2803{
2804    int j, node, load;
2805    enum zone_type i;
2806    nodemask_t used_mask;
2807    int local_node, prev_node;
2808    struct zonelist *zonelist;
2809    int order = current_zonelist_order;
2810
2811    /* initialize zonelists */
2812    for (i = 0; i < MAX_ZONELISTS; i++) {
2813        zonelist = pgdat->node_zonelists + i;
2814        zonelist->_zonerefs[0].zone = NULL;
2815        zonelist->_zonerefs[0].zone_idx = 0;
2816    }
2817
2818    /* NUMA-aware ordering of nodes */
2819    local_node = pgdat->node_id;
2820    load = nr_online_nodes;
2821    prev_node = local_node;
2822    nodes_clear(used_mask);
2823
2824    memset(node_order, 0, sizeof(node_order));
2825    j = 0;
2826
2827    while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2828        int distance = node_distance(local_node, node);
2829
2830        /*
2831         * If another node is sufficiently far away then it is better
2832         * to reclaim pages in a zone before going off node.
2833         */
2834        if (distance > RECLAIM_DISTANCE)
2835            zone_reclaim_mode = 1;
2836
2837        /*
2838         * We don't want to pressure a particular node, so we add
2839         * a penalty to the first node in the same distance group
2840         * to make the selection round-robin.
2841         */
2842        if (distance != node_distance(local_node, prev_node))
2843            node_load[node] = load;
2844
2845        prev_node = node;
2846        load--;
2847        if (order == ZONELIST_ORDER_NODE)
2848            build_zonelists_in_node_order(pgdat, node);
2849        else
2850            node_order[j++] = node; /* remember order */
2851    }
2852
2853    if (order == ZONELIST_ORDER_ZONE) {
2854        /* calculate node order -- i.e., DMA last! */
2855        build_zonelists_in_zone_order(pgdat, j);
2856    }
2857
2858    build_thisnode_zonelists(pgdat);
2859}
2860
2861/* Construct the zonelist performance cache - see further mmzone.h */
2862static void build_zonelist_cache(pg_data_t *pgdat)
2863{
2864    struct zonelist *zonelist;
2865    struct zonelist_cache *zlc;
2866    struct zoneref *z;
2867
2868    zonelist = &pgdat->node_zonelists[0];
2869    zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2870    bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2871    for (z = zonelist->_zonerefs; z->zone; z++)
2872        zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2873}
2874
2875#ifdef CONFIG_HAVE_MEMORYLESS_NODES
2876/*
2877 * Return node id of node used for "local" allocations.
2878 * I.e., first node id of first zone in arg node's generic zonelist.
2879 * Used for initializing percpu 'numa_mem', which is used primarily
2880 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
2881 */
2882int local_memory_node(int node)
2883{
2884    struct zone *zone;
2885
2886    (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
2887                   gfp_zone(GFP_KERNEL),
2888                   NULL,
2889                   &zone);
2890    return zone->node;
2891}
2892#endif
2893
2894#else /* CONFIG_NUMA */
2895
2896static void set_zonelist_order(void)
2897{
2898    current_zonelist_order = ZONELIST_ORDER_ZONE;
2899}
2900
2901static void build_zonelists(pg_data_t *pgdat)
2902{
2903    int node, local_node;
2904    enum zone_type j;
2905    struct zonelist *zonelist;
2906
2907    local_node = pgdat->node_id;
2908
2909    zonelist = &pgdat->node_zonelists[0];
2910    j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2911
2912    /*
2913     * Now we build the zonelist so that it contains the zones
2914     * of all the other nodes.
2915     * We don't want to pressure a particular node, so when
2916     * building the zones for node N, we make sure that the
2917     * zones coming right after the local ones are those from
2918     * node N+1 (modulo N)
2919     */
2920    for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2921        if (!node_online(node))
2922            continue;
2923        j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2924                            MAX_NR_ZONES - 1);
2925    }
2926    for (node = 0; node < local_node; node++) {
2927        if (!node_online(node))
2928            continue;
2929        j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2930                            MAX_NR_ZONES - 1);
2931    }
2932
2933    zonelist->_zonerefs[j].zone = NULL;
2934    zonelist->_zonerefs[j].zone_idx = 0;
2935}
2936
2937/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2938static void build_zonelist_cache(pg_data_t *pgdat)
2939{
2940    pgdat->node_zonelists[0].zlcache_ptr = NULL;
2941}
2942
2943#endif /* CONFIG_NUMA */
2944
2945/*
2946 * Boot pageset table. One per cpu which is going to be used for all
2947 * zones and all nodes. The parameters will be set in such a way
2948 * that an item put on a list will immediately be handed over to
2949 * the buddy list. This is safe since pageset manipulation is done
2950 * with interrupts disabled.
2951 *
2952 * The boot_pagesets must be kept even after bootup is complete for
2953 * unused processors and/or zones. They do play a role for bootstrapping
2954 * hotplugged processors.
2955 *
2956 * zoneinfo_show() and maybe other functions do
2957 * not check if the processor is online before following the pageset pointer.
2958 * Other parts of the kernel may not check if the zone is available.
2959 */
2960static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
2961static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
2962static void setup_zone_pageset(struct zone *zone);
2963
2964/*
2965 * Global mutex to protect against size modification of zonelists
2966 * as well as to serialize pageset setup for the new populated zone.
2967 */
2968DEFINE_MUTEX(zonelists_mutex);
2969
2970/* The return value is int just to satisfy stop_machine()'s signature */
2971static __init_refok int __build_all_zonelists(void *data)
2972{
2973    int nid;
2974    int cpu;
2975
2976#ifdef CONFIG_NUMA
2977    memset(node_load, 0, sizeof(node_load));
2978#endif
2979    for_each_online_node(nid) {
2980        pg_data_t *pgdat = NODE_DATA(nid);
2981
2982        build_zonelists(pgdat);
2983        build_zonelist_cache(pgdat);
2984    }
2985
2986#ifdef CONFIG_MEMORY_HOTPLUG
2987    /* Setup real pagesets for the new zone */
2988    if (data) {
2989        struct zone *zone = data;
2990        setup_zone_pageset(zone);
2991    }
2992#endif
2993
2994    /*
2995     * Initialize the boot_pagesets that are going to be used
2996     * for bootstrapping processors. The real pagesets for
2997     * each zone will be allocated later when the per cpu
2998     * allocator is available.
2999     *
3000     * boot_pagesets are used also for bootstrapping offline
3001     * cpus if the system is already booted because the pagesets
3002     * are needed to initialize allocators on a specific cpu too.
3003     * F.e. the percpu allocator needs the page allocator which
3004     * needs the percpu allocator in order to allocate its pagesets
3005     * (a chicken-egg dilemma).
3006     */
3007    for_each_possible_cpu(cpu) {
3008        setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3009
3010#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3011        /*
3012         * We now know the "local memory node" for each node--
3013         * i.e., the node of the first zone in the generic zonelist.
3014         * Set up numa_mem percpu variable for on-line cpus. During
3015         * boot, only the boot cpu should be on-line; we'll init the
3016         * secondary cpus' numa_mem as they come on-line. During
3017         * node/memory hotplug, we'll fixup all on-line cpus.
3018         */
3019        if (cpu_online(cpu))
3020            set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3021#endif
3022    }
3023
3024    return 0;
3025}
3026
3027/*
3028 * Called with zonelists_mutex held always
3029 * unless system_state == SYSTEM_BOOTING.
3030 */
3031void build_all_zonelists(void *data)
3032{
3033    set_zonelist_order();
3034
3035    if (system_state == SYSTEM_BOOTING) {
3036        __build_all_zonelists(NULL);
3037        mminit_verify_zonelist();
3038        cpuset_init_current_mems_allowed();
3039    } else {
3040        /* We have to stop all CPUs to guarantee there is no user
3041           of the zonelists. */
3042        stop_machine(__build_all_zonelists, data, NULL);
3043        /* cpuset refresh routine should be here */
3044    }
3045    vm_total_pages = nr_free_pagecache_pages();
3046    /*
3047     * Disable grouping by mobility if the number of pages in the
3048     * system is too low to allow the mechanism to work. It would be
3049     * more accurate, but expensive to check per-zone. This check is
3050     * made on memory-hotadd so a system can start with mobility
3051     * disabled and enable it later.
3052     */
3053    if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3054        page_group_by_mobility_disabled = 1;
3055    else
3056        page_group_by_mobility_disabled = 0;
3057
3058    printk("Built %i zonelists in %s order, mobility grouping %s. "
3059        "Total pages: %ld\n",
3060            nr_online_nodes,
3061            zonelist_order_name[current_zonelist_order],
3062            page_group_by_mobility_disabled ? "off" : "on",
3063            vm_total_pages);
3064#ifdef CONFIG_NUMA
3065    printk("Policy zone: %s\n", zone_names[policy_zone]);
3066#endif
3067}
3068
3069/*
3070 * Helper functions to size the waitqueue hash table.
3071 * Essentially these want to choose hash table sizes sufficiently
3072 * large so that collisions trying to wait on pages are rare.
3073 * But in fact, the number of active page waitqueues on typical
3074 * systems is ridiculously low, less than 200. So this is even
3075 * conservative, even though it seems large.
3076 *
3077 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3078 * waitqueues, i.e. the size of the waitq table given the number of pages.
3079 */
3080#define PAGES_PER_WAITQUEUE 256
3081
3082#ifndef CONFIG_MEMORY_HOTPLUG
3083static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3084{
3085    unsigned long size = 1;
3086
3087    pages /= PAGES_PER_WAITQUEUE;
3088
3089    while (size < pages)
3090        size <<= 1;
3091
3092    /*
3093     * Once we have dozens or even hundreds of threads sleeping
3094     * on IO we've got bigger problems than wait queue collision.
3095     * Limit the size of the wait table to a reasonable size.
3096     */
3097    size = min(size, 4096UL);
3098
3099    return max(size, 4UL);
3100}
3101#else
3102/*
3103 * A zone's size might be changed by hot-add, so it is not possible to determine
3104 * a suitable size for its wait_table. So we use the maximum size now.
3105 *
3106 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
3107 *
3108 * i386 (preemption config) : 4096 x 16 = 64Kbyte.
3109 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3110 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
3111 *
3112 * The maximum entries are prepared when a zone's memory is (512K + 256) pages
3113 * or more by the traditional way. (See above). It equals:
3114 *
3115 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
3116 * ia64(16K page size) : = ( 8G + 4M)byte.
3117 * powerpc (64K page size) : = (32G +16M)byte.
3118 */
3119static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3120{
3121    return 4096UL;
3122}
3123#endif
3124
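/*
 * Worked example (editor's annotation) for the non-hotplug variant above:
 * a 4GB zone with 4K pages spans 1048576 pages; 1048576 / 256 = 4096, the
 * next power of two is 4096 itself, and the 4096 cap leaves a 4096-entry
 * wait table.
 */
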
3125/*
3126 * This is an integer logarithm so that shifts can be used later
3127 * to extract the more random high bits from the multiplicative
3128 * hash function before the remainder is taken.
3129 */
3130static inline unsigned long wait_table_bits(unsigned long size)
3131{
3132    return ffz(~size);
3133}
3134
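/*
 * Worked example (editor's annotation): for size = 4096, ~size has its
 * lowest clear bit at position 12, so wait_table_bits(4096) = 12, i.e.
 * log2 of the power-of-two table size computed above.
 */
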
3135#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))	/* round up to a multiple of sizeof(long) */
3136
3137/*
3138 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3139 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3140 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3141 * higher will lead to a bigger reserve which will get freed as contiguous
3142 * blocks as reclaim kicks in.
3143 */
3144static void setup_zone_migrate_reserve(struct zone *zone)
3145{
3146    unsigned long start_pfn, pfn, end_pfn;
3147    struct page *page;
3148    unsigned long block_migratetype;
3149    int reserve;
3150
3151    /* Get the start pfn, end pfn and the number of blocks to reserve */
3152    start_pfn = zone->zone_start_pfn;
3153    end_pfn = start_pfn + zone->spanned_pages;
3154    reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3155                            pageblock_order;
3156
3157    /*
3158     * Reserve blocks are generally in place to help high-order atomic
3159     * allocations that are short-lived. A min_free_kbytes value that
3160     * would result in more than 2 reserve blocks for atomic allocations
3161     * is assumed to be in place to help anti-fragmentation for the
3162     * future allocation of hugepages at runtime.
3163     */
3164    reserve = min(2, reserve);
3165
3166    for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3167        if (!pfn_valid(pfn))
3168            continue;
3169        page = pfn_to_page(pfn);
3170
3171        /* Watch out for overlapping nodes */
3172        if (page_to_nid(page) != zone_to_nid(zone))
3173            continue;
3174
3175        /* Blocks with reserved pages will never be freed; skip them. */
3176        if (PageReserved(page))
3177            continue;
3178
3179        block_migratetype = get_pageblock_migratetype(page);
3180
3181        /* If this block is reserved, account for it */
3182        if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
3183            reserve--;
3184            continue;
3185        }
3186
3187        /* Suitable for reserving if this block is movable */
3188        if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
3189            set_pageblock_migratetype(page, MIGRATE_RESERVE);
3190            move_freepages_block(zone, page, MIGRATE_RESERVE);
3191            reserve--;
3192            continue;
3193        }
3194
3195        /*
3196         * If the reserve is met and this is a previous reserved block,
3197         * take it back
3198         */
3199        if (block_migratetype == MIGRATE_RESERVE) {
3200            set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3201            move_freepages_block(zone, page, MIGRATE_MOVABLE);
3202        }
3203    }
3204}
3205
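/*
 * Worked example (editor's annotation, assuming pageblock_order = 9, i.e.
 * 512-page blocks): with min_wmark_pages(zone) = 1500,
 * roundup(1500, 512) >> 9 = 3 blocks, which min(2, reserve) then clamps
 * to the 2-block ceiling intended for short-lived atomic allocations.
 */
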
3206/*
3207 * Initially all pages are reserved - free ones are freed
3208 * up by free_all_bootmem() once the early boot process is
3209 * done. Non-atomic initialization, single-pass.
3210 */
3211void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3212        unsigned long start_pfn, enum memmap_context context)
3213{
3214    struct page *page;
3215    unsigned long end_pfn = start_pfn + size;
3216    unsigned long pfn;
3217    struct zone *z;
3218
3219    if (highest_memmap_pfn < end_pfn - 1)
3220        highest_memmap_pfn = end_pfn - 1;
3221
3222    z = &NODE_DATA(nid)->node_zones[zone];
3223    for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3224        /*
3225         * There can be holes in boot-time mem_map[]s
3226         * handed to this function. They do not
3227         * exist on hotplugged memory.
3228         */
3229        if (context == MEMMAP_EARLY) {
3230            if (!early_pfn_valid(pfn))
3231                continue;
3232            if (!early_pfn_in_nid(pfn, nid))
3233                continue;
3234        }
3235        page = pfn_to_page(pfn);
3236        set_page_links(page, zone, nid, pfn);
3237        mminit_verify_page_links(page, zone, nid, pfn);
3238        init_page_count(page);
3239        reset_page_mapcount(page);
3240        SetPageReserved(page);
3241        /*
3242         * Mark the block movable so that blocks are reserved for
3243         * movable at startup. This will force kernel allocations
3244         * to reserve their blocks rather than leaking throughout
3245         * the address space during boot when many long-lived
3246         * kernel allocations are made. Later some blocks near
3247         * the start are marked MIGRATE_RESERVE by
3248         * setup_zone_migrate_reserve()
3249         *
3250         * The bitmap is created for the zone's valid pfn range, but the
3251         * memmap can be created for invalid pages (for alignment), so
3252         * check here that we do not call set_pageblock_migratetype()
3253         * against a pfn outside the zone.
3254         */
3255        if ((z->zone_start_pfn <= pfn)
3256            && (pfn < z->zone_start_pfn + z->spanned_pages)
3257            && !(pfn & (pageblock_nr_pages - 1)))
3258            set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3259
3260        INIT_LIST_HEAD(&page->lru);
3261#ifdef WANT_PAGE_VIRTUAL
3262        /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3263        if (!is_highmem_idx(zone))
3264            set_page_address(page, __va(pfn << PAGE_SHIFT));
3265#endif
3266    }
3267}
3268
3269static void __meminit zone_init_free_lists(struct zone *zone)
3270{
3271    int order, t;
3272    for_each_migratetype_order(order, t) {
3273        INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3274        zone->free_area[order].nr_free = 0;
3275    }
3276}
3277
3278#ifndef __HAVE_ARCH_MEMMAP_INIT
3279#define memmap_init(size, nid, zone, start_pfn) \
3280    memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3281#endif
3282
3283static int zone_batchsize(struct zone *zone)
3284{
3285#ifdef CONFIG_MMU
3286    int batch;
3287
3288    /*
3289     * The per-cpu-pages pools are set to around 1/1000th of the
3290     * size of the zone, but no more than half a megabyte.
3291     *
3292     * OK, so we don't know how big the cache is. So guess.
3293     */
3294    batch = zone->present_pages / 1024;
3295    if (batch * PAGE_SIZE > 512 * 1024)
3296        batch = (512 * 1024) / PAGE_SIZE;
3297    batch /= 4; /* We effectively *= 4 below */
3298    if (batch < 1)
3299        batch = 1;
3300
3301    /*
3302     * Clamp the batch to a 2^n - 1 value. Having a power
3303     * of 2 value was found to be more likely to have
3304     * suboptimal cache aliasing properties in some cases.
3305     *
3306     * For example if 2 tasks are alternately allocating
3307     * batches of pages, one task can end up with a lot
3308     * of pages of one half of the possible page colors
3309     * and the other with pages of the other colors.
3310     */
3311    batch = rounddown_pow_of_two(batch + batch/2) - 1;
3312
3313    return batch;
3314
3315#else
3316    /* The deferral and batching of frees should be suppressed under NOMMU
3317     * conditions.
3318     *
3319     * The problem is that NOMMU needs to be able to allocate large chunks
3320     * of contiguous memory as there's no hardware page translation to
3321     * assemble apparent contiguous memory from discontiguous pages.
3322     *
3323     * Queueing large contiguous runs of pages for batching, however,
3324     * causes the pages to actually be freed in smaller chunks. As there
3325     * can be a significant delay between the individual batches being
3326     * recycled, this leads to the once large chunks of space being
3327     * fragmented and becoming unavailable for high-order allocations.
3328     */
3329    return 0;
3330#endif
3331}
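
/*
 * Worked example (an illustration, not kernel code; assumes 4KB pages): for
 * a hypothetical 1GB zone, present_pages is 262144, so batch starts at
 * 262144 / 1024 = 256 pages. 256 * 4096 bytes exceeds 512KB, so batch is
 * clamped to 128 and then divided by 4, giving 32. Finally,
 * rounddown_pow_of_two(32 + 16) - 1 = 32 - 1 = 31.
 */
#if 0
static int example_zone_batchsize_1gb(void)
{
	int batch = (1 << 30) / 4096 / 1024;	/* 256 pages */

	if (batch * 4096 > 512 * 1024)
		batch = (512 * 1024) / 4096;	/* clamp to 128 */
	batch /= 4;				/* 32 */
	/* rounddown_pow_of_two(32 + 32/2) == 32 */
	return 32 - 1;				/* 31 */
}
#endif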
3332
3333static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3334{
3335    struct per_cpu_pages *pcp;
3336    int migratetype;
3337
3338    memset(p, 0, sizeof(*p));
3339
3340    pcp = &p->pcp;
3341    pcp->count = 0;
3342    pcp->high = 6 * batch;
3343    pcp->batch = max(1UL, 1 * batch);
3344    for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3345        INIT_LIST_HEAD(&pcp->lists[migratetype]);
3346}
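
/*
 * Worked example (illustrative): with the batch of 31 computed above for a
 * 1GB zone, a freshly initialised pageset frees 31 pages at a time and
 * holds at most high = 6 * 31 = 186 pages per CPU, about 744KB with 4KB
 * pages, spread over the MIGRATE_PCPTYPES per-migratetype lists.
 */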
3347
3348/*
3349  * setup_pagelist_highmark() sets the high water mark of the hot
3350  * per_cpu_pagelist of pageset @p to the value @high.
3351 */
3352
3353static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3354                unsigned long high)
3355{
3356    struct per_cpu_pages *pcp;
3357
3358    pcp = &p->pcp;
3359    pcp->high = high;
3360    pcp->batch = max(1UL, high/4);
3361    if ((high/4) > (PAGE_SHIFT * 8))
3362        pcp->batch = PAGE_SHIFT * 8;
3363}
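
/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): a request of
 * high = 1000 gives batch = 1000 / 4 = 250, but 250 exceeds
 * PAGE_SHIFT * 8 == 96, so the batch is capped at 96.
 */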
3364
3365static __meminit void setup_zone_pageset(struct zone *zone)
3366{
3367    int cpu;
3368
3369    zone->pageset = alloc_percpu(struct per_cpu_pageset);
3370
3371    for_each_possible_cpu(cpu) {
3372        struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3373
3374        setup_pageset(pcp, zone_batchsize(zone));
3375
3376        if (percpu_pagelist_fraction)
3377            setup_pagelist_highmark(pcp,
3378                (zone->present_pages /
3379                    percpu_pagelist_fraction));
3380    }
3381}
3382
3383/*
3384 * Allocate per cpu pagesets and initialize them.
3385 * Before this call only boot pagesets were available.
3386 */
3387void __init setup_per_cpu_pageset(void)
3388{
3389    struct zone *zone;
3390
3391    for_each_populated_zone(zone)
3392        setup_zone_pageset(zone);
3393}
3394
3395static noinline __init_refok
3396int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3397{
3398    int i;
3399    struct pglist_data *pgdat = zone->zone_pgdat;
3400    size_t alloc_size;
3401
3402    /*
3403     * The per-page waitqueue mechanism uses hashed waitqueues
3404     * per zone.
3405     */
3406    zone->wait_table_hash_nr_entries =
3407         wait_table_hash_nr_entries(zone_size_pages);
3408    zone->wait_table_bits =
3409        wait_table_bits(zone->wait_table_hash_nr_entries);
3410    alloc_size = zone->wait_table_hash_nr_entries
3411                    * sizeof(wait_queue_head_t);
3412
3413    if (!slab_is_available()) {
3414        zone->wait_table = (wait_queue_head_t *)
3415            alloc_bootmem_node(pgdat, alloc_size);
3416    } else {
3417        /*
3418         * This case means that a zone whose size was 0 gets new memory
3419         * via memory hot-add.
3420         * It may also be the case that a whole new node was hot-added.
3421         * In that case vmalloc() cannot yet allocate from the new
3422         * node's memory, so this wait_table must be initialized to
3423         * serve the new node as well.
3424         * Making use of the new node's own memory will require further
3425         * work.
3426         */
3427        zone->wait_table = vmalloc(alloc_size);
3428    }
3429    if (!zone->wait_table)
3430        return -ENOMEM;
3431
3432    for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3433        init_waitqueue_head(zone->wait_table + i);
3434
3435    return 0;
3436}
3437
3438static int __zone_pcp_update(void *data)
3439{
3440    struct zone *zone = data;
3441    int cpu;
3442    unsigned long batch = zone_batchsize(zone), flags;
3443
3444    for_each_possible_cpu(cpu) {
3445        struct per_cpu_pageset *pset;
3446        struct per_cpu_pages *pcp;
3447
3448        pset = per_cpu_ptr(zone->pageset, cpu);
3449        pcp = &pset->pcp;
3450
3451        local_irq_save(flags);
3452        free_pcppages_bulk(zone, pcp->count, pcp);
3453        setup_pageset(pset, batch);
3454        local_irq_restore(flags);
3455    }
3456    return 0;
3457}
3458
3459void zone_pcp_update(struct zone *zone)
3460{
3461    stop_machine(__zone_pcp_update, zone, NULL);
3462}
3463
3464static __meminit void zone_pcp_init(struct zone *zone)
3465{
3466    /*
3467     * per cpu subsystem is not up at this point. The following code
3468     * relies on the ability of the linker to provide the
3469     * offset of a (static) per cpu variable into the per cpu area.
3470     */
3471    zone->pageset = &boot_pageset;
3472
3473    if (zone->present_pages)
3474        printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
3475            zone->name, zone->present_pages,
3476                     zone_batchsize(zone));
3477}
3478
3479__meminit int init_currently_empty_zone(struct zone *zone,
3480                    unsigned long zone_start_pfn,
3481                    unsigned long size,
3482                    enum memmap_context context)
3483{
3484    struct pglist_data *pgdat = zone->zone_pgdat;
3485    int ret;
3486    ret = zone_wait_table_init(zone, size);
3487    if (ret)
3488        return ret;
3489    pgdat->nr_zones = zone_idx(zone) + 1;
3490
3491    zone->zone_start_pfn = zone_start_pfn;
3492
3493    mminit_dprintk(MMINIT_TRACE, "memmap_init",
3494            "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3495            pgdat->node_id,
3496            (unsigned long)zone_idx(zone),
3497            zone_start_pfn, (zone_start_pfn + size));
3498
3499    zone_init_free_lists(zone);
3500
3501    return 0;
3502}
3503
3504#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3505/*
3506 * Basic iterator support. Return the first range of PFNs for a node
3507 * Note: nid == MAX_NUMNODES returns first region regardless of node
3508 */
3509static int __meminit first_active_region_index_in_nid(int nid)
3510{
3511    int i;
3512
3513    for (i = 0; i < nr_nodemap_entries; i++)
3514        if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
3515            return i;
3516
3517    return -1;
3518}
3519
3520/*
3521 * Basic iterator support. Return the next active range of PFNs for a node
3522 * Note: nid == MAX_NUMNODES returns next region regardless of node
3523 */
3524static int __meminit next_active_region_index_in_nid(int index, int nid)
3525{
3526    for (index = index + 1; index < nr_nodemap_entries; index++)
3527        if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
3528            return index;
3529
3530    return -1;
3531}
3532
3533#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3534/*
3535 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3536 * Architectures may implement their own version but if add_active_range()
3537 * was used and there are no special requirements, this is a convenient
3538 * alternative.
3539 */
3540int __meminit __early_pfn_to_nid(unsigned long pfn)
3541{
3542    int i;
3543
3544    for (i = 0; i < nr_nodemap_entries; i++) {
3545        unsigned long start_pfn = early_node_map[i].start_pfn;
3546        unsigned long end_pfn = early_node_map[i].end_pfn;
3547
3548        if (start_pfn <= pfn && pfn < end_pfn)
3549            return early_node_map[i].nid;
3550    }
3551    /* This is a memory hole */
3552    return -1;
3553}
3554#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3555
3556int __meminit early_pfn_to_nid(unsigned long pfn)
3557{
3558    int nid;
3559
3560    nid = __early_pfn_to_nid(pfn);
3561    if (nid >= 0)
3562        return nid;
3563    /* pfn lies in a memory hole; fall back to node 0 */
3564    return 0;
3565}
3566
3567#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3568bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3569{
3570    int nid;
3571
3572    nid = __early_pfn_to_nid(pfn);
3573    if (nid >= 0 && nid != node)
3574        return false;
3575    return true;
3576}
3577#endif
3578
3579/* Basic iterator support to walk early_node_map[] */
3580#define for_each_active_range_index_in_nid(i, nid) \
3581    for (i = first_active_region_index_in_nid(nid); i != -1; \
3582                i = next_active_region_index_in_nid(i, nid))
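
/*
 * Usage sketch (illustrative only): walking every registered range on
 * node 0 with the iterator defined above.
 */
#if 0
static void __init example_walk_node0(void)
{
	int i;

	for_each_active_range_index_in_nid(i, 0)
		printk(KERN_DEBUG "range %d: %#lx -> %#lx\n", i,
			early_node_map[i].start_pfn,
			early_node_map[i].end_pfn);
}
#endif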
3583
3584/**
3585 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3586 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3587 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3588 *
3589 * If an architecture guarantees that all ranges registered with
3590 * add_active_ranges() contain no holes and may be freed, this
3591  * function may be used instead of calling free_bootmem() manually.
3592 */
3593void __init free_bootmem_with_active_regions(int nid,
3594                        unsigned long max_low_pfn)
3595{
3596    int i;
3597
3598    for_each_active_range_index_in_nid(i, nid) {
3599        unsigned long size_pages = 0;
3600        unsigned long end_pfn = early_node_map[i].end_pfn;
3601
3602        if (early_node_map[i].start_pfn >= max_low_pfn)
3603            continue;
3604
3605        if (end_pfn > max_low_pfn)
3606            end_pfn = max_low_pfn;
3607
3608        size_pages = end_pfn - early_node_map[i].start_pfn;
3609        free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3610                PFN_PHYS(early_node_map[i].start_pfn),
3611                size_pages << PAGE_SHIFT);
3612    }
3613}
3614
3615int __init add_from_early_node_map(struct range *range, int az,
3616                   int nr_range, int nid)
3617{
3618    int i;
3619    u64 start, end;
3620
3621    /* walk early_node_map[] to find the usable ranges for this node */
3622    for_each_active_range_index_in_nid(i, nid) {
3623        start = early_node_map[i].start_pfn;
3624        end = early_node_map[i].end_pfn;
3625        nr_range = add_range(range, az, nr_range, start, end);
3626    }
3627    return nr_range;
3628}
3629
3630#ifdef CONFIG_NO_BOOTMEM
3631void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
3632                    u64 goal, u64 limit)
3633{
3634    int i;
3635    void *ptr;
3636
3637    if (limit > get_max_mapped())
3638        limit = get_max_mapped();
3639
3640    /* walk early_node_map[] to find the usable ranges for this node */
3641    for_each_active_range_index_in_nid(i, nid) {
3642        u64 addr;
3643        u64 ei_start, ei_last;
3644
3645        ei_last = early_node_map[i].end_pfn;
3646        ei_last <<= PAGE_SHIFT;
3647        ei_start = early_node_map[i].start_pfn;
3648        ei_start <<= PAGE_SHIFT;
3649        addr = find_early_area(ei_start, ei_last,
3650                     goal, limit, size, align);
3651
3652        if (addr == -1ULL)
3653            continue;
3654
3655#if 0
3656        printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
3657                nid,
3658                ei_start, ei_last, goal, limit, size,
3659                align, addr);
3660#endif
3661
3662        ptr = phys_to_virt(addr);
3663        memset(ptr, 0, size);
3664        reserve_early_without_check(addr, addr + size, "BOOTMEM");
3665        /*
3666         * The min_count is set to 0 so that bootmem allocated blocks
3667         * are never reported as leaks.
3668         */
3669        kmemleak_alloc(ptr, size, 0, 0);
3670        return ptr;
3671    }
3672
3673    return NULL;
3674}
3675#endif
3676
3677
3678void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3679{
3680    int i;
3681    int ret;
3682
3683    for_each_active_range_index_in_nid(i, nid) {
3684        ret = work_fn(early_node_map[i].start_pfn,
3685                  early_node_map[i].end_pfn, data);
3686        if (ret)
3687            break;
3688    }
3689}
3690/**
3691 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3692 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3693 *
3694 * If an architecture guarantees that all ranges registered with
3695 * add_active_ranges() contain no holes, this function may be used
3696 * instead of calling memory_present() manually.
3697 */
3698void __init sparse_memory_present_with_active_regions(int nid)
3699{
3700    int i;
3701
3702    for_each_active_range_index_in_nid(i, nid)
3703        memory_present(early_node_map[i].nid,
3704                early_node_map[i].start_pfn,
3705                early_node_map[i].end_pfn);
3706}
3707
3708/**
3709 * get_pfn_range_for_nid - Return the start and end page frames for a node
3710 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3711 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3712 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3713 *
3714 * It returns the start and end page frame of a node based on information
3715 * provided by an arch calling add_active_range(). If called for a node
3716 * with no available memory, a warning is printed and the start and end
3717 * PFNs will be 0.
3718 */
3719void __meminit get_pfn_range_for_nid(unsigned int nid,
3720            unsigned long *start_pfn, unsigned long *end_pfn)
3721{
3722    int i;
3723    *start_pfn = -1UL;
3724    *end_pfn = 0;
3725
3726    for_each_active_range_index_in_nid(i, nid) {
3727        *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3728        *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3729    }
3730
3731    if (*start_pfn == -1UL)
3732        *start_pfn = 0;
3733}
3734
3735/*
3736 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3737 * assumption is made that zones within a node are ordered in monotonic
3738 * increasing memory addresses so that the "highest" populated zone is used
3739 */
3740static void __init find_usable_zone_for_movable(void)
3741{
3742    int zone_index;
3743    for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3744        if (zone_index == ZONE_MOVABLE)
3745            continue;
3746
3747        if (arch_zone_highest_possible_pfn[zone_index] >
3748                arch_zone_lowest_possible_pfn[zone_index])
3749            break;
3750    }
3751
3752    VM_BUG_ON(zone_index == -1);
3753    movable_zone = zone_index;
3754}
3755
3756/*
3757 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3758 * because it is sized independent of architecture. Unlike the other zones,
3759 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3760 * in each node depending on the size of each node and how evenly kernelcore
3761 * is distributed. This helper function adjusts the zone ranges
3762 * provided by the architecture for a given node by using the end of the
3763 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3764 * zones within a node are in order of monotonically increasing memory addresses.
3765 */
3766static void __meminit adjust_zone_range_for_zone_movable(int nid,
3767                    unsigned long zone_type,
3768                    unsigned long node_start_pfn,
3769                    unsigned long node_end_pfn,
3770                    unsigned long *zone_start_pfn,
3771                    unsigned long *zone_end_pfn)
3772{
3773    /* Only adjust if ZONE_MOVABLE is on this node */
3774    if (zone_movable_pfn[nid]) {
3775        /* Size ZONE_MOVABLE */
3776        if (zone_type == ZONE_MOVABLE) {
3777            *zone_start_pfn = zone_movable_pfn[nid];
3778            *zone_end_pfn = min(node_end_pfn,
3779                arch_zone_highest_possible_pfn[movable_zone]);
3780
3781        /* Adjust for ZONE_MOVABLE starting within this range */
3782        } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3783                *zone_end_pfn > zone_movable_pfn[nid]) {
3784            *zone_end_pfn = zone_movable_pfn[nid];
3785
3786        /* Check if this whole range is within ZONE_MOVABLE */
3787        } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3788            *zone_start_pfn = *zone_end_pfn;
3789    }
3790}
3791
3792/*
3793 * Return the number of pages a zone spans in a node, including holes
3794 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3795 */
3796static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3797                    unsigned long zone_type,
3798                    unsigned long *ignored)
3799{
3800    unsigned long node_start_pfn, node_end_pfn;
3801    unsigned long zone_start_pfn, zone_end_pfn;
3802
3803    /* Get the start and end of the node and zone */
3804    get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3805    zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3806    zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3807    adjust_zone_range_for_zone_movable(nid, zone_type,
3808                node_start_pfn, node_end_pfn,
3809                &zone_start_pfn, &zone_end_pfn);
3810
3811    /* Check that this node has pages within the zone's required range */
3812    if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3813        return 0;
3814
3815    /* Move the zone boundaries inside the node if necessary */
3816    zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3817    zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3818
3819    /* Return the spanned pages */
3820    return zone_end_pfn - zone_start_pfn;
3821}
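
/*
 * Worked example (illustrative): if a node spans pfns 0x1000 -> 0x9000 and
 * the architecture places ZONE_NORMAL at pfns 0x1000 -> 0x100000, the zone
 * boundaries are clipped to the node and 0x8000 spanned pages are returned.
 * A zone whose architectural range ends before the node begins returns 0.
 */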
3822
3823/*
3824 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3825 * then all holes in the requested range will be accounted for.
3826 */
3827unsigned long __meminit __absent_pages_in_range(int nid,
3828                unsigned long range_start_pfn,
3829                unsigned long range_end_pfn)
3830{
3831    int i = 0;
3832    unsigned long prev_end_pfn = 0, hole_pages = 0;
3833    unsigned long start_pfn;
3834
3835    /* Find the end_pfn of the first active range of pfns in the node */
3836    i = first_active_region_index_in_nid(nid);
3837    if (i == -1)
3838        return 0;
3839
3840    prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3841
3842    /* Account for ranges before physical memory on this node */
3843    if (early_node_map[i].start_pfn > range_start_pfn)
3844        hole_pages = prev_end_pfn - range_start_pfn;
3845
3846    /* Find all holes for the zone within the node */
3847    for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3848
3849        /* No need to continue if prev_end_pfn is outside the zone */
3850        if (prev_end_pfn >= range_end_pfn)
3851            break;
3852
3853        /* Make sure the end of the zone is not within the hole */
3854        start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3855        prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3856
3857        /* Update the hole size count and move on */
3858        if (start_pfn > range_start_pfn) {
3859            BUG_ON(prev_end_pfn > start_pfn);
3860            hole_pages += start_pfn - prev_end_pfn;
3861        }
3862        prev_end_pfn = early_node_map[i].end_pfn;
3863    }
3864
3865    /* Account for ranges past physical memory on this node */
3866    if (range_end_pfn > prev_end_pfn)
3867        hole_pages += range_end_pfn -
3868                max(range_start_pfn, prev_end_pfn);
3869
3870    return hole_pages;
3871}
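
/*
 * Worked example (illustrative): with registered ranges 0x100 -> 0x200 and
 * 0x300 -> 0x400 on a node, a query over 0x0 -> 0x500 finds three holes:
 * 0x100 pages before the first range, 0x100 pages between the two ranges
 * and 0x100 pages after the last one, for 0x300 absent pages in total.
 */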
3872
3873/**
3874 * absent_pages_in_range - Return number of page frames in holes within a range
3875 * @start_pfn: The start PFN to start searching for holes
3876 * @end_pfn: The end PFN to stop searching for holes
3877 *
3878 * It returns the number of page frames in memory holes within a range.
3879 */
3880unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3881                            unsigned long end_pfn)
3882{
3883    return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3884}
3885
3886/* Return the number of page frames in holes in a zone on a node */
3887static unsigned long __meminit zone_absent_pages_in_node(int nid,
3888                    unsigned long zone_type,
3889                    unsigned long *ignored)
3890{
3891    unsigned long node_start_pfn, node_end_pfn;
3892    unsigned long zone_start_pfn, zone_end_pfn;
3893
3894    get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3895    zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3896                            node_start_pfn);
3897    zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3898                            node_end_pfn);
3899
3900    adjust_zone_range_for_zone_movable(nid, zone_type,
3901            node_start_pfn, node_end_pfn,
3902            &zone_start_pfn, &zone_end_pfn);
3903    return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3904}
3905
3906#else
3907static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3908                    unsigned long zone_type,
3909                    unsigned long *zones_size)
3910{
3911    return zones_size[zone_type];
3912}
3913
3914static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3915                        unsigned long zone_type,
3916                        unsigned long *zholes_size)
3917{
3918    if (!zholes_size)
3919        return 0;
3920
3921    return zholes_size[zone_type];
3922}
3923
3924#endif
3925
3926static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3927        unsigned long *zones_size, unsigned long *zholes_size)
3928{
3929    unsigned long realtotalpages, totalpages = 0;
3930    enum zone_type i;
3931
3932    for (i = 0; i < MAX_NR_ZONES; i++)
3933        totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3934                                zones_size);
3935    pgdat->node_spanned_pages = totalpages;
3936
3937    realtotalpages = totalpages;
3938    for (i = 0; i < MAX_NR_ZONES; i++)
3939        realtotalpages -=
3940            zone_absent_pages_in_node(pgdat->node_id, i,
3941                                zholes_size);
3942    pgdat->node_present_pages = realtotalpages;
3943    printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3944                            realtotalpages);
3945}
3946
3947#ifndef CONFIG_SPARSEMEM
3948/*
3949 * Calculate the size of the zone->blockflags bitmap, rounded up to an
3950 * unsigned long. Start by making sure zonesize is a multiple of
3951 * pageblock_order by rounding up, then use NR_PAGEBLOCK_BITS worth of bits
3952 * per pageblock. Finally round what is now in bits up to the nearest long,
3953 * and return the result in bytes.
3954 */
3955static unsigned long __init usemap_size(unsigned long zonesize)
3956{
3957    unsigned long usemapsize;
3958
3959    usemapsize = roundup(zonesize, pageblock_nr_pages);
3960    usemapsize = usemapsize >> pageblock_order;
3961    usemapsize *= NR_PAGEBLOCK_BITS;
3962    usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3963
3964    return usemapsize / 8;
3965}
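
/*
 * Worked example (illustrative; pageblock_order == 9, NR_PAGEBLOCK_BITS == 4
 * and 64-bit longs are all assumptions for the arithmetic): a zone of
 * 262144 pages holds 262144 >> 9 = 512 pageblocks, needing 512 * 4 = 2048
 * bits. 2048 is already a multiple of 64, so 2048 / 8 = 256 bytes are
 * returned.
 */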
3966
3967static void __init setup_usemap(struct pglist_data *pgdat,
3968                struct zone *zone, unsigned long zonesize)
3969{
3970    unsigned long usemapsize = usemap_size(zonesize);
3971    zone->pageblock_flags = NULL;
3972    if (usemapsize)
3973        zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3974}
3975#else
3976static inline void setup_usemap(struct pglist_data *pgdat,
3977                struct zone *zone, unsigned long zonesize) {}
3978#endif /* CONFIG_SPARSEMEM */
3979
3980#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3981
3982/* Return a sensible default order for the pageblock size. */
3983static inline int pageblock_default_order(void)
3984{
3985    if (HPAGE_SHIFT > PAGE_SHIFT)
3986        return HUGETLB_PAGE_ORDER;
3987
3988    return MAX_ORDER-1;
3989}
3990
3991/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3992static inline void __init set_pageblock_order(unsigned int order)
3993{
3994    /* Check that pageblock_nr_pages has not already been setup */
3995    if (pageblock_order)
3996        return;
3997
3998    /*
3999     * Assume the largest contiguous order of interest is a huge page.
4000     * This value may be variable depending on boot parameters on IA64.
4001     */
4002    pageblock_order = order;
4003}
4004#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4005
4006/*
4007 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4008 * and pageblock_default_order() are unused as pageblock_order is set
4009 * at compile-time. See include/linux/pageblock-flags.h for the values of
4010 * pageblock_order based on the kernel config.
4011 */
4012static inline int pageblock_default_order(unsigned int order)
4013{
4014    return MAX_ORDER-1;
4015}
4016#define set_pageblock_order(x) do {} while (0)
4017
4018#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4019
4020/*
4021 * Set up the zone data structures:
4022 * - mark all pages reserved
4023 * - mark all memory queues empty
4024 * - clear the memory bitmaps
4025 */
4026static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4027        unsigned long *zones_size, unsigned long *zholes_size)
4028{
4029    enum zone_type j;
4030    int nid = pgdat->node_id;
4031    unsigned long zone_start_pfn = pgdat->node_start_pfn;
4032    int ret;
4033
4034    pgdat_resize_init(pgdat);
4035    pgdat->nr_zones = 0;
4036    init_waitqueue_head(&pgdat->kswapd_wait);
4037    pgdat->kswapd_max_order = 0;
4038    pgdat_page_cgroup_init(pgdat);
4039    
4040    for (j = 0; j < MAX_NR_ZONES; j++) {
4041        struct zone *zone = pgdat->node_zones + j;
4042        unsigned long size, realsize, memmap_pages;
4043        enum lru_list l;
4044
4045        size = zone_spanned_pages_in_node(nid, j, zones_size);
4046        realsize = size - zone_absent_pages_in_node(nid, j,
4047                                zholes_size);
4048
4049        /*
4050         * Adjust realsize so that it accounts for how much memory
4051         * is used by this zone for memmap. This affects the watermark
4052         * and per-cpu initialisations
4053         */
4054        memmap_pages =
4055            PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4056        if (realsize >= memmap_pages) {
4057            realsize -= memmap_pages;
4058            if (memmap_pages)
4059                printk(KERN_DEBUG
4060                       " %s zone: %lu pages used for memmap\n",
4061                       zone_names[j], memmap_pages);
4062        } else
4063            printk(KERN_WARNING
4064                " %s zone: %lu pages exceeds realsize %lu\n",
4065                zone_names[j], memmap_pages, realsize);
4066
4067        /* Account for reserved pages */
4068        if (j == 0 && realsize > dma_reserve) {
4069            realsize -= dma_reserve;
4070            printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
4071                    zone_names[0], dma_reserve);
4072        }
4073
4074        if (!is_highmem_idx(j))
4075            nr_kernel_pages += realsize;
4076        nr_all_pages += realsize;
4077
4078        zone->spanned_pages = size;
4079        zone->present_pages = realsize;
4080#ifdef CONFIG_NUMA
4081        zone->node = nid;
4082        zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4083                        / 100;
4084        zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4085#endif
4086        zone->name = zone_names[j];
4087        spin_lock_init(&zone->lock);
4088        spin_lock_init(&zone->lru_lock);
4089        zone_seqlock_init(zone);
4090        zone->zone_pgdat = pgdat;
4091
4092        zone->prev_priority = DEF_PRIORITY;
4093
4094        zone_pcp_init(zone);
4095        for_each_lru(l) {
4096            INIT_LIST_HEAD(&zone->lru[l].list);
4097            zone->reclaim_stat.nr_saved_scan[l] = 0;
4098        }
4099        zone->reclaim_stat.recent_rotated[0] = 0;
4100        zone->reclaim_stat.recent_rotated[1] = 0;
4101        zone->reclaim_stat.recent_scanned[0] = 0;
4102        zone->reclaim_stat.recent_scanned[1] = 0;
4103        zap_zone_vm_stats(zone);
4104        zone->flags = 0;
4105        if (!size)
4106            continue;
4107
4108        set_pageblock_order(pageblock_default_order());
4109        setup_usemap(pgdat, zone, size);
4110        ret = init_currently_empty_zone(zone, zone_start_pfn,
4111                        size, MEMMAP_EARLY);
4112        BUG_ON(ret);
4113        memmap_init(size, nid, j, zone_start_pfn);
4114        zone_start_pfn += size;
4115    }
4116}
4117
4118static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4119{
4120    /* Skip empty nodes */
4121    if (!pgdat->node_spanned_pages)
4122        return;
4123
4124#ifdef CONFIG_FLAT_NODE_MEM_MAP
4125    /* ia64 gets its own node_mem_map, before this, without bootmem */
4126    if (!pgdat->node_mem_map) {
4127        unsigned long size, start, end;
4128        struct page *map;
4129
4130        /*
4131         * The zone's endpoints aren't required to be MAX_ORDER
4132         * aligned, but the node_mem_map endpoints must be, in order
4133         * for the buddy allocator to function correctly.
4134         */
4135        start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4136        end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4137        end = ALIGN(end, MAX_ORDER_NR_PAGES);
4138        size = (end - start) * sizeof(struct page);
4139        map = alloc_remap(pgdat->node_id, size);
4140        if (!map)
4141            map = alloc_bootmem_node(pgdat, size);
4142        pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4143    }
4144#ifndef CONFIG_NEED_MULTIPLE_NODES
4145    /*
4146     * With no DISCONTIG, the global mem_map is just set as node 0's
4147     */
4148    if (pgdat == NODE_DATA(0)) {
4149        mem_map = NODE_DATA(0)->node_mem_map;
4150#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4151        if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4152            mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4153#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4154    }
4155#endif
4156#endif /* CONFIG_FLAT_NODE_MEM_MAP */
4157}
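
/*
 * Worked example (illustrative, assuming MAX_ORDER_NR_PAGES == 1024): for a
 * node starting at pfn 0x11c8, start is rounded down to 0x1000, the map is
 * sized from there, and node_mem_map ends up pointing 0x1c8 struct pages
 * into the allocation, preserving the buddy allocator's alignment
 * assumption.
 */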
4158
4159void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4160        unsigned long node_start_pfn, unsigned long *zholes_size)
4161{
4162    pg_data_t *pgdat = NODE_DATA(nid);
4163
4164    pgdat->node_id = nid;
4165    pgdat->node_start_pfn = node_start_pfn;
4166    calculate_node_totalpages(pgdat, zones_size, zholes_size);
4167
4168    alloc_node_mem_map(pgdat);
4169#ifdef CONFIG_FLAT_NODE_MEM_MAP
4170    printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4171        nid, (unsigned long)pgdat,
4172        (unsigned long)pgdat->node_mem_map);
4173#endif
4174
4175    free_area_init_core(pgdat, zones_size, zholes_size);
4176}
4177
4178#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
4179
4180#if MAX_NUMNODES > 1
4181/*
4182 * Figure out the number of possible node ids.
4183 */
4184static void __init setup_nr_node_ids(void)
4185{
4186    unsigned int node;
4187    unsigned int highest = 0;
4188
4189    for_each_node_mask(node, node_possible_map)
4190        highest = node;
4191    nr_node_ids = highest + 1;
4192}
4193#else
4194static inline void setup_nr_node_ids(void)
4195{
4196}
4197#endif
4198
4199/**
4200 * add_active_range - Register a range of PFNs backed by physical memory
4201 * @nid: The node ID the range resides on
4202 * @start_pfn: The start PFN of the available physical memory
4203 * @end_pfn: The end PFN of the available physical memory
4204 *
4205 * These ranges are stored in early_node_map[] and later used by
4206 * free_area_init_nodes() to calculate zone sizes and holes. If the
4207 * range spans a memory hole, it is up to the architecture to ensure
4208 * the memory is not freed by the bootmem allocator. If possible
4209 * the range being registered will be merged with existing ranges.
4210 */
4211void __init add_active_range(unsigned int nid, unsigned long start_pfn,
4212                        unsigned long end_pfn)
4213{
4214    int i;
4215
4216    mminit_dprintk(MMINIT_TRACE, "memory_register",
4217            "Entering add_active_range(%d, %#lx, %#lx) "
4218            "%d entries of %d used\n",
4219            nid, start_pfn, end_pfn,
4220            nr_nodemap_entries, MAX_ACTIVE_REGIONS);
4221
4222    mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
4223
4224    /* Merge with existing active regions if possible */
4225    for (i = 0; i < nr_nodemap_entries; i++) {
4226        if (early_node_map[i].nid != nid)
4227            continue;
4228
4229        /* Skip if an existing region covers this new one */
4230        if (start_pfn >= early_node_map[i].start_pfn &&
4231                end_pfn <= early_node_map[i].end_pfn)
4232            return;
4233
4234        /* Merge forward if suitable */
4235        if (start_pfn <= early_node_map[i].end_pfn &&
4236                end_pfn > early_node_map[i].end_pfn) {
4237            early_node_map[i].end_pfn = end_pfn;
4238            return;
4239        }
4240
4241        /* Merge backward if suitable */
4242        if (start_pfn < early_node_map[i].start_pfn &&
4243                end_pfn >= early_node_map[i].start_pfn) {
4244            early_node_map[i].start_pfn = start_pfn;
4245            return;
4246        }
4247    }
4248
4249    /* Check that early_node_map is large enough */
4250    if (i >= MAX_ACTIVE_REGIONS) {
4251        printk(KERN_CRIT "More than %d memory regions, truncating\n",
4252                            MAX_ACTIVE_REGIONS);
4253        return;
4254    }
4255
4256    early_node_map[i].nid = nid;
4257    early_node_map[i].start_pfn = start_pfn;
4258    early_node_map[i].end_pfn = end_pfn;
4259    nr_nodemap_entries = i + 1;
4260}
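
/*
 * Usage sketch (illustrative only; the pfn values are made up): shows the
 * merging behaviour described above, starting from an empty map.
 */
#if 0
static void __init example_register_ranges(void)
{
	add_active_range(0, 0x100, 0x200);	/* new entry [0x100, 0x200) */
	add_active_range(0, 0x200, 0x300);	/* merges forward: [0x100, 0x300) */
	add_active_range(0, 0x080, 0x100);	/* merges backward: [0x080, 0x300) */
	add_active_range(0, 0x090, 0x250);	/* already covered, no change */
}
#endif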
4261
4262/**
4263 * remove_active_range - Shrink an existing registered range of PFNs
4264 * @nid: The node id of the range that should be shrunk
4265 * @start_pfn: The start PFN of the range to be removed
4266 * @end_pfn: The end PFN of the range to be removed
4267 *
4268 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
4269 * The map is kept near the end of the physical page range that has already been
4270 * registered. This function allows an arch to shrink an existing registered
4271 * range.
4272 */
4273void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
4274                unsigned long end_pfn)
4275{
4276    int i, j;
4277    int removed = 0;
4278
4279    printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
4280              nid, start_pfn, end_pfn);
4281
4282    /* Find the old active region end and shrink */
4283    for_each_active_range_index_in_nid(i, nid) {
4284        if (early_node_map[i].start_pfn >= start_pfn &&
4285            early_node_map[i].end_pfn <= end_pfn) {
4286            /* clear it */
4287            early_node_map[i].start_pfn = 0;
4288            early_node_map[i].end_pfn = 0;
4289            removed = 1;
4290            continue;
4291        }
4292        if (early_node_map[i].start_pfn < start_pfn &&
4293            early_node_map[i].end_pfn > start_pfn) {
4294            unsigned long temp_end_pfn = early_node_map[i].end_pfn;
4295            early_node_map[i].end_pfn = start_pfn;
4296            if (temp_end_pfn > end_pfn)
4297                add_active_range(nid, end_pfn, temp_end_pfn);
4298            continue;
4299        }
4300        if (early_node_map[i].start_pfn >= start_pfn &&
4301            early_node_map[i].end_pfn > end_pfn &&
4302            early_node_map[i].start_pfn < end_pfn) {
4303            early_node_map[i].start_pfn = end_pfn;
4304            continue;
4305        }
4306    }
4307
4308    if (!removed)
4309        return;
4310
4311    /* remove the blank ones */
4312    for (i = nr_nodemap_entries - 1; i > 0; i--) {
4313        if (early_node_map[i].nid != nid)
4314            continue;
4315        if (early_node_map[i].end_pfn)
4316            continue;
4317        /* we found it, get rid of it */
4318        for (j = i; j < nr_nodemap_entries - 1; j++)
4319            memcpy(&early_node_map[j], &early_node_map[j+1],
4320                sizeof(early_node_map[j]));
4321        j = nr_nodemap_entries - 1;
4322        memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
4323        nr_nodemap_entries--;
4324    }
4325}
4326
4327/**
4328 * remove_all_active_ranges - Remove all currently registered regions
4329 *
4330 * During discovery, it may be found that a table like SRAT is invalid
4331 * and an alternative discovery method must be used. This function removes
4332 * all currently registered regions.
4333 */
4334void __init remove_all_active_ranges(void)
4335{
4336    memset(early_node_map, 0, sizeof(early_node_map));
4337    nr_nodemap_entries = 0;
4338}
4339
4340/* Compare two node_active_regions by start_pfn */
4341static int __init cmp_node_active_region(const void *a, const void *b)
4342{
4343    struct node_active_region *arange = (struct node_active_region *)a;
4344    struct node_active_region *brange = (struct node_active_region *)b;
4345
4346    /* Done this way to avoid overflows */
4347    if (arange->start_pfn > brange->start_pfn)
4348        return 1;
4349    if (arange->start_pfn < brange->start_pfn)
4350        return -1;
4351
4352    return 0;
4353}
4354
4355/* sort the node_map by start_pfn */
4356void __init sort_node_map(void)
4357{
4358    sort(early_node_map, (size_t)nr_nodemap_entries,
4359            sizeof(struct node_active_region),
4360            cmp_node_active_region, NULL);
4361}
4362
4363/* Find the lowest pfn for a node */
4364static unsigned long __init find_min_pfn_for_node(int nid)
4365{
4366    int i;
4367    unsigned long min_pfn = ULONG_MAX;
4368
4369    /* Assuming a sorted map, the first range found has the starting pfn */
4370    for_each_active_range_index_in_nid(i, nid)
4371        min_pfn = min(min_pfn, early_node_map[i].start_pfn);
4372
4373    if (min_pfn == ULONG_MAX) {
4374        printk(KERN_WARNING
4375            "Could not find start_pfn for node %d\n", nid);
4376        return 0;
4377    }
4378
4379    return min_pfn;
4380}
4381
4382/**
4383 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4384 *
4385 * It returns the minimum PFN based on information provided via
4386 * add_active_range().
4387 */
4388unsigned long __init find_min_pfn_with_active_regions(void)
4389{
4390    return find_min_pfn_for_node(MAX_NUMNODES);
4391}
4392
4393/*
4394 * early_calculate_totalpages()
4395 * Sum pages in active regions for movable zone.
4396 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4397 */
4398static unsigned long __init early_calculate_totalpages(void)
4399{
4400    int i;
4401    unsigned long totalpages = 0;
4402
4403    for (i = 0; i < nr_nodemap_entries; i++) {
4404        unsigned long pages = early_node_map[i].end_pfn -
4405                        early_node_map[i].start_pfn;
4406        totalpages += pages;
4407        if (pages)
4408            node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
4409    }
4410    return totalpages;
4411}
4412
4413/*
4414 * Find the PFN at which the Movable zone begins in each node. Kernel memory
4415 * is spread evenly between nodes as long as the nodes have enough
4416 * memory. When they don't, some nodes will have more kernelcore than
4417 * others.
4418 */
4419static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4420{
4421    int i, nid;
4422    unsigned long usable_startpfn;
4423    unsigned long kernelcore_node, kernelcore_remaining;
4424    /* save the state before borrowing the nodemask */
4425    nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4426    unsigned long totalpages = early_calculate_totalpages();
4427    int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4428
4429    /*
4430     * If movablecore was specified, calculate what size of
4431     * kernelcore that corresponds so that memory usable for
4432     * any allocation type is evenly spread. If both kernelcore
4433     * and movablecore are specified, then the value of kernelcore
4434     * will be used for required_kernelcore if it's greater than
4435     * what movablecore would have allowed.
4436     */
4437    if (required_movablecore) {
4438        unsigned long corepages;
4439
4440        /*
4441         * Round up so that ZONE_MOVABLE is at least as large as what
4442         * was requested by the user
4443         */
4444        required_movablecore =
4445            roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4446        corepages = totalpages - required_movablecore;
4447
4448        required_kernelcore = max(required_kernelcore, corepages);
4449    }
4450
4451    /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4452    if (!required_kernelcore)
4453        goto out;
4454
4455    /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4456    find_usable_zone_for_movable();
4457    usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4458
4459restart:
4460    /* Spread kernelcore memory as evenly as possible throughout nodes */
4461    kernelcore_node = required_kernelcore / usable_nodes;
4462    for_each_node_state(nid, N_HIGH_MEMORY) {
4463        /*
4464         * Recalculate kernelcore_node if the division per node
4465         * now exceeds what is necessary to satisfy the requested
4466         * amount of memory for the kernel
4467         */
4468        if (required_kernelcore < kernelcore_node)
4469            kernelcore_node = required_kernelcore / usable_nodes;
4470
4471        /*
4472         * As the map is walked, we track how much memory is usable
4473         * by the kernel using kernelcore_remaining. When it is
4474         * 0, the rest of the node is usable by ZONE_MOVABLE
4475         */
4476        kernelcore_remaining = kernelcore_node;
4477
4478        /* Go through each range of PFNs within this node */
4479        for_each_active_range_index_in_nid(i, nid) {
4480            unsigned long start_pfn, end_pfn;
4481            unsigned long size_pages;
4482
4483            start_pfn = max(early_node_map[i].start_pfn,
4484                        zone_movable_pfn[nid]);
4485            end_pfn = early_node_map[i].end_pfn;
4486            if (start_pfn >= end_pfn)
4487                continue;
4488
4489            /* Account for what is only usable for kernelcore */
4490            if (start_pfn < usable_startpfn) {
4491                unsigned long kernel_pages;
4492                kernel_pages = min(end_pfn, usable_startpfn)
4493                                - start_pfn;
4494
4495                kernelcore_remaining -= min(kernel_pages,
4496                            kernelcore_remaining);
4497                required_kernelcore -= min(kernel_pages,
4498                            required_kernelcore);
4499
4500                /* Continue if range is now fully accounted */
4501                if (end_pfn <= usable_startpfn) {
4502
4503                    /*
4504                     * Push zone_movable_pfn to the end so
4505                     * that if we have to rebalance
4506                     * kernelcore across nodes, we will
4507                     * not double account here
4508                     */
4509                    zone_movable_pfn[nid] = end_pfn;
4510                    continue;
4511                }
4512                start_pfn = usable_startpfn;
4513            }
4514
4515            /*
4516             * The usable PFN range for ZONE_MOVABLE is from
4517             * start_pfn->end_pfn. Calculate size_pages as the
4518             * number of pages used as kernelcore
4519             */
4520            size_pages = end_pfn - start_pfn;
4521            if (size_pages > kernelcore_remaining)
4522                size_pages = kernelcore_remaining;
4523            zone_movable_pfn[nid] = start_pfn + size_pages;
4524
4525            /*
4526             * Some kernelcore has been accounted for; update the counts
4527             * and break if the kernelcore for this node has been
4528             * satisfied.
4529             */
4530            required_kernelcore -= min(required_kernelcore,
4531                                size_pages);
4532            kernelcore_remaining -= size_pages;
4533            if (!kernelcore_remaining)
4534                break;
4535        }
4536    }
4537
4538    /*
4539     * If there is still required_kernelcore, we do another pass with one
4540     * less node in the count. This will push zone_movable_pfn[nid] further
4541     * along on the nodes that still have memory until kernelcore is
4542     * satisfied.
4543     */
4544    usable_nodes--;
4545    if (usable_nodes && required_kernelcore > usable_nodes)
4546        goto restart;
4547
4548    /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4549    for (nid = 0; nid < MAX_NUMNODES; nid++)
4550        zone_movable_pfn[nid] =
4551            roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4552
4553out:
4554    /* restore the node_state */
4555    node_states[N_HIGH_MEMORY] = saved_node_state;
4556}
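
/*
 * Worked example (illustrative, 4KB pages, ignoring MAX_ORDER rounding): on
 * a machine with two nodes of 1GB each (262144 pages per node) booted with
 * kernelcore=512M, required_kernelcore is 131072 pages, so kernelcore_node
 * becomes 65536 pages per node. In the simplest case, where each node's
 * memory sits entirely in the highest usable zone, zone_movable_pfn[] ends
 * up 65536 pages into each node, leaving roughly 768MB per node for
 * ZONE_MOVABLE.
 */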
4557
4558/* Any regular memory on that node? */
4559static void check_for_regular_memory(pg_data_t *pgdat)
4560{
4561#ifdef CONFIG_HIGHMEM
4562    enum zone_type zone_type;
4563
4564    for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4565        struct zone *zone = &pgdat->node_zones[zone_type];
4566        if (zone->present_pages)
4567            node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4568    }
4569#endif
4570}
4571
4572/**
4573 * free_area_init_nodes - Initialise all pg_data_t and zone data
4574 * @max_zone_pfn: an array of max PFNs for each zone
4575 *
4576 * This will call free_area_init_node() for each active node in the system.
4577 * Using the page ranges provided by add_active_range(), the size of each
4578 * zone in each node and their holes is calculated. If the maximum PFN
4579 * zone in each node and their holes are calculated. If the maximum PFNs
4580 * of two adjacent zones match, it is assumed that the zone is empty.
4581 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4582 * starts where the previous one ended. For example, ZONE_DMA32 starts
4583 * at arch_max_dma_pfn.
4584 */
4585void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4586{
4587    unsigned long nid;
4588    int i;
4589
4590    /* Sort early_node_map as initialisation assumes it is sorted */
4591    sort_node_map();
4592
4593    /* Record where the zone boundaries are */
4594    memset(arch_zone_lowest_possible_pfn, 0,
4595                sizeof(arch_zone_lowest_possible_pfn));
4596    memset(arch_zone_highest_possible_pfn, 0,
4597                sizeof(arch_zone_highest_possible_pfn));
4598    arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4599    arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4600    for (i = 1; i < MAX_NR_ZONES; i++) {
4601        if (i == ZONE_MOVABLE)
4602            continue;
4603        arch_zone_lowest_possible_pfn[i] =
4604            arch_zone_highest_possible_pfn[i-1];
4605        arch_zone_highest_possible_pfn[i] =
4606            max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4607    }
4608    arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4609    arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4610
4611    /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4612    memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4613    find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4614
4615    /* Print out the zone ranges */
4616    printk("Zone PFN ranges:\n");
4617    for (i = 0; i < MAX_NR_ZONES; i++) {
4618        if (i == ZONE_MOVABLE)
4619            continue;
4620        printk(" %-8s ", zone_names[i]);
4621        if (arch_zone_lowest_possible_pfn[i] ==
4622                arch_zone_highest_possible_pfn[i])
4623            printk("empty\n");
4624        else
4625            printk("%0#10lx -> %0#10lx\n",
4626                arch_zone_lowest_possible_pfn[i],
4627                arch_zone_highest_possible_pfn[i]);
4628    }
4629
4630    /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4631    printk("Movable zone start PFN for each node\n");
4632    for (i = 0; i < MAX_NUMNODES; i++) {
4633        if (zone_movable_pfn[i])
4634            printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
4635    }
4636
4637    /* Print out the early_node_map[] */
4638    printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4639    for (i = 0; i < nr_nodemap_entries; i++)
4640        printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4641                        early_node_map[i].start_pfn,
4642                        early_node_map[i].end_pfn);
4643
4644    /* Initialise every node */
4645    mminit_verify_pageflags_layout();
4646    setup_nr_node_ids();
4647    for_each_online_node(nid) {
4648        pg_data_t *pgdat = NODE_DATA(nid);
4649        free_area_init_node(nid, NULL,
4650                find_min_pfn_for_node(nid), NULL);
4651
4652        /* Any memory on that node */
4653        if (pgdat->node_present_pages)
4654            node_set_state(nid, N_HIGH_MEMORY);
4655        check_for_regular_memory(pgdat);
4656    }
4657}
4658
4659static int __init cmdline_parse_core(char *p, unsigned long *core)
4660{
4661    unsigned long long coremem;
4662    if (!p)
4663        return -EINVAL;
4664
4665    coremem = memparse(p, &p);
4666    *core = coremem >> PAGE_SHIFT;
4667
4668    /* Paranoid check that UL is enough for the coremem value */
4669    WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4670
4671    return 0;
4672}
4673
4674/*
4675 * kernelcore=size sets the amount of memory set aside for allocations that
4676 * cannot be reclaimed or migrated.
4677 */
4678static int __init cmdline_parse_kernelcore(char *p)
4679{
4680    return cmdline_parse_core(p, &required_kernelcore);
4681}
4682
4683/*
4684 * movablecore=size sets the amount of memory set aside for allocations that
4685 * can be reclaimed or migrated.
4686 */
4687static int __init cmdline_parse_movablecore(char *p)
4688{
4689    return cmdline_parse_core(p, &required_movablecore);
4690}
4691
4692early_param("kernelcore", cmdline_parse_kernelcore);
4693early_param("movablecore", cmdline_parse_movablecore);
4694
4695#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4696
4697/**
4698 * set_dma_reserve - set the specified number of pages reserved in the first zone
4699 * @new_dma_reserve: The number of pages to mark reserved
4700 *
4701 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4702 * In the DMA zone, a significant percentage may be consumed by kernel image
4703 * and other unfreeable allocations which can skew the watermarks badly. This
4704 * function may optionally be used to account for unfreeable pages in the
4705 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4706 * smaller per-cpu batchsize.
4707 */
4708void __init set_dma_reserve(unsigned long new_dma_reserve)
4709{
4710    dma_reserve = new_dma_reserve;
4711}
4712
4713#ifndef CONFIG_NEED_MULTIPLE_NODES
4714struct pglist_data __refdata contig_page_data = {
4715#ifndef CONFIG_NO_BOOTMEM
4716 .bdata = &bootmem_node_data[0]
4717#endif
4718 };
4719EXPORT_SYMBOL(contig_page_data);
4720#endif
4721
4722void __init free_area_init(unsigned long *zones_size)
4723{
4724    free_area_init_node(0, zones_size,
4725            __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4726}
4727
4728static int page_alloc_cpu_notify(struct notifier_block *self,
4729                 unsigned long action, void *hcpu)
4730{
4731    int cpu = (unsigned long)hcpu;
4732
4733    if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4734        drain_pages(cpu);
4735
4736        /*
4737         * Spill the event counters of the dead processor
4738         * into the current processor's event counters.
4739         * This artificially elevates the count of the current
4740         * processor.
4741         */
4742        vm_events_fold_cpu(cpu);
4743
4744        /*
4745         * Zero the differential counters of the dead processor
4746         * so that the vm statistics are consistent.
4747         *
4748         * This is only okay since the processor is dead and cannot
4749         * race with what we are doing.
4750         */
4751        refresh_cpu_vm_stats(cpu);
4752    }
4753    return NOTIFY_OK;
4754}
4755
4756void __init page_alloc_init(void)
4757{
4758    hotcpu_notifier(page_alloc_cpu_notify, 0);
4759}
4760
4761/*
4762 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4763 * or min_free_kbytes changes.
4764 */
4765static void calculate_totalreserve_pages(void)
4766{
4767    struct pglist_data *pgdat;
4768    unsigned long reserve_pages = 0;
4769    enum zone_type i, j;
4770
4771    for_each_online_pgdat(pgdat) {
4772        for (i = 0; i < MAX_NR_ZONES; i++) {
4773            struct zone *zone = pgdat->node_zones + i;
4774            unsigned long max = 0;
4775
4776            /* Find valid and maximum lowmem_reserve in the zone */
4777            for (j = i; j < MAX_NR_ZONES; j++) {
4778                if (zone->lowmem_reserve[j] > max)
4779                    max = zone->lowmem_reserve[j];
4780            }
4781
4782            /* we treat the high watermark as reserved pages. */
4783            max += high_wmark_pages(zone);
4784
4785            if (max > zone->present_pages)
4786                max = zone->present_pages;
4787            reserve_pages += max;
4788        }
4789    }
4790    totalreserve_pages = reserve_pages;
4791}
4792
4793/*
4794 * setup_per_zone_lowmem_reserve - called whenever
4795 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone
4796 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
4797 * pages are left in the zone after a successful __alloc_pages().
4798 */
4799static void setup_per_zone_lowmem_reserve(void)
4800{
4801    struct pglist_data *pgdat;
4802    enum zone_type j, idx;
4803
4804    for_each_online_pgdat(pgdat) {
4805        for (j = 0; j < MAX_NR_ZONES; j++) {
4806            struct zone *zone = pgdat->node_zones + j;
4807            unsigned long present_pages = zone->present_pages;
4808
4809            zone->lowmem_reserve[j] = 0;
4810
4811            idx = j;
4812            while (idx) {
4813                struct zone *lower_zone;
4814
4815                idx--;
4816
4817                if (sysctl_lowmem_reserve_ratio[idx] < 1)
4818                    sysctl_lowmem_reserve_ratio[idx] = 1;
4819
4820                lower_zone = pgdat->node_zones + idx;
4821                lower_zone->lowmem_reserve[j] = present_pages /
4822                    sysctl_lowmem_reserve_ratio[idx];
4823                present_pages += lower_zone->present_pages;
4824            }
4825        }
4826    }
4827
4828    /* update totalreserve_pages */
4829    calculate_totalreserve_pages();
4830}
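
/*
 * Worked example (illustrative, assuming the default ratio of 256 for the
 * DMA zone): with a 4096-page ZONE_DMA below a 225280-page ZONE_NORMAL, the
 * DMA zone gets lowmem_reserve[ZONE_NORMAL] = 225280 / 256 = 880 pages, so
 * an allocation targeted at ZONE_NORMAL may only fall back to ZONE_DMA
 * while the DMA zone holds 880 pages above its own watermark.
 */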
4831
4832/**
4833 * setup_per_zone_wmarks - called when min_free_kbytes changes
4834 * or when memory is hot-{added|removed}
4835 *
4836 * Ensures that the watermark[min,low,high] values for each zone are set
4837 * correctly with respect to min_free_kbytes.
4838 */
4839void setup_per_zone_wmarks(void)
4840{
4841    unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4842    unsigned long lowmem_pages = 0;
4843    struct zone *zone;
4844    unsigned long flags;
4845
4846    /* Calculate total number of !ZONE_HIGHMEM pages */
4847    for_each_zone(zone) {
4848        if (!is_highmem(zone))
4849            lowmem_pages += zone->present_pages;
4850    }
4851
4852    for_each_zone(zone) {
4853        u64 tmp;
4854
4855        spin_lock_irqsave(&zone->lock, flags);
4856        tmp = (u64)pages_min * zone->present_pages;
4857        do_div(tmp, lowmem_pages);
4858        if (is_highmem(zone)) {
4859            /*
4860             * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4861             * need highmem pages, so cap pages_min to a small
4862             * value here.
4863             *
             * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
             * deltas control async page reclaim, and so should
             * not be capped for highmem.
4867             */
4868            int min_pages;
4869
4870            min_pages = zone->present_pages / 1024;
4871            if (min_pages < SWAP_CLUSTER_MAX)
4872                min_pages = SWAP_CLUSTER_MAX;
4873            if (min_pages > 128)
4874                min_pages = 128;
4875            zone->watermark[WMARK_MIN] = min_pages;
4876        } else {
4877            /*
4878             * If it's a lowmem zone, reserve a number of pages
4879             * proportionate to the zone's size.
4880             */
4881            zone->watermark[WMARK_MIN] = tmp;
4882        }
4883
4884        zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
4885        zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
4886        setup_zone_migrate_reserve(zone);
4887        spin_unlock_irqrestore(&zone->lock, flags);
4888    }
4889
4890    /* update totalreserve_pages */
4891    calculate_totalreserve_pages();
4892}
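
A quick numeric check of the non-highmem branch, using a made-up single 1GB lowmem zone, min_free_kbytes = 4096 and PAGE_SHIFT = 12 (none of these inputs come from a real system):

#include <stdio.h>

int main(void)
{
    unsigned long min_free_kbytes = 4096;   /* hypothetical sysctl value */
    unsigned long zone_present = 262144;    /* 1GB of 4KB pages */
    unsigned long lowmem_pages = 262144;    /* only one lowmem zone here */

    /* kbytes -> pages, exactly as in setup_per_zone_wmarks() */
    unsigned long pages_min = min_free_kbytes >> (12 - 10);

    /* this zone's proportional share of pages_min */
    unsigned long long tmp = (unsigned long long)pages_min * zone_present;
    tmp /= lowmem_pages;

    unsigned long wmark_min  = tmp;                     /* 1024 pages */
    unsigned long wmark_low  = wmark_min + (tmp >> 2);  /* 1280 pages */
    unsigned long wmark_high = wmark_min + (tmp >> 1);  /* 1536 pages */

    printf("min=%lu low=%lu high=%lu\n", wmark_min, wmark_low, wmark_high);
    return 0;
}

With a single zone the proportional split is trivial; on a real machine each lowmem zone receives a slice of pages_min weighted by its present_pages.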
4893
4894/*
4895 * The inactive anon list should be small enough that the VM never has to
4896 * do too much work, but large enough that each inactive page has a chance
4897 * to be referenced again before it is swapped out.
4898 *
4899 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
4900 * INACTIVE_ANON pages on this zone's LRU, maintained by the
4901 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
4902 * the anonymous pages are kept on the inactive list.
4903 *
 *   total     target    max
 *   memory    ratio     inactive anon
 * -------------------------------------
 *    10MB      1          5MB
 *   100MB      1         50MB
 *     1GB      3        250MB
 *    10GB     10        0.9GB
 *   100GB     31          3GB
 *     1TB    101         10GB
 *    10TB    320         32GB
4914 */
4915void calculate_zone_inactive_ratio(struct zone *zone)
4916{
4917    unsigned int gb, ratio;
4918
4919    /* Zone size in gigabytes */
4920    gb = zone->present_pages >> (30 - PAGE_SHIFT);
4921    if (gb)
4922        ratio = int_sqrt(10 * gb);
4923    else
4924        ratio = 1;
4925
4926    zone->inactive_ratio = ratio;
4927}
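
For instance, assuming PAGE_SHIFT = 12, a hypothetical 4GB zone has present_pages = 1048576, so gb = 1048576 >> 18 = 4 and ratio = int_sqrt(10 * 4) = 6; the pageout code will then aim to keep about one inactive anonymous page for every six active ones, consistent with the 1GB -> 3 and 10GB -> 10 rows in the table above.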
4928
4929static void __init setup_per_zone_inactive_ratio(void)
4930{
4931    struct zone *zone;
4932
4933    for_each_zone(zone)
4934        calculate_zone_inactive_ratio(zone);
4935}
4936
4937/*
4938 * Initialise min_free_kbytes.
4939 *
4940 * For small machines we want it small (128k min). For large machines
4941 * we want it large (64MB max). But it is not linear, because network
4942 * bandwidth does not increase linearly with machine size. We use
4943 *
4944 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4945 * min_free_kbytes = sqrt(lowmem_kbytes * 16)
4946 *
4947 * which yields
4948 *
4949 * 16MB: 512k
4950 * 32MB: 724k
4951 * 64MB: 1024k
4952 * 128MB: 1448k
4953 * 256MB: 2048k
4954 * 512MB: 2896k
4955 * 1024MB: 4096k
4956 * 2048MB: 5792k
4957 * 4096MB: 8192k
4958 * 8192MB: 11584k
4959 * 16384MB: 16384k
4960 */
4961static int __init init_per_zone_wmark_min(void)
4962{
4963    unsigned long lowmem_kbytes;
4964
4965    lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4966
4967    min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4968    if (min_free_kbytes < 128)
4969        min_free_kbytes = 128;
4970    if (min_free_kbytes > 65536)
4971        min_free_kbytes = 65536;
4972    setup_per_zone_wmarks();
4973    setup_per_zone_lowmem_reserve();
4974    setup_per_zone_inactive_ratio();
4975    return 0;
4976}
4977module_init(init_per_zone_wmark_min)
4978
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 * that we can call setup_per_zone_wmarks() whenever min_free_kbytes
 * changes.
 */
4984int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
4985    void __user *buffer, size_t *length, loff_t *ppos)
4986{
4987    proc_dointvec(table, write, buffer, length, ppos);
4988    if (write)
4989        setup_per_zone_wmarks();
4990    return 0;
4991}
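
This handler fires when the value behind this ctl_table is written; it is conventionally exposed as /proc/sys/vm/min_free_kbytes, so every write there recomputes the min/low/high watermarks of all zones via setup_per_zone_wmarks().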
4992
4993#ifdef CONFIG_NUMA
4994int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4995    void __user *buffer, size_t *length, loff_t *ppos)
4996{
4997    struct zone *zone;
4998    int rc;
4999
5000    rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5001    if (rc)
5002        return rc;
5003
5004    for_each_zone(zone)
5005        zone->min_unmapped_pages = (zone->present_pages *
5006                sysctl_min_unmapped_ratio) / 100;
5007    return 0;
5008}
5009
5010int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5011    void __user *buffer, size_t *length, loff_t *ppos)
5012{
5013    struct zone *zone;
5014    int rc;
5015
5016    rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5017    if (rc)
5018        return rc;
5019
5020    for_each_zone(zone)
5021        zone->min_slab_pages = (zone->present_pages *
5022                sysctl_min_slab_ratio) / 100;
5023    return 0;
5024}
5025#endif
5026
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 * proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
 * whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the minimum watermarks; it is
 * only meaningful relative to the boot-time zone sizes.
 */
5036int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5037    void __user *buffer, size_t *length, loff_t *ppos)
5038{
5039    proc_dointvec_minmax(table, write, buffer, length, ppos);
5040    setup_per_zone_lowmem_reserve();
5041    return 0;
5042}
5043
/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of the total pages in each zone that a hot
 * per-cpu pagelist can hold before it gets flushed back to the buddy
 * allocator.
 */
5050int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5051    void __user *buffer, size_t *length, loff_t *ppos)
5052{
5053    struct zone *zone;
5054    unsigned int cpu;
5055    int ret;
5056
5057    ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5058    if (!write || (ret == -EINVAL))
5059        return ret;
5060    for_each_populated_zone(zone) {
5061        for_each_possible_cpu(cpu) {
5062            unsigned long high;
5063            high = zone->present_pages / percpu_pagelist_fraction;
5064            setup_pagelist_highmark(
5065                per_cpu_ptr(zone->pageset, cpu), high);
5066        }
5067    }
5068    return 0;
5069}
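
For example, with a hypothetical 1GB zone of 262144 4KB pages and percpu_pagelist_fraction set to 64, each CPU's hot pagelist for that zone is given high = 262144 / 64 = 4096 pages; setup_pagelist_highmark() then derives the batch size from that limit before pages spill back to the buddy lists.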
5070
5071int hashdist = HASHDIST_DEFAULT;
5072
5073#ifdef CONFIG_NUMA
5074static int __init set_hashdist(char *str)
5075{
5076    if (!str)
5077        return 0;
5078    hashdist = simple_strtoul(str, &str, 0);
5079    return 1;
5080}
5081__setup("hashdist=", set_hashdist);
5082#endif
5083
5084/*
5085 * allocate a large system hash table from bootmem
5086 * - it is assumed that the hash table must contain an exact power-of-2
5087 * quantity of entries
5088 * - limit is the number of hash buckets, not the total allocation size
5089 */
5090void *__init alloc_large_system_hash(const char *tablename,
5091                     unsigned long bucketsize,
5092                     unsigned long numentries,
5093                     int scale,
5094                     int flags,
5095                     unsigned int *_hash_shift,
5096                     unsigned int *_hash_mask,
5097                     unsigned long limit)
5098{
5099    unsigned long long max = limit;
5100    unsigned long log2qty, size;
5101    void *table = NULL;
5102
5103    /* allow the kernel cmdline to have a say */
5104    if (!numentries) {
5105        /* round applicable memory size up to nearest megabyte */
5106        numentries = nr_kernel_pages;
5107        numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5108        numentries >>= 20 - PAGE_SHIFT;
5109        numentries <<= 20 - PAGE_SHIFT;
5110
5111        /* limit to 1 bucket per 2^scale bytes of low memory */
5112        if (scale > PAGE_SHIFT)
5113            numentries >>= (scale - PAGE_SHIFT);
5114        else
5115            numentries <<= (PAGE_SHIFT - scale);
5116
        /* Make sure we've got at least a 0-order allocation. */
5118        if (unlikely(flags & HASH_SMALL)) {
5119            /* Makes no sense without HASH_EARLY */
5120            WARN_ON(!(flags & HASH_EARLY));
5121            if (!(numentries >> *_hash_shift)) {
5122                numentries = 1UL << *_hash_shift;
5123                BUG_ON(!numentries);
5124            }
5125        } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5126            numentries = PAGE_SIZE / bucketsize;
5127    }
5128    numentries = roundup_pow_of_two(numentries);
5129
5130    /* limit allocation size to 1/16 total memory by default */
5131    if (max == 0) {
5132        max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5133        do_div(max, bucketsize);
5134    }
5135
5136    if (numentries > max)
5137        numentries = max;
5138
5139    log2qty = ilog2(numentries);
5140
5141    do {
5142        size = bucketsize << log2qty;
5143        if (flags & HASH_EARLY)
5144            table = alloc_bootmem_nopanic(size);
5145        else if (hashdist)
5146            table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5147        else {
            /*
             * If bucketsize is not a power of two, we may free
             * some pages at the end of the hash table, which
             * alloc_pages_exact() does automatically.
             */
5153            if (get_order(size) < MAX_ORDER) {
5154                table = alloc_pages_exact(size, GFP_ATOMIC);
5155                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5156            }
5157        }
5158    } while (!table && size > PAGE_SIZE && --log2qty);
5159
5160    if (!table)
5161        panic("Failed to allocate %s hash table\n", tablename);
5162
5163    printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
5164           tablename,
5165           (1U << log2qty),
5166           ilog2(size) - PAGE_SHIFT,
5167           size);
5168
5169    if (_hash_shift)
5170        *_hash_shift = log2qty;
5171    if (_hash_mask)
5172        *_hash_mask = (1 << log2qty) - 1;
5173
5174    return table;
5175}
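
The sizing arithmetic above is dense, so here is a standalone sketch of just the entry-count computation for one invented configuration (1GB of kernel pages, 8-byte buckets, scale = 15, PAGE_SHIFT = 12; no real caller uses exactly these values):

#include <stdio.h>

int main(void)
{
    unsigned long nr_kernel_pages = 262144; /* 1GB of 4KB pages (made up) */
    unsigned long bucketsize = 8;           /* one pointer per bucket */
    int page_shift = 12, scale = 15;
    unsigned long numentries, pow2;

    /* round the memory size up to the nearest megabyte's worth of pages */
    numentries = nr_kernel_pages;
    numentries += (1UL << (20 - page_shift)) - 1;
    numentries >>= 20 - page_shift;
    numentries <<= 20 - page_shift;

    /* one bucket per 2^scale bytes of low memory */
    if (scale > page_shift)
        numentries >>= (scale - page_shift);
    else
        numentries <<= (page_shift - scale);

    /* stand-in for roundup_pow_of_two() */
    for (pow2 = 1; pow2 < numentries; pow2 <<= 1)
        ;

    /* 262144 >> 3 = 32768 entries; 8 << 15 = 262144 bytes (256KB) */
    printf("entries=%lu bytes=%lu\n", pow2, bucketsize * pow2);
    return 0;
}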
5176
5177/* Return a pointer to the bitmap storing bits affecting a block of pages */
5178static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5179                            unsigned long pfn)
5180{
5181#ifdef CONFIG_SPARSEMEM
5182    return __pfn_to_section(pfn)->pageblock_flags;
5183#else
5184    return zone->pageblock_flags;
5185#endif /* CONFIG_SPARSEMEM */
5186}
5187
5188static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5189{
5190#ifdef CONFIG_SPARSEMEM
5191    pfn &= (PAGES_PER_SECTION-1);
5192    return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5193#else
5194    pfn = pfn - zone->zone_start_pfn;
5195    return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5196#endif /* CONFIG_SPARSEMEM */
5197}
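
Worked example for the !CONFIG_SPARSEMEM branch, assuming a zone whose zone_start_pfn is 0, pageblock_order = 10 and NR_PAGEBLOCK_BITS = 3 (the three migratetype bits): a page at pfn 5120 lies in pageblock 5120 >> 10 = 5, so its flag group starts at bit 5 * 3 = 15 of zone->pageblock_flags.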
5198
5199/**
5200 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5201 * @page: The page within the block of interest
5202 * @start_bitidx: The first bit of interest to retrieve
5203 * @end_bitidx: The last bit of interest
5204 * returns pageblock_bits flags
5205 */
5206unsigned long get_pageblock_flags_group(struct page *page,
5207                    int start_bitidx, int end_bitidx)
5208{
5209    struct zone *zone;
5210    unsigned long *bitmap;
5211    unsigned long pfn, bitidx;
5212    unsigned long flags = 0;
5213    unsigned long value = 1;
5214
5215    zone = page_zone(page);
5216    pfn = page_to_pfn(page);
5217    bitmap = get_pageblock_bitmap(zone, pfn);
5218    bitidx = pfn_to_bitidx(zone, pfn);
5219
5220    for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5221        if (test_bit(bitidx + start_bitidx, bitmap))
5222            flags |= value;
5223
5224    return flags;
5225}
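
The usual consumer of this accessor pair is the migratetype lookup; in kernels of this vintage, include/linux/pageblock-flags.h wraps it along these lines:

#define get_pageblock_migratetype(page) \
    get_pageblock_flags_group(page, PB_migrate, PB_migrate_end)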
5226
5227/**
5228 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5229 * @page: The page within the block of interest
5230 * @start_bitidx: The first bit of interest
5231 * @end_bitidx: The last bit of interest
5232 * @flags: The flags to set
5233 */
5234void set_pageblock_flags_group(struct page *page, unsigned long flags,
5235                    int start_bitidx, int end_bitidx)
5236{
5237    struct zone *zone;
5238    unsigned long *bitmap;
5239    unsigned long pfn, bitidx;
5240    unsigned long value = 1;
5241
5242    zone = page_zone(page);
5243    pfn = page_to_pfn(page);
5244    bitmap = get_pageblock_bitmap(zone, pfn);
5245    bitidx = pfn_to_bitidx(zone, pfn);
5246    VM_BUG_ON(pfn < zone->zone_start_pfn);
5247    VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5248
5249    for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5250        if (flags & value)
5251            __set_bit(bitidx + start_bitidx, bitmap);
5252        else
5253            __clear_bit(bitidx + start_bitidx, bitmap);
5254}
5255
/*
 * This is designed as a helper function; please see page_isolation.c too.
 * Set/clear a pageblock's migratetype to MIGRATE_ISOLATE; the page
 * allocator never allocates memory from an ISOLATE pageblock.
 */
5261
5262int set_migratetype_isolate(struct page *page)
5263{
5264    struct zone *zone;
5265    struct page *curr_page;
5266    unsigned long flags, pfn, iter;
5267    unsigned long immobile = 0;
5268    struct memory_isolate_notify arg;
5269    int notifier_ret;
5270    int ret = -EBUSY;
5271    int zone_idx;
5272
5273    zone = page_zone(page);
5274    zone_idx = zone_idx(zone);
5275
5276    spin_lock_irqsave(&zone->lock, flags);
5277    if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
5278        zone_idx == ZONE_MOVABLE) {
5279        ret = 0;
5280        goto out;
5281    }
5282
5283    pfn = page_to_pfn(page);
5284    arg.start_pfn = pfn;
5285    arg.nr_pages = pageblock_nr_pages;
5286    arg.pages_found = 0;
5287
    /*
     * It may be possible to isolate a pageblock even if the
     * migratetype is not MIGRATE_MOVABLE.  The memory isolation
     * notifier chain is used by balloon drivers to return the
     * number of pages in a range that are held by the balloon
     * driver to shrink memory.  If all the pages are accounted for
     * by balloons, are free, or are on the LRU, isolation can continue.
     * Later, when for example the memory hotplug notifier runs, these
     * pages reported as "can be isolated" should be isolated (freed)
     * by the balloon driver through the memory notifier chain.
     */
5299    notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5300    notifier_ret = notifier_to_errno(notifier_ret);
5301    if (notifier_ret || !arg.pages_found)
5302        goto out;
5303
5304    for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
        if (!pfn_valid_within(iter))
5306            continue;
5307
5308        curr_page = pfn_to_page(iter);
5309        if (!page_count(curr_page) || PageLRU(curr_page))
5310            continue;
5311
5312        immobile++;
5313    }
5314
5315    if (arg.pages_found == immobile)
5316        ret = 0;
5317
5318out:
5319    if (!ret) {
5320        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5321        move_freepages_block(zone, page, MIGRATE_ISOLATE);
5322    }
5323
5324    spin_unlock_irqrestore(&zone->lock, flags);
5325    if (!ret)
5326        drain_all_pages();
5327    return ret;
5328}
5329
5330void unset_migratetype_isolate(struct page *page)
5331{
5332    struct zone *zone;
5333    unsigned long flags;
5334    zone = page_zone(page);
5335    spin_lock_irqsave(&zone->lock, flags);
5336    if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5337        goto out;
5338    set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5339    move_freepages_block(zone, page, MIGRATE_MOVABLE);
5340out:
5341    spin_unlock_irqrestore(&zone->lock, flags);
5342}
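
In this tree, these two helpers are driven from start_isolate_page_range() and undo_isolate_page_range() in mm/page_isolation.c, which walk a pfn range one pageblock at a time and roll isolation back if any block fails.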
5343
5344#ifdef CONFIG_MEMORY_HOTREMOVE
5345/*
5346 * All pages in the range must be isolated before calling this.
5347 */
5348void
5349__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
5350{
5351    struct page *page;
5352    struct zone *zone;
5353    int order, i;
5354    unsigned long pfn;
5355    unsigned long flags;
5356    /* find the first valid pfn */
5357    for (pfn = start_pfn; pfn < end_pfn; pfn++)
5358        if (pfn_valid(pfn))
5359            break;
5360    if (pfn == end_pfn)
5361        return;
5362    zone = page_zone(pfn_to_page(pfn));
5363    spin_lock_irqsave(&zone->lock, flags);
5364    pfn = start_pfn;
5365    while (pfn < end_pfn) {
5366        if (!pfn_valid(pfn)) {
5367            pfn++;
5368            continue;
5369        }
5370        page = pfn_to_page(pfn);
5371        BUG_ON(page_count(page));
5372        BUG_ON(!PageBuddy(page));
5373        order = page_order(page);
5374#ifdef CONFIG_DEBUG_VM
5375        printk(KERN_INFO "remove from free list %lx %d %lx\n",
5376               pfn, 1 << order, end_pfn);
5377#endif
5378        list_del(&page->lru);
5379        rmv_page_order(page);
5380        zone->free_area[order].nr_free--;
5381        __mod_zone_page_state(zone, NR_FREE_PAGES,
5382                      - (1UL << order));
5383        for (i = 0; i < (1 << order); i++)
5384            SetPageReserved((page+i));
5385        pfn += (1 << order);
5386    }
5387    spin_unlock_irqrestore(&zone->lock, flags);
5388}
5389#endif
5390
5391#ifdef CONFIG_MEMORY_FAILURE
5392bool is_free_buddy_page(struct page *page)
5393{
5394    struct zone *zone = page_zone(page);
5395    unsigned long pfn = page_to_pfn(page);
5396    unsigned long flags;
5397    int order;
5398
5399    spin_lock_irqsave(&zone->lock, flags);
5400    for (order = 0; order < MAX_ORDER; order++) {
5401        struct page *page_head = page - (pfn & ((1 << order) - 1));
5402
5403        if (PageBuddy(page_head) && page_order(page_head) >= order)
5404            break;
5405    }
5406    spin_unlock_irqrestore(&zone->lock, flags);
5407
5408    return order < MAX_ORDER;
5409}
5410#endif
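
The head computation above simply aligns the pfn down to an order boundary: for pfn 1027 at order 2, pfn & ((1 << 2) - 1) = 3, so page_head points at pfn 1024, and if that page is PageBuddy with page_order() >= 2 its free block necessarily covers pfn 1027.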
5411
5412static struct trace_print_flags pageflag_names[] = {
5413    {1UL << PG_locked, "locked" },
5414    {1UL << PG_error, "error" },
5415    {1UL << PG_referenced, "referenced" },
5416    {1UL << PG_uptodate, "uptodate" },
5417    {1UL << PG_dirty, "dirty" },
5418    {1UL << PG_lru, "lru" },
5419    {1UL << PG_active, "active" },
5420    {1UL << PG_slab, "slab" },
5421    {1UL << PG_owner_priv_1, "owner_priv_1" },
5422    {1UL << PG_arch_1, "arch_1" },
5423    {1UL << PG_reserved, "reserved" },
5424    {1UL << PG_private, "private" },
5425    {1UL << PG_private_2, "private_2" },
5426    {1UL << PG_writeback, "writeback" },
5427#ifdef CONFIG_PAGEFLAGS_EXTENDED
5428    {1UL << PG_head, "head" },
5429    {1UL << PG_tail, "tail" },
5430#else
5431    {1UL << PG_compound, "compound" },
5432#endif
5433    {1UL << PG_swapcache, "swapcache" },
5434    {1UL << PG_mappedtodisk, "mappedtodisk" },
5435    {1UL << PG_reclaim, "reclaim" },
5436    {1UL << PG_buddy, "buddy" },
5437    {1UL << PG_swapbacked, "swapbacked" },
5438    {1UL << PG_unevictable, "unevictable" },
5439#ifdef CONFIG_MMU
5440    {1UL << PG_mlocked, "mlocked" },
5441#endif
5442#ifdef CONFIG_ARCH_USES_PG_UNCACHED
5443    {1UL << PG_uncached, "uncached" },
5444#endif
5445#ifdef CONFIG_MEMORY_FAILURE
5446    {1UL << PG_hwpoison, "hwpoison" },
5447#endif
5448    {-1UL, NULL },
5449};
5450
5451static void dump_page_flags(unsigned long flags)
5452{
5453    const char *delim = "";
5454    unsigned long mask;
5455    int i;
5456
5457    printk(KERN_ALERT "page flags: %#lx(", flags);
5458
5459    /* remove zone id */
5460    flags &= (1UL << NR_PAGEFLAGS) - 1;
5461
5462    for (i = 0; pageflag_names[i].name && flags; i++) {
5463
5464        mask = pageflag_names[i].mask;
5465        if ((flags & mask) != mask)
5466            continue;
5467
5468        flags &= ~mask;
5469        printk("%s%s", delim, pageflag_names[i].name);
5470        delim = "|";
5471    }
5472
5473    /* check for left over flags */
5474    if (flags)
5475        printk("%s%#lx", delim, flags);
5476
5477    printk(")\n");
5478}
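
For a page that is locked, referenced and dirty, and whose zone/node bits in the top of page->flags happen to be clear, the resulting line would read:

    page flags: 0x15(locked|referenced|dirty)

(bit 0 = PG_locked, bit 2 = PG_referenced, bit 4 = PG_dirty, so 0x1 + 0x4 + 0x10 = 0x15).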
5479
5480void dump_page(struct page *page)
5481{
5482    printk(KERN_ALERT
5483           "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
5484        page, page_count(page), page_mapcount(page),
5485        page->mapping, page->index);
5486    dump_page_flags(page->flags);
5487}
5488
