/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
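
/*
 * Illustration (not part of the kernel proper): user space selects the
 * policies above through the set_mempolicy()/mbind() syscalls handled
 * further down in this file. A minimal sketch, assuming a system with
 * nodes 0 and 1 and the syscall wrappers from libnuma's <numaif.h>:
 *
 *     #include <numaif.h>
 *
 *     unsigned long mask = (1UL << 0) | (1UL << 1);  // nodes 0 and 1
 *     if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
 *         perror("set_mempolicy");
 *
 * The maxnode argument bounds how many bits of the mask the kernel reads;
 * see get_nodes() below for how it is parsed.
 */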

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
    .refcnt = ATOMIC_INIT(1), /* never free it */
    .mode = MPOL_PREFERRED,
    .flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
    int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
    /*
     * If the read-side task has no lock to protect task->mempolicy, the
     * write-side task will rebind task->mempolicy in two steps. The
     * first step sets all the newly allowed nodes, and the second step
     * clears all the newly disallowed nodes. This way a reader can never
     * observe an empty nodemask and fail to find a node to allocate
     * a page from.
     * If we have a lock to protect task->mempolicy on the read side, we
     * rebind directly.
     *
     * step:
     * MPOL_REBIND_ONCE  - do the rebind work at once
     * MPOL_REBIND_STEP1 - set all the newly allowed nodes
     * MPOL_REBIND_STEP2 - clear all the newly disallowed nodes
     */
    void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
            enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
    int nd, k;

    for_each_node_mask(nd, *nodemask) {
        struct zone *z;

        for (k = 0; k <= policy_zone; k++) {
            z = &NODE_DATA(nd)->node_zones[k];
            if (z->present_pages > 0)
                return 1;
        }
    }

    return 0;
}

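/*
 * True when the policy carries the user's original nodemask
 * (MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES), i.e. w.user_nodemask
 * is valid rather than w.cpuset_mems_allowed.
 */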
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
    return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                   const nodemask_t *rel)
{
    nodemask_t tmp;
    nodes_fold(tmp, *orig, nodes_weight(*rel));
    nodes_onto(*ret, tmp, *rel);
}
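
/*
 * Example: a relative mask of {0,2} against an allowed set of {3,5,7}
 * (weight 3) folds to {0,2} and is then mapped onto the 1st and 3rd
 * allowed nodes, yielding {3,7}.
 */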

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
    if (nodes_empty(*nodes))
        return -EINVAL;
    pol->v.nodes = *nodes;
    return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
    if (!nodes)
        pol->flags |= MPOL_F_LOCAL; /* local allocation */
    else if (nodes_empty(*nodes))
        return -EINVAL; /* no allowed nodes */
    else
        pol->v.preferred_node = first_node(*nodes);
    return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
    if (!is_valid_nodemask(nodes))
        return -EINVAL;
    pol->v.nodes = *nodes;
    return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
             const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
    int ret;

    /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
    if (pol == NULL)
        return 0;
    /* Check N_HIGH_MEMORY */
    nodes_and(nsc->mask1,
          cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

    VM_BUG_ON(!nodes);
    if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
        nodes = NULL; /* explicit local allocation */
    else {
        if (pol->flags & MPOL_F_RELATIVE_NODES)
            mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
        else
            nodes_and(nsc->mask2, *nodes, nsc->mask1);

        if (mpol_store_user_nodemask(pol))
            pol->w.user_nodemask = *nodes;
        else
            pol->w.cpuset_mems_allowed =
                        cpuset_current_mems_allowed;
    }

    if (nodes)
        ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
    else
        ret = mpol_ops[pol->mode].create(pol, NULL);
    return ret;
}

/*
 * This function just creates a new policy, does some basic checks and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                  nodemask_t *nodes)
{
    struct mempolicy *policy;

    pr_debug("setting mode %d flags %d nodes[0] %lx\n",
         mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

    if (mode == MPOL_DEFAULT) {
        if (nodes && !nodes_empty(*nodes))
            return ERR_PTR(-EINVAL);
        return NULL; /* simply delete any existing policy */
    }
    VM_BUG_ON(!nodes);

    /*
     * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
     * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
     * All other modes require a valid pointer to a non-empty nodemask.
     */
    if (mode == MPOL_PREFERRED) {
        if (nodes_empty(*nodes)) {
            if (((flags & MPOL_F_STATIC_NODES) ||
                 (flags & MPOL_F_RELATIVE_NODES)))
                return ERR_PTR(-EINVAL);
        }
    } else if (nodes_empty(*nodes))
        return ERR_PTR(-EINVAL);
    policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
    if (!policy)
        return ERR_PTR(-ENOMEM);
    atomic_set(&policy->refcnt, 1);
    policy->mode = mode;
    policy->flags = flags;

    return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
    if (!atomic_dec_and_test(&p->refcnt))
        return;
    kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
                enum mpol_rebind_step step)
{
}

/*
 * step:
 * MPOL_REBIND_ONCE  - do the rebind work at once
 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
 * MPOL_REBIND_STEP2 - clear all the newly disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
                 enum mpol_rebind_step step)
{
    nodemask_t tmp;

    if (pol->flags & MPOL_F_STATIC_NODES)
        nodes_and(tmp, pol->w.user_nodemask, *nodes);
    else if (pol->flags & MPOL_F_RELATIVE_NODES)
        mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
    else {
        /*
         * if step == MPOL_REBIND_STEP1, we use
         * ->w.cpuset_mems_allowed to cache the result
         */
        if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
            nodes_remap(tmp, pol->v.nodes,
                    pol->w.cpuset_mems_allowed, *nodes);
            pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
        } else if (step == MPOL_REBIND_STEP2) {
            tmp = pol->w.cpuset_mems_allowed;
            pol->w.cpuset_mems_allowed = *nodes;
        } else
            BUG();
    }

    if (nodes_empty(tmp))
        tmp = *nodes;

    if (step == MPOL_REBIND_STEP1)
        nodes_or(pol->v.nodes, pol->v.nodes, tmp);
    else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
        pol->v.nodes = tmp;
    else
        BUG();

    if (!node_isset(current->il_next, tmp)) {
        current->il_next = next_node(current->il_next, tmp);
        if (current->il_next >= MAX_NUMNODES)
            current->il_next = first_node(tmp);
        if (current->il_next >= MAX_NUMNODES)
            current->il_next = numa_node_id();
    }
}
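
/*
 * Example of the two-step rebind for a policy without static/relative
 * flags: with v.nodes = {0,1} and the cpuset changing from {0,1} to
 * {2,3}, MPOL_REBIND_STEP1 remaps to {2,3} and ORs it in, leaving
 * v.nodes = {0,1,2,3} so lockless readers never see an empty mask;
 * MPOL_REBIND_STEP2 then shrinks v.nodes to the cached {2,3}.
 */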

static void mpol_rebind_preferred(struct mempolicy *pol,
                  const nodemask_t *nodes,
                  enum mpol_rebind_step step)
{
    nodemask_t tmp;

    if (pol->flags & MPOL_F_STATIC_NODES) {
        int node = first_node(pol->w.user_nodemask);

        if (node_isset(node, *nodes)) {
            pol->v.preferred_node = node;
            pol->flags &= ~MPOL_F_LOCAL;
        } else
            pol->flags |= MPOL_F_LOCAL;
    } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
        mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        pol->v.preferred_node = first_node(tmp);
    } else if (!(pol->flags & MPOL_F_LOCAL)) {
        pol->v.preferred_node = node_remap(pol->v.preferred_node,
                           pol->w.cpuset_mems_allowed,
                           *nodes);
        pol->w.cpuset_mems_allowed = *nodes;
    }
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the newly disallowed nodes. This way a reader can never observe an
 * empty nodemask and fail to find a node to allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 * MPOL_REBIND_ONCE  - do the rebind work at once
 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
 * MPOL_REBIND_STEP2 - clear all the newly disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
                enum mpol_rebind_step step)
{
    if (!pol)
        return;
    if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
        nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
        return;

    if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
        return;

    if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
        BUG();

    if (step == MPOL_REBIND_STEP1)
        pol->flags |= MPOL_F_REBINDING;
    else if (step == MPOL_REBIND_STEP2)
        pol->flags &= ~MPOL_F_REBINDING;
    else if (step >= MPOL_REBIND_NSTEP)
        BUG();

    mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
            enum mpol_rebind_step step)
{
    mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
    struct vm_area_struct *vma;

    down_write(&mm->mmap_sem);
    for (vma = mm->mmap; vma; vma = vma->vm_next)
        mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
    up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
    [MPOL_DEFAULT] = {
        .rebind = mpol_rebind_default,
    },
    [MPOL_INTERLEAVE] = {
        .create = mpol_new_interleave,
        .rebind = mpol_rebind_nodemask,
    },
    [MPOL_PREFERRED] = {
        .create = mpol_new_preferred,
        .rebind = mpol_rebind_preferred,
    },
    [MPOL_BIND] = {
        .create = mpol_new_bind,
        .rebind = mpol_rebind_nodemask,
    },
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                unsigned long flags);

/* Scan a pte range, checking whether each present page satisfies the
   node conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
        unsigned long addr, unsigned long end,
        const nodemask_t *nodes, unsigned long flags,
        void *private)
{
    pte_t *orig_pte;
    pte_t *pte;
    spinlock_t *ptl;

    orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    do {
        struct page *page;
        int nid;

        if (!pte_present(*pte))
            continue;
        page = vm_normal_page(vma, addr, *pte);
        if (!page)
            continue;
        /*
         * vm_normal_page() filters out zero pages, but there might
         * still be PageReserved pages to skip, perhaps in a VDSO.
         * And we cannot move PageKsm pages sensibly or safely yet.
         */
        if (PageReserved(page) || PageKsm(page))
            continue;
        nid = page_to_nid(page);
        if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
            continue;

        if (flags & MPOL_MF_STATS)
            gather_stats(page, private, pte_dirty(*pte));
        else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
            migrate_page_add(page, private, flags);
        else
            break;
    } while (pte++, addr += PAGE_SIZE, addr != end);
    pte_unmap_unlock(orig_pte, ptl);
    return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
        unsigned long addr, unsigned long end,
        const nodemask_t *nodes, unsigned long flags,
        void *private)
{
    pmd_t *pmd;
    unsigned long next;

    pmd = pmd_offset(pud, addr);
    do {
        next = pmd_addr_end(addr, end);
        if (pmd_none_or_clear_bad(pmd))
            continue;
        if (check_pte_range(vma, pmd, addr, next, nodes,
                    flags, private))
            return -EIO;
    } while (pmd++, addr = next, addr != end);
    return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
        unsigned long addr, unsigned long end,
        const nodemask_t *nodes, unsigned long flags,
        void *private)
{
    pud_t *pud;
    unsigned long next;

    pud = pud_offset(pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(pud))
            continue;
        if (check_pmd_range(vma, pud, addr, next, nodes,
                    flags, private))
            return -EIO;
    } while (pud++, addr = next, addr != end);
    return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
        unsigned long addr, unsigned long end,
        const nodemask_t *nodes, unsigned long flags,
        void *private)
{
    pgd_t *pgd;
    unsigned long next;

    pgd = pgd_offset(vma->vm_mm, addr);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(pgd))
            continue;
        if (check_pud_range(vma, pgd, addr, next, nodes,
                    flags, private))
            return -EIO;
    } while (pgd++, addr = next, addr != end);
    return 0;
}
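
/*
 * The three walkers above descend the pgd -> pud -> pmd -> pte hierarchy
 * for [addr, end); a non-zero return from check_pte_range() (it stopped
 * early on a page outside the allowed nodes with none of the STATS/MOVE
 * flags set) is turned into -EIO and propagated up to check_range().
 */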

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
        const nodemask_t *nodes, unsigned long flags, void *private)
{
    int err;
    struct vm_area_struct *first, *vma, *prev;

    first = find_vma(mm, start);
    if (!first)
        return ERR_PTR(-EFAULT);
    prev = NULL;
    for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
        if (!(flags & MPOL_MF_DISCONTIG_OK)) {
            if (!vma->vm_next && vma->vm_end < end)
                return ERR_PTR(-EFAULT);
            if (prev && prev->vm_end < vma->vm_start)
                return ERR_PTR(-EFAULT);
        }
        if (!is_vm_hugetlb_page(vma) &&
            ((flags & MPOL_MF_STRICT) ||
             ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
                vma_migratable(vma)))) {
            unsigned long endvma = vma->vm_end;

            if (endvma > end)
                endvma = end;
            if (vma->vm_start > start)
                start = vma->vm_start;
            err = check_pgd_range(vma, start, endvma, nodes,
                        flags, private);
            if (err) {
                first = ERR_PTR(err);
                break;
            }
        }
        prev = vma;
    }
    return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
    int err = 0;
    struct mempolicy *old = vma->vm_policy;

    pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
         vma->vm_start, vma->vm_end, vma->vm_pgoff,
         vma->vm_ops, vma->vm_file,
         vma->vm_ops ? vma->vm_ops->set_policy : NULL);

    if (vma->vm_ops && vma->vm_ops->set_policy)
        err = vma->vm_ops->set_policy(vma, new);
    if (!err) {
        mpol_get(new);
        vma->vm_policy = new;
        mpol_put(old);
    }
    return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
               unsigned long end, struct mempolicy *new_pol)
{
    struct vm_area_struct *next;
    struct vm_area_struct *prev;
    struct vm_area_struct *vma;
    int err = 0;
    pgoff_t pgoff;
    unsigned long vmstart;
    unsigned long vmend;

    vma = find_vma_prev(mm, start, &prev);
    if (!vma || vma->vm_start > start)
        return -EFAULT;

    for (; vma && vma->vm_start < end; prev = vma, vma = next) {
        next = vma->vm_next;
        vmstart = max(start, vma->vm_start);
        vmend = min(end, vma->vm_end);

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
                  vma->anon_vma, vma->vm_file, pgoff, new_pol);
        if (prev) {
            vma = prev;
            next = vma->vm_next;
            continue;
        }
        if (vma->vm_start != vmstart) {
            err = split_vma(vma->vm_mm, vma, vmstart, 1);
            if (err)
                goto out;
        }
        if (vma->vm_end != vmend) {
            err = split_vma(vma->vm_mm, vma, vmend, 0);
            if (err)
                goto out;
        }
        err = policy_vma(vma, new_pol);
        if (err)
            goto out;
    }

 out:
    return err;
}
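
/*
 * For instance, applying a policy to the middle of a single VMA
 * [0x1000, 0x9000) with start = 0x3000 and end = 0x5000 first tries
 * vma_merge(); failing that, mbind_range() splits at 0x3000 and again
 * at 0x5000, and policy_vma() installs new_pol on the middle piece only.
 */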

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */
void mpol_fix_fork_child_flag(struct task_struct *p)
{
    if (p->mempolicy)
        p->flags |= PF_MEMPOLICY;
    else
        p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
    mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                 nodemask_t *nodes)
{
    struct mempolicy *new, *old;
    struct mm_struct *mm = current->mm;
    NODEMASK_SCRATCH(scratch);
    int ret;

    if (!scratch)
        return -ENOMEM;

    new = mpol_new(mode, flags, nodes);
    if (IS_ERR(new)) {
        ret = PTR_ERR(new);
        goto out;
    }
    /*
     * prevent changing our mempolicy while show_numa_maps()
     * is using it.
     * Note: do_set_mempolicy() can be called at init time
     * with no 'mm'.
     */
    if (mm)
        down_write(&mm->mmap_sem);
    task_lock(current);
    ret = mpol_set_nodemask(new, nodes, scratch);
    if (ret) {
        task_unlock(current);
        if (mm)
            up_write(&mm->mmap_sem);
        mpol_put(new);
        goto out;
    }
    old = current->mempolicy;
    current->mempolicy = new;
    mpol_set_task_struct_flag();
    if (new && new->mode == MPOL_INTERLEAVE &&
        nodes_weight(new->v.nodes))
        current->il_next = first_node(new->v.nodes);
    task_unlock(current);
    if (mm)
        up_write(&mm->mmap_sem);

    mpol_put(old);
    ret = 0;
out:
    NODEMASK_SCRATCH_FREE(scratch);
    return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
    nodes_clear(*nodes);
    if (p == &default_policy)
        return;

    switch (p->mode) {
    case MPOL_BIND:
        /* Fall through */
    case MPOL_INTERLEAVE:
        *nodes = p->v.nodes;
        break;
    case MPOL_PREFERRED:
        if (!(p->flags & MPOL_F_LOCAL))
            node_set(p->v.preferred_node, *nodes);
        /* else return empty node mask for local allocation */
        break;
    default:
        BUG();
    }
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
    struct page *p;
    int err;

    err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
    if (err >= 0) {
        err = page_to_nid(p);
        put_page(p);
    }
    return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                 unsigned long addr, unsigned long flags)
{
    int err;
    struct mm_struct *mm = current->mm;
    struct vm_area_struct *vma = NULL;
    struct mempolicy *pol = current->mempolicy;

    if (flags &
        ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
        return -EINVAL;

    if (flags & MPOL_F_MEMS_ALLOWED) {
        if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
            return -EINVAL;
        *policy = 0; /* just so it's initialized */
        task_lock(current);
        *nmask = cpuset_current_mems_allowed;
        task_unlock(current);
        return 0;
    }

    if (flags & MPOL_F_ADDR) {
        /*
         * Do NOT fall back to task policy if the
         * vma/shared policy at addr is NULL. We
         * want to return MPOL_DEFAULT in this case.
         */
        down_read(&mm->mmap_sem);
        vma = find_vma_intersection(mm, addr, addr+1);
        if (!vma) {
            up_read(&mm->mmap_sem);
            return -EFAULT;
        }
        if (vma->vm_ops && vma->vm_ops->get_policy)
            pol = vma->vm_ops->get_policy(vma, addr);
        else
            pol = vma->vm_policy;
    } else if (addr)
        return -EINVAL;

    if (!pol)
        pol = &default_policy; /* indicates default behavior */

    if (flags & MPOL_F_NODE) {
        if (flags & MPOL_F_ADDR) {
            err = lookup_node(mm, addr);
            if (err < 0)
                goto out;
            *policy = err;
        } else if (pol == current->mempolicy &&
                pol->mode == MPOL_INTERLEAVE) {
            *policy = current->il_next;
        } else {
            err = -EINVAL;
            goto out;
        }
    } else {
        *policy = pol == &default_policy ? MPOL_DEFAULT :
                        pol->mode;
        /*
         * Internal mempolicy flags must be masked off before exposing
         * the policy to userspace.
         */
        *policy |= (pol->flags & MPOL_MODE_FLAGS);
    }

    if (vma) {
        up_read(&current->mm->mmap_sem);
        vma = NULL;
    }

    err = 0;
    if (nmask) {
        if (mpol_store_user_nodemask(pol)) {
            *nmask = pol->w.user_nodemask;
        } else {
            task_lock(current);
            get_policy_nodemask(pol, nmask);
            task_unlock(current);
        }
    }

 out:
    mpol_cond_put(pol);
    if (vma)
        up_read(&current->mm->mmap_sem);
    return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                unsigned long flags)
{
    /*
     * Avoid migrating a page that is shared with others.
     */
    if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
        if (!isolate_lru_page(page)) {
            list_add_tail(&page->lru, pagelist);
            inc_zone_page_state(page, NR_ISOLATED_ANON +
                        page_is_file_cache(page));
        }
    }
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
    return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
               int flags)
{
    nodemask_t nmask;
    LIST_HEAD(pagelist);
    int err = 0;

    nodes_clear(nmask);
    node_set(source, nmask);

    check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
            flags | MPOL_MF_DISCONTIG_OK, &pagelist);

    if (!list_empty(&pagelist))
        err = migrate_pages(&pagelist, new_node_page, dest, 0);

    return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
    const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
    int busy = 0;
    int err;
    nodemask_t tmp;

    err = migrate_prep();
    if (err)
        return err;

    down_read(&mm->mmap_sem);

    err = migrate_vmas(mm, from_nodes, to_nodes, flags);
    if (err)
        goto out;

    /*
     * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
     * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
     * bit in 'tmp', and return that <source, dest> pair for migration.
     * The pair of nodemasks 'to' and 'from' define the map.
     *
     * If no pair of bits is found that way, fall back to picking some
     * pair of 'source' and 'dest' bits that are not the same. If the
     * 'source' and 'dest' bits are the same, this represents a node
     * that will be migrating to itself, so no pages need move.
     *
     * If no bits are left in 'tmp', or if all remaining bits left
     * in 'tmp' correspond to the same bit in 'to', return false
     * (nothing left to migrate).
     *
     * This lets us pick a pair of nodes to migrate between, such that
     * if possible the dest node is not already occupied by some other
     * source node, minimizing the risk of overloading the memory on a
     * node that would happen if we migrated incoming memory to a node
     * before migrating outgoing memory source from that same node.
     *
     * A single scan of tmp is sufficient. As we go, we remember the
     * most recent <s, d> pair that moved (s != d). If we find a pair
     * that not only moved, but what's better, moved to an empty slot
     * (d is not set in tmp), then we break out then, with that pair.
     * Otherwise when we finish scanning tmp, we at least have the
     * most recent <s, d> pair that moved. If we get all the way through
     * the scan of tmp without finding any node that moved, much less
     * moved to an empty node, then there is nothing left worth migrating.
     */
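
    /*
     * Example: from_nodes = {0,1}, to_nodes = {1,2} gives node_remap()
     * mappings 0 -> 1 and 1 -> 2. The scan prefers <1,2> because dest 2
     * is not a remaining source, so node 1 is drained into node 2 first,
     * and only then is node 0 migrated into the now-vacated node 1.
     */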

    tmp = *from_nodes;
    while (!nodes_empty(tmp)) {
        int s, d;
        int source = -1;
        int dest = 0;

        for_each_node_mask(s, tmp) {
            d = node_remap(s, *from_nodes, *to_nodes);
            if (s == d)
                continue;

            source = s; /* Node moved. Memorize */
            dest = d;

            /* dest not in remaining from nodes? */
            if (!node_isset(dest, tmp))
                break;
        }
        if (source == -1)
            break;

        node_clear(source, tmp);
        err = migrate_to_node(mm, source, dest, flags);
        if (err > 0)
            busy += err;
        if (err < 0)
            break;
    }
out:
    up_read(&mm->mmap_sem);
    if (err < 0)
        return err;
    return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the vma pointed to by @private.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
    struct vm_area_struct *vma = (struct vm_area_struct *)private;
    unsigned long uninitialized_var(address);

    while (vma) {
        address = page_address_in_vma(page, vma);
        if (address != -EFAULT)
            break;
        vma = vma->vm_next;
    }

    /*
     * if !vma, alloc_page_vma() will use task or system default policy
     */
    return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
    const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
    return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
    return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
             unsigned short mode, unsigned short mode_flags,
             nodemask_t *nmask, unsigned long flags)
{
    struct vm_area_struct *vma;
    struct mm_struct *mm = current->mm;
    struct mempolicy *new;
    unsigned long end;
    int err;
    LIST_HEAD(pagelist);

    if (flags & ~(unsigned long)(MPOL_MF_STRICT |
                     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
        return -EINVAL;
    if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
        return -EPERM;

    if (start & ~PAGE_MASK)
        return -EINVAL;

    if (mode == MPOL_DEFAULT)
        flags &= ~MPOL_MF_STRICT;

    len = (len + PAGE_SIZE - 1) & PAGE_MASK;
    end = start + len;

    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    new = mpol_new(mode, mode_flags, nmask);
    if (IS_ERR(new))
        return PTR_ERR(new);

    /*
     * If we are using the default policy then operation
     * on discontinuous address spaces is okay after all
     */
    if (!new)
        flags |= MPOL_MF_DISCONTIG_OK;

    pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
         start, start + len, mode, mode_flags,
         nmask ? nodes_addr(*nmask)[0] : -1);

    if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

        err = migrate_prep();
        if (err)
            goto mpol_out;
    }
    {
        NODEMASK_SCRATCH(scratch);
        if (scratch) {
            down_write(&mm->mmap_sem);
            task_lock(current);
            err = mpol_set_nodemask(new, nmask, scratch);
            task_unlock(current);
            if (err)
                up_write(&mm->mmap_sem);
        } else
            err = -ENOMEM;
        NODEMASK_SCRATCH_FREE(scratch);
    }
    if (err)
        goto mpol_out;

    vma = check_range(mm, start, end, nmask,
              flags | MPOL_MF_INVERT, &pagelist);

    err = PTR_ERR(vma);
    if (!IS_ERR(vma)) {
        int nr_failed = 0;

        err = mbind_range(mm, start, end, new);

        if (!list_empty(&pagelist))
            nr_failed = migrate_pages(&pagelist, new_vma_page,
                        (unsigned long)vma, 0);

        if (!err && nr_failed && (flags & MPOL_MF_STRICT))
            err = -EIO;
    } else
        putback_lru_pages(&pagelist);

    up_write(&mm->mmap_sem);
 mpol_out:
    mpol_put(new);
    return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
             unsigned long maxnode)
{
    unsigned long k;
    unsigned long nlongs;
    unsigned long endmask;

    --maxnode;
    nodes_clear(*nodes);
    if (maxnode == 0 || !nmask)
        return 0;
    if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
        return -EINVAL;

    nlongs = BITS_TO_LONGS(maxnode);
    if ((maxnode % BITS_PER_LONG) == 0)
        endmask = ~0UL;
    else
        endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

    /* If the user specified more nodes than we support, just check
       that the unsupported part is all zero. */
    if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
        if (nlongs > PAGE_SIZE/sizeof(long))
            return -EINVAL;
        for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
            unsigned long t;
            if (get_user(t, nmask + k))
                return -EFAULT;
            if (k == nlongs - 1) {
                if (t & endmask)
                    return -EINVAL;
            } else if (t)
                return -EINVAL;
        }
        nlongs = BITS_TO_LONGS(MAX_NUMNODES);
        endmask = ~0UL;
    }

    if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
        return -EFAULT;
    nodes_addr(*nodes)[nlongs-1] &= endmask;
    return 0;
}
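
/*
 * E.g. a caller passing maxnode = 65 (so bits 0..63 are significant after
 * the decrement above) gets nlongs = 1 and endmask = ~0UL, while
 * maxnode = 3 yields endmask = 0x3, clearing stray high bits in the
 * copied word.
 */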

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
                  nodemask_t *nodes)
{
    unsigned long copy = ALIGN(maxnode-1, 64) / 8;
    const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

    if (copy > nbytes) {
        if (copy > PAGE_SIZE)
            return -EINVAL;
        if (clear_user((char __user *)mask + nbytes, copy - nbytes))
            return -EFAULT;
        copy = nbytes;
    }
    return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
        unsigned long, mode, unsigned long __user *, nmask,
        unsigned long, maxnode, unsigned, flags)
{
    nodemask_t nodes;
    int err;
    unsigned short mode_flags;

    mode_flags = mode & MPOL_MODE_FLAGS;
    mode &= ~MPOL_MODE_FLAGS;
    if (mode >= MPOL_MAX)
        return -EINVAL;
    if ((mode_flags & MPOL_F_STATIC_NODES) &&
        (mode_flags & MPOL_F_RELATIVE_NODES))
        return -EINVAL;
    err = get_nodes(&nodes, nmask, maxnode);
    if (err)
        return err;
    return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
        unsigned long, maxnode)
{
    int err;
    nodemask_t nodes;
    unsigned short flags;

    flags = mode & MPOL_MODE_FLAGS;
    mode &= ~MPOL_MODE_FLAGS;
    if ((unsigned int)mode >= MPOL_MAX)
        return -EINVAL;
    if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
        return -EINVAL;
    err = get_nodes(&nodes, nmask, maxnode);
    if (err)
        return err;
    return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
        const unsigned long __user *, old_nodes,
        const unsigned long __user *, new_nodes)
{
    const struct cred *cred = current_cred(), *tcred;
    struct mm_struct *mm = NULL;
    struct task_struct *task;
    nodemask_t task_nodes;
    int err;
    nodemask_t *old;
    nodemask_t *new;
    NODEMASK_SCRATCH(scratch);

    if (!scratch)
        return -ENOMEM;

    old = &scratch->mask1;
    new = &scratch->mask2;

    err = get_nodes(old, old_nodes, maxnode);
    if (err)
        goto out;

    err = get_nodes(new, new_nodes, maxnode);
    if (err)
        goto out;

    /* Find the mm_struct */
    read_lock(&tasklist_lock);
    task = pid ? find_task_by_vpid(pid) : current;
    if (!task) {
        read_unlock(&tasklist_lock);
        err = -ESRCH;
        goto out;
    }
    mm = get_task_mm(task);
    read_unlock(&tasklist_lock);

    err = -EINVAL;
    if (!mm)
        goto out;

    /*
     * Check if this process has the right to modify the specified
     * process. The right exists if the process has administrative
     * capabilities, superuser privileges or the same
     * userid as the target process.
     */
    rcu_read_lock();
    tcred = __task_cred(task);
    if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
        cred->uid != tcred->suid && cred->uid != tcred->uid &&
        !capable(CAP_SYS_NICE)) {
        rcu_read_unlock();
        err = -EPERM;
        goto out;
    }
    rcu_read_unlock();

    task_nodes = cpuset_mems_allowed(task);
    /* Is the user allowed to access the target nodes? */
    if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
        err = -EPERM;
        goto out;
    }

    if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
        err = -EINVAL;
        goto out;
    }

    err = security_task_movememory(task);
    if (err)
        goto out;

    err = do_migrate_pages(mm, old, new,
        capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
    if (mm)
        mmput(mm);
    NODEMASK_SCRATCH_FREE(scratch);

    return err;
}

/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
        unsigned long __user *, nmask, unsigned long, maxnode,
        unsigned long, addr, unsigned long, flags)
{
    int err;
    int uninitialized_var(pval);
    nodemask_t nodes;

    if (nmask != NULL && maxnode < MAX_NUMNODES)
        return -EINVAL;

    err = do_get_mempolicy(&pval, &nodes, addr, flags);

    if (err)
        return err;

    if (policy && put_user(pval, policy))
        return -EFAULT;

    if (nmask)
        err = copy_nodes_to_user(nmask, maxnode, &nodes);

    return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
                     compat_ulong_t __user *nmask,
                     compat_ulong_t maxnode,
                     compat_ulong_t addr, compat_ulong_t flags)
{
    long err;
    unsigned long __user *nm = NULL;
    unsigned long nr_bits, alloc_size;
    DECLARE_BITMAP(bm, MAX_NUMNODES);

    nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
    alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

    if (nmask)
        nm = compat_alloc_user_space(alloc_size);

    err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

    if (!err && nmask) {
        err = copy_from_user(bm, nm, alloc_size);
        /* ensure entire bitmap is zeroed */
        err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
        err |= compat_put_bitmap(nmask, bm, nr_bits);
    }

    return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
                     compat_ulong_t maxnode)
{
    long err = 0;
    unsigned long __user *nm = NULL;
    unsigned long nr_bits, alloc_size;
    DECLARE_BITMAP(bm, MAX_NUMNODES);

    nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
    alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

    if (nmask) {
        err = compat_get_bitmap(bm, nmask, nr_bits);
        nm = compat_alloc_user_space(alloc_size);
        err |= copy_to_user(nm, bm, alloc_size);
    }

    if (err)
        return -EFAULT;

    return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
                 compat_ulong_t mode, compat_ulong_t __user *nmask,
                 compat_ulong_t maxnode, compat_ulong_t flags)
{
    long err = 0;
    unsigned long __user *nm = NULL;
    unsigned long nr_bits, alloc_size;
    nodemask_t bm;

    nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
    alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

    if (nmask) {
        err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
        nm = compat_alloc_user_space(alloc_size);
        err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
    }

    if (err)
        return -EFAULT;

    return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task. It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct task_struct *task,
        struct vm_area_struct *vma, unsigned long addr)
{
    struct mempolicy *pol = task->mempolicy;

    if (vma) {
        if (vma->vm_ops && vma->vm_ops->get_policy) {
            struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
                                    addr);
            if (vpol)
                pol = vpol;
        } else if (vma->vm_policy)
            pol = vma->vm_policy;
    }
    if (!pol)
        pol = &default_policy;
    return pol;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
    /* Lower zones don't get a nodemask applied for MPOL_BIND */
    if (unlikely(policy->mode == MPOL_BIND) &&
            gfp_zone(gfp) >= policy_zone &&
            cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
        return &policy->v.nodes;

    return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
{
    int nd = numa_node_id();

    switch (policy->mode) {
    case MPOL_PREFERRED:
        if (!(policy->flags & MPOL_F_LOCAL))
            nd = policy->v.preferred_node;
        break;
    case MPOL_BIND:
        /*
         * Normally, MPOL_BIND allocations are node-local within the
         * allowed nodemask. However, if __GFP_THISNODE is set and the
         * current node isn't part of the mask, we use the zonelist for
         * the first node in the mask instead.
         */
        if (unlikely(gfp & __GFP_THISNODE) &&
                unlikely(!node_isset(nd, policy->v.nodes)))
            nd = first_node(policy->v.nodes);
        break;
    default:
        BUG();
    }
    return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
    unsigned nid, next;
    struct task_struct *me = current;

    nid = me->il_next;
    next = next_node(nid, policy->v.nodes);
    if (next >= MAX_NUMNODES)
        next = first_node(policy->v.nodes);
    if (next < MAX_NUMNODES)
        me->il_next = next;
    return nid;
}
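
/*
 * E.g. with v.nodes = {0,2,4} and il_next = 2, interleave_nodes() returns
 * 2 and advances il_next to 4; the next call returns 4 and wraps il_next
 * back to 0.
 */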

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected against freeing by the caller. If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change its policy. The system default policy requires no
 * such protection.
 */
unsigned slab_node(struct mempolicy *policy)
{
    if (!policy || policy->flags & MPOL_F_LOCAL)
        return numa_node_id();

    switch (policy->mode) {
    case MPOL_PREFERRED:
        /*
         * handled MPOL_F_LOCAL above
         */
        return policy->v.preferred_node;

    case MPOL_INTERLEAVE:
        return interleave_nodes(policy);

    case MPOL_BIND: {
        /*
         * Follow bind policy behavior and start allocation at the
         * first node.
         */
        struct zonelist *zonelist;
        struct zone *zone;
        enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
        zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
        (void)first_zones_zonelist(zonelist, highest_zoneidx,
                            &policy->v.nodes,
                            &zone);
        return zone->node;
    }

    default:
        BUG();
    }
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
        struct vm_area_struct *vma, unsigned long off)
{
    unsigned nnodes = nodes_weight(pol->v.nodes);
    unsigned target;
    int c;
    int nid = -1;

    if (!nnodes)
        return numa_node_id();
    target = (unsigned int)off % nnodes;
    c = 0;
    do {
        nid = next_node(nid, pol->v.nodes);
        c++;
    } while (c <= target);
    return nid;
}
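
/*
 * E.g. with pol->v.nodes = {0,3,5}, an offset of 7 gives
 * target = 7 % 3 = 1, and the walk above lands on node 3, the second
 * node in the mask.
 */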

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
         struct vm_area_struct *vma, unsigned long addr, int shift)
{
    if (vma) {
        unsigned long off;

        /*
         * for small pages, there is no difference between
         * shift and PAGE_SHIFT, so the bit-shift is safe.
         * for huge pages, since vm_pgoff is in units of small
         * pages, we need to shift off the always 0 bits to get
         * a useful offset.
         */
        BUG_ON(shift < PAGE_SHIFT);
        off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
        off += (addr - vma->vm_start) >> shift;
        return offset_il_node(pol, vma, off);
    } else
        return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by get_mems_allowed()
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                gfp_t gfp_flags, struct mempolicy **mpol,
                nodemask_t **nodemask)
{
    struct zonelist *zl;

    *mpol = get_vma_policy(current, vma, addr);
    *nodemask = NULL; /* assume !MPOL_BIND */

    if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
        zl = node_zonelist(interleave_nid(*mpol, vma, addr,
                huge_page_shift(hstate_vma(vma))), gfp_flags);
    } else {
        zl = policy_zonelist(gfp_flags, *mpol);
        if ((*mpol)->mode == MPOL_BIND)
            *nodemask = &(*mpol)->v.nodes;
    }
    return zl;
}

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy. Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
    struct mempolicy *mempolicy;
    int nid;

    if (!(mask && current->mempolicy))
        return false;

    task_lock(current);
    mempolicy = current->mempolicy;
    switch (mempolicy->mode) {
    case MPOL_PREFERRED:
        if (mempolicy->flags & MPOL_F_LOCAL)
            nid = numa_node_id();
        else
            nid = mempolicy->v.preferred_node;
        init_nodemask_of_node(mask, nid);
        break;

    case MPOL_BIND:
        /* Fall through */
    case MPOL_INTERLEAVE:
        *mask = mempolicy->v.nodes;
        break;

    default:
        BUG();
    }
    task_unlock(current);

    return true;
}
#endif
1726
1727/*
1728 * mempolicy_nodemask_intersects
1729 *
1730 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1731 * policy. Otherwise, check for intersection between mask and the policy
1732 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1733 * policy, always return true since it may allocate elsewhere on fallback.
1734 *
1735 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1736 */
1737bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1738                    const nodemask_t *mask)
1739{
1740    struct mempolicy *mempolicy;
1741    bool ret = true;
1742
1743    if (!mask)
1744        return ret;
1745    task_lock(tsk);
1746    mempolicy = tsk->mempolicy;
1747    if (!mempolicy)
1748        goto out;
1749
1750    switch (mempolicy->mode) {
1751    case MPOL_PREFERRED:
1752        /*
1753         * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
1754         * to allocate from; the task may fall back to other nodes on OOM.
1755         * Thus, it's possible for tsk to have allocated memory from
1756         * nodes in mask.
1757         */
1758        break;
1759    case MPOL_BIND:
1760    case MPOL_INTERLEAVE:
1761        ret = nodes_intersects(mempolicy->v.nodes, *mask);
1762        break;
1763    default:
1764        BUG();
1765    }
1766out:
1767    task_unlock(tsk);
1768    return ret;
1769}
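
/*
 * Sketch of the oom-killer style check this helper supports (a fragment,
 * not code from this file): skip candidate tasks whose policy cannot
 * allocate from the nodes under pressure, since killing them would free
 * nothing usable there.
 *
 *	if (!mempolicy_nodemask_intersects(task, oom_nodemask))
 *		continue;
 */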
1770
1771/* Allocate a page in interleaved policy.
1772   Own path because it needs to do special accounting. */
1773static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1774                    unsigned nid)
1775{
1776    struct zonelist *zl;
1777    struct page *page;
1778
1779    zl = node_zonelist(nid, gfp);
1780    page = __alloc_pages(gfp, order, zl);
1781    if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1782        inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1783    return page;
1784}
1785
1786/**
1787 * alloc_page_vma - Allocate a page for a VMA.
1788 *
1789 * @gfp:
1790 * %GFP_USER user allocation.
1791 * %GFP_KERNEL kernel allocation.
1792 * %GFP_HIGHMEM highmem/user allocation.
1793 * %GFP_FS allocation should not call back into a file system.
1794 * %GFP_ATOMIC don't sleep.
1795 *
1796 * @vma: Pointer to VMA or NULL if not available.
1797 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1798 *
1799 * This function allocates a page from the kernel page pool and applies
1800 * a NUMA policy associated with the VMA or the current process.
1801 * When @vma is not NULL the caller must hold down_read on the mmap_sem
1802 * of the VMA's mm_struct to prevent it from going away. Should be used
1803 * for all allocations for pages that will be mapped into user space.
1804 * Returns NULL when no page can be allocated.
1807 */
1808struct page *
1809alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1810{
1811    struct mempolicy *pol = get_vma_policy(current, vma, addr);
1812    struct zonelist *zl;
1813    struct page *page;
1814
1815    get_mems_allowed();
1816    if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1817        unsigned nid;
1818
1819        nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1820        mpol_cond_put(pol);
1821        page = alloc_page_interleave(gfp, 0, nid);
1822        put_mems_allowed();
1823        return page;
1824    }
1825    zl = policy_zonelist(gfp, pol);
1826    if (unlikely(mpol_needs_cond_ref(pol))) {
1827        /*
1828         * slow path: ref counted shared policy
1829         */
1830        struct page *page = __alloc_pages_nodemask(gfp, 0,
1831                        zl, policy_nodemask(gfp, pol));
1832        __mpol_put(pol);
1833        put_mems_allowed();
1834        return page;
1835    }
1836    /*
1837     * fast path: default or task policy
1838     */
1839    page = __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
1840    put_mems_allowed();
1841    return page;
1842}
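
/*
 * Typical use, sketched after the anonymous fault path (cf.
 * alloc_zeroed_user_highpage_movable()):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO,
 *			      vma, address);
 */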
1843
1844/**
1845 * alloc_pages_current - Allocate pages.
1846 *
1847 * @gfp:
1848 * %GFP_USER user allocation,
1849 * %GFP_KERNEL kernel allocation,
1850 * %GFP_HIGHMEM highmem allocation,
1851 * %GFP_FS don't call back into a file system.
1852 * %GFP_ATOMIC don't sleep.
1853 * @order: Power of two of allocation size in pages. 0 is a single page.
1854 *
1855 * Allocate a page from the kernel page pool. When not in
1856 * interrupt context, apply the current process' NUMA policy.
1857 * Returns NULL when no page can be allocated.
1858 *
1859 * Don't call cpuset_update_task_memory_state() unless
1860 * 1) it's ok to take cpuset_sem (can WAIT), and
1861 * 2) allocating for current task (not interrupt).
1862 */
1863struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1864{
1865    struct mempolicy *pol = current->mempolicy;
1866    struct page *page;
1867
1868    if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1869        pol = &default_policy;
1870
1871    get_mems_allowed();
1872    /*
1873     * No reference counting needed for current->mempolicy
1874     * nor system default_policy
1875     */
1876    if (pol->mode == MPOL_INTERLEAVE)
1877        page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1878    else
1879        page = __alloc_pages_nodemask(gfp, order,
1880            policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1881    put_mems_allowed();
1882    return page;
1883}
1884EXPORT_SYMBOL(alloc_pages_current);
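
/*
 * On CONFIG_NUMA kernels the generic alloc_pages() entry point in
 * <linux/gfp.h> reduces, roughly, to a wrapper around this function:
 *
 *	static inline struct page *
 *	alloc_pages(gfp_t gfp_mask, unsigned int order)
 *	{
 *		return alloc_pages_current(gfp_mask, order);
 *	}
 */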
1885
1886/*
1887 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1888 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
1889 * with the mems_allowed returned by cpuset_mems_allowed(). This
1890 * keeps mempolicies cpuset relative after its cpuset moves. See
1891 * further kernel/cpuset.c update_nodemask().
1892 *
1893 * current's mempolicy may be rebound by another task (the task that
1894 * changes the cpuset's mems), so we need not rebind it for current here.
1895 */
1896
1897/* Slow path of a mempolicy duplicate */
1898struct mempolicy *__mpol_dup(struct mempolicy *old)
1899{
1900    struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1901
1902    if (!new)
1903        return ERR_PTR(-ENOMEM);
1904
1905    /* task's mempolicy is protected by alloc_lock */
1906    if (old == current->mempolicy) {
1907        task_lock(current);
1908        *new = *old;
1909        task_unlock(current);
1910    } else
1911        *new = *old;
1912
1913    rcu_read_lock();
1914    if (current_cpuset_is_being_rebound()) {
1915        nodemask_t mems = cpuset_mems_allowed(current);
1916        if (new->flags & MPOL_F_REBINDING)
1917            mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1918        else
1919            mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
1920    }
1921    rcu_read_unlock();
1922    atomic_set(&new->refcnt, 1);
1923    return new;
1924}
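
/*
 * Callers normally use the mpol_dup() inline from <linux/mempolicy.h>,
 * which, roughly, short-circuits the NULL (default policy) case:
 *
 *	if (pol)
 *		pol = __mpol_dup(pol);
 */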
1925
1926/*
1927 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
1928 * eliminate the MPOL_F_* flags that require conditional ref and
1929 * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
1930 * after return. Use the returned value.
1931 *
1932 * Allows use of a mempolicy for, e.g., multiple allocations with a single
1933 * policy lookup, even if the policy needs/has extra ref on lookup.
1934 * shmem_readahead needs this.
1935 */
1936struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1937                        struct mempolicy *frompol)
1938{
1939    if (!mpol_needs_cond_ref(frompol))
1940        return frompol;
1941
1942    *tompol = *frompol;
1943    tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
1944    __mpol_put(frompol);
1945    return tompol;
1946}
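
/*
 * Usage sketch (cf. shmem_swapin(), which stabilizes a possibly shared
 * policy in an on-stack copy before several allocations):
 *
 *	struct mempolicy mpol, *spol;
 *	spol = mpol_cond_copy(&mpol,
 *			mpol_shared_policy_lookup(&info->policy, idx));
 */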
1947
1948/* Slow path of a mempolicy comparison */
1949int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1950{
1951    if (!a || !b)
1952        return 0;
1953    if (a->mode != b->mode)
1954        return 0;
1955    if (a->flags != b->flags)
1956        return 0;
1957    if (mpol_store_user_nodemask(a))
1958        if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
1959            return 0;
1960
1961    switch (a->mode) {
1962    case MPOL_BIND:
1963        /* Fall through */
1964    case MPOL_INTERLEAVE:
1965        return nodes_equal(a->v.nodes, b->v.nodes);
1966    case MPOL_PREFERRED:
1967        return a->v.preferred_node == b->v.preferred_node &&
1968            a->flags == b->flags;
1969    default:
1970        BUG();
1971        return 0;
1972    }
1973}
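
/*
 * The fast path is the mpol_equal() inline from <linux/mempolicy.h>,
 * roughly:
 *
 *	if (a == b)
 *		return 1;
 *	return __mpol_equal(a, b);
 */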
1974
1975/*
1976 * Shared memory backing store policy support.
1977 *
1978 * Remember policies even when nobody has shared memory mapped.
1979 * The policies are kept in Red-Black tree linked from the inode.
1980 * They are protected by the sp->lock spinlock, which should be held
1981 * for any accesses to the tree.
1982 */
1983
1984/* lookup first element intersecting start-end */
1985/* Caller holds sp->lock */
1986static struct sp_node *
1987sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1988{
1989    struct rb_node *n = sp->root.rb_node;
1990
1991    while (n) {
1992        struct sp_node *p = rb_entry(n, struct sp_node, nd);
1993
1994        if (start >= p->end)
1995            n = n->rb_right;
1996        else if (end <= p->start)
1997            n = n->rb_left;
1998        else
1999            break;
2000    }
2001    if (!n)
2002        return NULL;
2003    for (;;) {
2004        struct sp_node *w = NULL;
2005        struct rb_node *prev = rb_prev(n);
2006        if (!prev)
2007            break;
2008        w = rb_entry(prev, struct sp_node, nd);
2009        if (w->end <= start)
2010            break;
2011        n = prev;
2012    }
2013    return rb_entry(n, struct sp_node, nd);
2014}
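
/*
 * Worked example: with stored ranges [2,5) and [5,9), sp_lookup(sp, 4, 7)
 * descends to [5,9) (which overlaps [4,7)), then walks rb_prev() back to
 * [2,5), the lowest range still intersecting the query, and returns that.
 */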
2015
2016/* Insert a new shared policy into the list. */
2017/* Caller holds sp->lock */
2018static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2019{
2020    struct rb_node **p = &sp->root.rb_node;
2021    struct rb_node *parent = NULL;
2022    struct sp_node *nd;
2023
2024    while (*p) {
2025        parent = *p;
2026        nd = rb_entry(parent, struct sp_node, nd);
2027        if (new->start < nd->start)
2028            p = &(*p)->rb_left;
2029        else if (new->end > nd->end)
2030            p = &(*p)->rb_right;
2031        else
2032            BUG();
2033    }
2034    rb_link_node(&new->nd, parent, p);
2035    rb_insert_color(&new->nd, &sp->root);
2036    pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2037         new->policy ? new->policy->mode : 0);
2038}
2039
2040/* Find shared policy intersecting idx */
2041struct mempolicy *
2042mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2043{
2044    struct mempolicy *pol = NULL;
2045    struct sp_node *sn;
2046
2047    if (!sp->root.rb_node)
2048        return NULL;
2049    spin_lock(&sp->lock);
2050    sn = sp_lookup(sp, idx, idx+1);
2051    if (sn) {
2052        mpol_get(sn->policy);
2053        pol = sn->policy;
2054    }
2055    spin_unlock(&sp->lock);
2056    return pol;
2057}
2058
2059static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2060{
2061    pr_debug("deleting %lx-%lx\n", n->start, n->end);
2062    rb_erase(&n->nd, &sp->root);
2063    mpol_put(n->policy);
2064    kmem_cache_free(sn_cache, n);
2065}
2066
2067static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2068                struct mempolicy *pol)
2069{
2070    struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2071
2072    if (!n)
2073        return NULL;
2074    n->start = start;
2075    n->end = end;
2076    mpol_get(pol);
2077    pol->flags |= MPOL_F_SHARED; /* for unref */
2078    n->policy = pol;
2079    return n;
2080}
2081
2082/* Replace a policy range. */
2083static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2084                 unsigned long end, struct sp_node *new)
2085{
2086    struct sp_node *n, *new2 = NULL;
2087
2088restart:
2089    spin_lock(&sp->lock);
2090    n = sp_lookup(sp, start, end);
2091    /* Take care of old policies in the same range. */
2092    while (n && n->start < end) {
2093        struct rb_node *next = rb_next(&n->nd);
2094        if (n->start >= start) {
2095            if (n->end <= end)
2096                sp_delete(sp, n);
2097            else
2098                n->start = end;
2099        } else {
2100            /* Old policy spanning whole new range. */
2101            if (n->end > end) {
2102                if (!new2) {
2103                    spin_unlock(&sp->lock);
2104                    new2 = sp_alloc(end, n->end, n->policy);
2105                    if (!new2)
2106                        return -ENOMEM;
2107                    goto restart;
2108                }
2109                n->end = start;
2110                sp_insert(sp, new2);
2111                new2 = NULL;
2112                break;
2113            } else
2114                n->end = start;
2115        }
2116        if (!next)
2117            break;
2118        n = rb_entry(next, struct sp_node, nd);
2119    }
2120    if (new)
2121        sp_insert(sp, new);
2122    spin_unlock(&sp->lock);
2123    if (new2) {
2124        mpol_put(new2->policy);
2125        kmem_cache_free(sn_cache, new2);
2126    }
2127    return 0;
2128}
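
/*
 * Worked example: if the tree holds one range [0,10) with policy A and we
 * install [3,6) with policy B, the old node is clipped to [0,3), a new2
 * node [6,10) carrying A is allocated (dropping sp->lock and restarting
 * when the allocation is needed), and the final tree reads
 * [0,3)A, [3,6)B, [6,10)A.
 */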
2129
2130/**
2131 * mpol_shared_policy_init - initialize shared policy for inode
2132 * @sp: pointer to inode shared policy
2133 * @mpol: struct mempolicy to install
2134 *
2135 * Install non-NULL @mpol in inode's shared policy rb-tree.
2136 * On entry, the current task has a reference on a non-NULL @mpol.
2137 * This must be released on exit.
2138 * This is called at get_inode() time, so we can use GFP_KERNEL.
2139 */
2140void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2141{
2142    int ret;
2143
2144    sp->root = RB_ROOT; /* empty tree == default mempolicy */
2145    spin_lock_init(&sp->lock);
2146
2147    if (mpol) {
2148        struct vm_area_struct pvma;
2149        struct mempolicy *new;
2150        NODEMASK_SCRATCH(scratch);
2151
2152        if (!scratch)
2153            goto put_mpol;
2154        /* contextualize the tmpfs mount point mempolicy */
2155        new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2156        if (IS_ERR(new))
2157            goto free_scratch; /* no valid nodemask intersection */
2158
2159        task_lock(current);
2160        ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2161        task_unlock(current);
2162        if (ret)
2163            goto put_new;
2164
2165        /* Create pseudo-vma that contains just the policy */
2166        memset(&pvma, 0, sizeof(struct vm_area_struct));
2167        pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2168        mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2169
2170put_new:
2171        mpol_put(new); /* drop initial ref */
2172free_scratch:
2173        NODEMASK_SCRATCH_FREE(scratch);
2174put_mpol:
2175        mpol_put(mpol); /* drop our incoming ref on sb mpol */
2176    }
2177}
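
/*
 * This is what backs the tmpfs "mpol=" mount option; an illustrative
 * invocation from user space would be:
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 none /mnt
 *
 * mpol_parse_str() below turns the option string into the @mpol
 * reference consumed here.
 */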
2178
2179int mpol_set_shared_policy(struct shared_policy *info,
2180            struct vm_area_struct *vma, struct mempolicy *npol)
2181{
2182    int err;
2183    struct sp_node *new = NULL;
2184    unsigned long sz = vma_pages(vma);
2185
2186    pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2187         vma->vm_pgoff,
2188         sz, npol ? npol->mode : -1,
2189         npol ? npol->flags : -1,
2190         npol ? nodes_addr(npol->v.nodes)[0] : -1);
2191
2192    if (npol) {
2193        new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2194        if (!new)
2195            return -ENOMEM;
2196    }
2197    err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2198    if (err && new)
2199        kmem_cache_free(sn_cache, new);
2200    return err;
2201}
2202
2203/* Free a backing policy store on inode delete. */
2204void mpol_free_shared_policy(struct shared_policy *p)
2205{
2206    struct sp_node *n;
2207    struct rb_node *next;
2208
2209    if (!p->root.rb_node)
2210        return;
2211    spin_lock(&p->lock);
2212    next = rb_first(&p->root);
2213    while (next) {
2214        n = rb_entry(next, struct sp_node, nd);
2215        next = rb_next(&n->nd);
2216        rb_erase(&n->nd, &p->root);
2217        mpol_put(n->policy);
2218        kmem_cache_free(sn_cache, n);
2219    }
2220    spin_unlock(&p->lock);
2221}
2222
2223/* assumes fs == KERNEL_DS */
2224void __init numa_policy_init(void)
2225{
2226    nodemask_t interleave_nodes;
2227    unsigned long largest = 0;
2228    int nid, prefer = 0;
2229
2230    policy_cache = kmem_cache_create("numa_policy",
2231                     sizeof(struct mempolicy),
2232                     0, SLAB_PANIC, NULL);
2233
2234    sn_cache = kmem_cache_create("shared_policy_node",
2235                     sizeof(struct sp_node),
2236                     0, SLAB_PANIC, NULL);
2237
2238    /*
2239     * Set interleaving policy for system init. Interleaving is only
2240     * enabled across suitably sized nodes (default is >= 16MB), or
2241     * fall back to the largest node if they're all smaller.
2242     */
2243    nodes_clear(interleave_nodes);
2244    for_each_node_state(nid, N_HIGH_MEMORY) {
2245        unsigned long total_pages = node_present_pages(nid);
2246
2247        /* Preserve the largest node */
2248        if (largest < total_pages) {
2249            largest = total_pages;
2250            prefer = nid;
2251        }
2252
2253        /* Interleave this node? */
2254        if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2255            node_set(nid, interleave_nodes);
2256    }
2257
2258    /* All too small, use the largest */
2259    if (unlikely(nodes_empty(interleave_nodes)))
2260        node_set(prefer, interleave_nodes);
2261
2262    if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2263        printk(KERN_ERR "numa_policy_init: interleaving failed\n");
2264}
2265
2266/* Reset policy of current process to default */
2267void numa_default_policy(void)
2268{
2269    do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2270}
2271
2272/*
2273 * Parse and format mempolicy from/to strings
2274 */
2275
2276/*
2277 * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
2278 * Used only for mpol_parse_str() and mpol_to_str()
2279 */
2280#define MPOL_LOCAL MPOL_MAX
2281static const char * const policy_modes[] =
2282{
2283    [MPOL_DEFAULT] = "default",
2284    [MPOL_PREFERRED] = "prefer",
2285    [MPOL_BIND] = "bind",
2286    [MPOL_INTERLEAVE] = "interleave",
2287    [MPOL_LOCAL] = "local"
2288};
2289
2290
2291#ifdef CONFIG_TMPFS
2292/**
2293 * mpol_parse_str - parse string to mempolicy
2294 * @str: string containing mempolicy to parse
2295 * @mpol: pointer to struct mempolicy pointer, returned on success.
2296 * @no_context: flag whether to "contextualize" the mempolicy
2297 *
2298 * Format of input:
2299 * <mode>[=<flags>][:<nodelist>]
2300 *
2301 * if @no_context is true, save the input nodemask in w.user_nodemask in
2302 * the returned mempolicy. This will be used to "clone" the mempolicy in
2303 * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
2304 * mount option. Note that if 'static' or 'relative' mode flags were
2305 * specified, the input nodemask will already have been saved. Saving
2306 * it again is redundant, but safe.
2307 *
2308 * On success, returns 0, else 1
2309 */
2310int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2311{
2312    struct mempolicy *new = NULL;
2313    unsigned short mode;
2314    unsigned short uninitialized_var(mode_flags);
2315    nodemask_t nodes;
2316    char *nodelist = strchr(str, ':');
2317    char *flags = strchr(str, '=');
2318    int err = 1;
2319
2320    if (nodelist) {
2321        /* NUL-terminate mode or flags string */
2322        *nodelist++ = '\0';
2323        if (nodelist_parse(nodelist, nodes))
2324            goto out;
2325        if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2326            goto out;
2327    } else
2328        nodes_clear(nodes);
2329
2330    if (flags)
2331        *flags++ = '\0'; /* terminate mode string */
2332
2333    for (mode = 0; mode <= MPOL_LOCAL; mode++)
2334        if (!strcmp(str, policy_modes[mode]))
2335            break;
2338    if (mode > MPOL_LOCAL)
2339        goto out;
2340
2341    switch (mode) {
2342    case MPOL_PREFERRED:
2343        /*
2344         * Insist on a nodelist of one node only
2345         */
2346        if (nodelist) {
2347            char *rest = nodelist;
2348            while (isdigit(*rest))
2349                rest++;
2350            if (*rest)
2351                goto out;
2352        }
2353        break;
2354    case MPOL_INTERLEAVE:
2355        /*
2356         * Default to online nodes with memory if no nodelist
2357         */
2358        if (!nodelist)
2359            nodes = node_states[N_HIGH_MEMORY];
2360        break;
2361    case MPOL_LOCAL:
2362        /*
2363         * Don't allow a nodelist; mpol_new() checks flags
2364         */
2365        if (nodelist)
2366            goto out;
2367        mode = MPOL_PREFERRED;
2368        break;
2369    case MPOL_DEFAULT:
2370        /*
2371         * Insist on an empty nodelist
2372         */
2373        if (!nodelist)
2374            err = 0;
2375        goto out;
2376    case MPOL_BIND:
2377        /*
2378         * Insist on a nodelist
2379         */
2380        if (!nodelist)
2381            goto out;
2382    }
2383
2384    mode_flags = 0;
2385    if (flags) {
2386        /*
2387         * Currently, we only support two mutually exclusive
2388         * mode flags.
2389         */
2390        if (!strcmp(flags, "static"))
2391            mode_flags |= MPOL_F_STATIC_NODES;
2392        else if (!strcmp(flags, "relative"))
2393            mode_flags |= MPOL_F_RELATIVE_NODES;
2394        else
2395            goto out;
2396    }
2397
2398    new = mpol_new(mode, mode_flags, &nodes);
2399    if (IS_ERR(new))
2400        goto out;
2401
2402    if (no_context) {
2403        /* save for contextualization */
2404        new->w.user_nodemask = nodes;
2405    } else {
2406        int ret;
2407        NODEMASK_SCRATCH(scratch);
2408        if (scratch) {
2409            task_lock(current);
2410            ret = mpol_set_nodemask(new, &nodes, scratch);
2411            task_unlock(current);
2412        } else
2413            ret = -ENOMEM;
2414        NODEMASK_SCRATCH_FREE(scratch);
2415        if (ret) {
2416            mpol_put(new);
2417            goto out;
2418        }
2419    }
2420    err = 0;
2421
2422out:
2423    /* Restore string for error message */
2424    if (nodelist)
2425        *--nodelist = ':';
2426    if (flags)
2427        *--flags = '=';
2428    if (!err)
2429        *mpol = new;
2430    return err;
2431}
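
/*
 * Illustrative inputs accepted by mpol_parse_str():
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED | MPOL_F_STATIC_NODES, node 1
 *	"bind=relative:0,2"	MPOL_BIND | MPOL_F_RELATIVE_NODES
 *	"local"			MPOL_PREFERRED | MPOL_F_LOCAL
 *	"default"		NULL mempolicy; no nodelist allowed
 */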
2432#endif /* CONFIG_TMPFS */
2433
2434/**
2435 * mpol_to_str - format a mempolicy structure for printing
2436 * @buffer: to contain formatted mempolicy string
2437 * @maxlen: length of @buffer
2438 * @pol: pointer to mempolicy to be formatted
2439 * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
2440 *
2441 * Convert a mempolicy into a string.
2442 * Returns the number of characters in buffer (if positive)
2443 * or an error (negative)
2444 */
2445int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2446{
2447    char *p = buffer;
2448    int l;
2449    nodemask_t nodes;
2450    unsigned short mode;
2451    unsigned short flags = pol ? pol->flags : 0;
2452
2453    /*
2454     * Sanity check: room for longest mode, flag and some nodes
2455     */
2456    VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2457
2458    if (!pol || pol == &default_policy)
2459        mode = MPOL_DEFAULT;
2460    else
2461        mode = pol->mode;
2462
2463    switch (mode) {
2464    case MPOL_DEFAULT:
2465        nodes_clear(nodes);
2466        break;
2467
2468    case MPOL_PREFERRED:
2469        nodes_clear(nodes);
2470        if (flags & MPOL_F_LOCAL)
2471            mode = MPOL_LOCAL; /* pseudo-policy */
2472        else
2473            node_set(pol->v.preferred_node, nodes);
2474        break;
2475
2476    case MPOL_BIND:
2477        /* Fall through */
2478    case MPOL_INTERLEAVE:
2479        if (no_context)
2480            nodes = pol->w.user_nodemask;
2481        else
2482            nodes = pol->v.nodes;
2483        break;
2484
2485    default:
2486        BUG();
2487    }
2488
2489    l = strlen(policy_modes[mode]);
2490    if (buffer + maxlen < p + l + 1)
2491        return -ENOSPC;
2492
2493    strcpy(p, policy_modes[mode]);
2494    p += l;
2495
2496    if (flags & MPOL_MODE_FLAGS) {
2497        if (buffer + maxlen < p + 2)
2498            return -ENOSPC;
2499        *p++ = '=';
2500
2501        /*
2502         * Currently, the only defined flags are mutually exclusive
2503         */
2504        if (flags & MPOL_F_STATIC_NODES)
2505            p += snprintf(p, buffer + maxlen - p, "static");
2506        else if (flags & MPOL_F_RELATIVE_NODES)
2507            p += snprintf(p, buffer + maxlen - p, "relative");
2508    }
2509
2510    if (!nodes_empty(nodes)) {
2511        if (buffer + maxlen < p + 2)
2512            return -ENOSPC;
2513        *p++ = ':';
2514        p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2515    }
2516    return p - buffer;
2517}
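
/*
 * Illustrative outputs, the inverse of the mpol_parse_str() examples
 * above:
 *
 *	MPOL_INTERLEAVE over nodes 0-3		-> "interleave:0-3"
 *	MPOL_BIND | MPOL_F_STATIC_NODES, node 1	-> "bind=static:1"
 *	MPOL_PREFERRED | MPOL_F_LOCAL		-> "local"
 */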
2518
2519struct numa_maps {
2520    unsigned long pages;
2521    unsigned long anon;
2522    unsigned long active;
2523    unsigned long writeback;
2524    unsigned long mapcount_max;
2525    unsigned long dirty;
2526    unsigned long swapcache;
2527    unsigned long node[MAX_NUMNODES];
2528};
2529
2530static void gather_stats(struct page *page, void *private, int pte_dirty)
2531{
2532    struct numa_maps *md = private;
2533    int count = page_mapcount(page);
2534
2535    md->pages++;
2536    if (pte_dirty || PageDirty(page))
2537        md->dirty++;
2538
2539    if (PageSwapCache(page))
2540        md->swapcache++;
2541
2542    if (PageActive(page) || PageUnevictable(page))
2543        md->active++;
2544
2545    if (PageWriteback(page))
2546        md->writeback++;
2547
2548    if (PageAnon(page))
2549        md->anon++;
2550
2551    if (count > md->mapcount_max)
2552        md->mapcount_max = count;
2553
2554    md->node[page_to_nid(page)]++;
2555}
2556
2557#ifdef CONFIG_HUGETLB_PAGE
2558static void check_huge_range(struct vm_area_struct *vma,
2559        unsigned long start, unsigned long end,
2560        struct numa_maps *md)
2561{
2562    unsigned long addr;
2563    struct page *page;
2564    struct hstate *h = hstate_vma(vma);
2565    unsigned long sz = huge_page_size(h);
2566
2567    for (addr = start; addr < end; addr += sz) {
2568        pte_t *ptep = huge_pte_offset(vma->vm_mm,
2569                        addr & huge_page_mask(h));
2570        pte_t pte;
2571
2572        if (!ptep)
2573            continue;
2574
2575        pte = *ptep;
2576        if (pte_none(pte))
2577            continue;
2578
2579        page = pte_page(pte);
2580        if (!page)
2581            continue;
2582
2583        gather_stats(page, md, pte_dirty(*ptep));
2584    }
2585}
2586#else
2587static inline void check_huge_range(struct vm_area_struct *vma,
2588        unsigned long start, unsigned long end,
2589        struct numa_maps *md)
2590{
2591}
2592#endif
2593
2594/*
2595 * Display pages allocated per node and memory policy via /proc.
2596 */
2597int show_numa_map(struct seq_file *m, void *v)
2598{
2599    struct proc_maps_private *priv = m->private;
2600    struct vm_area_struct *vma = v;
2601    struct numa_maps *md;
2602    struct file *file = vma->vm_file;
2603    struct mm_struct *mm = vma->vm_mm;
2604    struct mempolicy *pol;
2605    int n;
2606    char buffer[50];
2607
2608    if (!mm)
2609        return 0;
2610
2611    md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2612    if (!md)
2613        return 0;
2614
2615    pol = get_vma_policy(priv->task, vma, vma->vm_start);
2616    mpol_to_str(buffer, sizeof(buffer), pol, 0);
2617    mpol_cond_put(pol);
2618
2619    seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2620
2621    if (file) {
2622        seq_printf(m, " file=");
2623        seq_path(m, &file->f_path, "\n\t= ");
2624    } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2625        seq_printf(m, " heap");
2626    } else if (vma->vm_start <= mm->start_stack &&
2627            vma->vm_end >= mm->start_stack) {
2628        seq_printf(m, " stack");
2629    }
2630
2631    if (is_vm_hugetlb_page(vma)) {
2632        check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2633        seq_printf(m, " huge");
2634    } else {
2635        check_pgd_range(vma, vma->vm_start, vma->vm_end,
2636            &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2637    }
2638
2639    if (!md->pages)
2640        goto out;
2641
2642    if (md->anon)
2643        seq_printf(m, " anon=%lu", md->anon);
2644
2645    if (md->dirty)
2646        seq_printf(m, " dirty=%lu", md->dirty);
2647
2648    if (md->pages != md->anon && md->pages != md->dirty)
2649        seq_printf(m, " mapped=%lu", md->pages);
2650
2651    if (md->mapcount_max > 1)
2652        seq_printf(m, " mapmax=%lu", md->mapcount_max);
2653
2654    if (md->swapcache)
2655        seq_printf(m, " swapcache=%lu", md->swapcache);
2656
2657    if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2658        seq_printf(m, " active=%lu", md->active);
2659
2660    if (md->writeback)
2661        seq_printf(m, " writeback=%lu", md->writeback);
2662
2663    for_each_node_state(n, N_HIGH_MEMORY)
2664        if (md->node[n])
2665            seq_printf(m, " N%d=%lu", n, md->node[n]);
2666out:
2667    seq_putc(m, '\n');
2668    kfree(md);
2669
2670    if (m->count < m->size)
2671        m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2672    return 0;
2673}
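
/*
 * Sample /proc/<pid>/numa_maps line produced here (illustrative values):
 *
 *	7f2a8c000000 interleave:0-3 anon=512 dirty=512 N0=128 N1=128 N2=128 N3=128
 */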
2674
