/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>
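/*
 * Each function below is a generic fallback: it is compiled only when the
 * architecture does not provide an optimized implementation of its own,
 * which it signals by defining the corresponding __HAVE_ARCH_* macro
 * (e.g. __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS) in its <asm/pgtable.h>.
 */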
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed, and
 * writable). Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
              unsigned long address, pte_t *ptep,
              pte_t entry, int dirty)
{
    int changed = !pte_same(*ptep, entry);
    if (changed) {
        set_pte_at(vma->vm_mm, address, ptep, entry);
        flush_tlb_page(vma, address);
    }
    return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
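/*
 * PMD-level counterpart of ptep_set_access_flags(), used for transparent
 * huge pages: the address must be huge-page aligned and the whole
 * HPAGE_PMD_SIZE range is flushed. Without CONFIG_TRANSPARENT_HUGEPAGE no
 * caller should ever reach this, hence the BUG().
 */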
int pmdp_set_access_flags(struct vm_area_struct *vma,
              unsigned long address, pmd_t *pmdp,
              pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    int changed = !pmd_same(*pmdp, entry);
    VM_BUG_ON(address & ~HPAGE_PMD_MASK);
    if (changed) {
        set_pmd_at(vma->vm_mm, address, pmdp, entry);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
    }
    return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
    BUG();
    return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
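/*
 * Clear the young (accessed) bit of a PTE and, only if the bit was
 * actually set, flush the corresponding TLB entry. Returns the old
 * value of the bit.
 */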
int ptep_clear_flush_young(struct vm_area_struct *vma,
               unsigned long address, pte_t *ptep)
{
    int young;
    young = ptep_test_and_clear_young(vma, address, ptep);
    if (young)
        flush_tlb_page(vma, address);
    return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
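/*
 * PMD-level counterpart of ptep_clear_flush_young() for transparent huge
 * pages: clears the accessed bit and, if it was set, flushes the whole
 * HPAGE_PMD_SIZE range.
 */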
int pmdp_clear_flush_young(struct vm_area_struct *vma,
               unsigned long address, pmd_t *pmdp)
{
    int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
    BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
    young = pmdp_test_and_clear_young(vma, address, pmdp);
    if (young)
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
    return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
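/*
 * Get and clear a PTE, then flush the TLB entry for it. The old PTE value
 * is returned so the caller can inspect bits such as dirty and accessed.
 */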
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
               pte_t *ptep)
{
    pte_t pte;
    pte = ptep_get_and_clear(vma->vm_mm, address, ptep);
    flush_tlb_page(vma, address);
    return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
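/*
 * Same as ptep_clear_flush(), but for a PMD-mapped huge page: get and
 * clear the PMD, then flush the whole HPAGE_PMD_SIZE range.
 */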
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
               pmd_t *pmdp)
{
    pmd_t pmd;
    VM_BUG_ON(address & ~HPAGE_PMD_MASK);
    pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
    flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
    return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
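/*
 * Mark a huge-page PMD as splitting: set_pmd_at() installs the splitting
 * bit, and the TLB flush that follows does not change any translation but,
 * as the comment in the function body notes, exists only to serialize
 * against gup-fast.
 */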
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
              pmd_t *pmdp)
{
    pmd_t pmd = pmd_mksplitting(*pmdp);
    VM_BUG_ON(address & ~HPAGE_PMD_MASK);
    set_pmd_at(vma->vm_mm, address, pmdp, pmd);
    /* tlb flush only to serialize against gup-fast */
    flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
