mm/pgtable-generic.c

/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed, and
 * writable). Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}
	return changed;
}
#endif
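
/*
 * Illustrative sketch (not part of the upstream file): a typical caller in a
 * minor-fault path.  The helper name is hypothetical; the point is that the
 * return value of ptep_set_access_flags() tells the caller whether
 * update_mmu_cache() still needs to be run, as the comment above describes.
 */
static inline void example_fixup_access_flags(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep, pte_t entry,
					      int write_fault)
{
	/* Only touch the MMU cache when the PTE actually changed. */
	if (ptep_set_access_flags(vma, address, ptep, entry, write_fault))
		update_mmu_cache(vma, address, ptep);
}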

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
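
/*
 * Illustrative sketch (not part of the upstream file): how a page-aging
 * walker in the style of page_referenced() might use
 * ptep_clear_flush_young().  The helper name is hypothetical; the point is
 * that the call both tests and clears the accessed bit, flushing the stale
 * TLB entry only when the bit was actually set.
 */
static inline int example_pte_was_referenced(struct vm_area_struct *vma,
					     unsigned long address,
					     pte_t *ptep)
{
	/* Non-zero means the page was touched since the bit was last cleared. */
	return ptep_clear_flush_young(vma, address, ptep);
}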

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	pte_t pte;
	pte = ptep_get_and_clear(vma->vm_mm, address, ptep);
	flush_tlb_page(vma, address);
	return pte;
}
#endif
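
/*
 * Illustrative sketch (not part of the upstream file): an unmap-style caller,
 * loosely modelled on try_to_unmap_one().  The helper name is hypothetical.
 * ptep_clear_flush() returns the old PTE value, so the caller can still
 * propagate the hardware dirty bit after the mapping and its TLB entry are
 * gone.
 */
static inline void example_unmap_one_pte(struct vm_area_struct *vma,
					 unsigned long address,
					 pte_t *ptep, struct page *page)
{
	pte_t pteval = ptep_clear_flush(vma, address, ptep);

	/* Carry the dirty state from the old PTE over to the struct page. */
	if (pte_dirty(pteval))
		set_page_dirty(page);
}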

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
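
/*
 * Illustrative sketch (not part of the upstream file): the consumer side of
 * the serialization mentioned above, in the style of an architecture's
 * gup-fast pmd walker.  gup-fast runs with interrupts disabled, so a TLB
 * flush delivered by IPI (as on x86) cannot complete until the walker has
 * left its critical section; once the splitting bit is visible, the walker
 * bails out to the slow path.
 */
static inline int example_gup_fast_pmd_usable(pmd_t pmd)
{
	/* A pmd that is being split must not be used for fast GUP. */
	if (pmd_trans_splitting(pmd))
		return 0;
	return 1;
}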