Date:2013-04-23 22:42:07 (9 years 5 months ago)
Author:John David Anglin
Commit:bda079d336cd8183e1d844a265ea87ae3e1bbe78
Message:parisc: use spin_lock_irqsave/spin_unlock_irqrestore for PTE updates

User applications running on SMP kernels have long suffered from instability
and random segmentation faults. This patch improves the situation although
there is more work to be done.

One of the problems is that the various routines in pgtable.h that update page
table entries use different locking mechanisms, or no lock at all (set_pte_at). This
change modifies the routines to all use the same lock pa_dbit_lock. This lock
is used for dirty bit updates in the interruption code. The patch also purges
the TLB entries associated with the PTE to ensure that inconsistent values are
not used after the page table entry is updated. The UP and SMP code are now
identical.

The change also includes a minor update to the purge_tlb_entries function in
cache.c to improve its efficiency.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: Helge Deller <deller@gmx.de>
Signed-off-by: Helge Deller <deller@gmx.de>
Files: arch/parisc/include/asm/pgtable.h (3 diffs)
arch/parisc/kernel/cache.c (1 diff)

Change Details

arch/parisc/include/asm/pgtable.h
1616#include <asm/processor.h>
1717#include <asm/cache.h>
1818
19extern spinlock_t pa_dbit_lock;
20
1921/*
2022 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
2123 * memory. For the return value to be meaningful, ADDR must be >=
...... 
4446
4547#define set_pte_at(mm, addr, ptep, pteval) \
4648    do { \
49        unsigned long flags; \
50        spin_lock_irqsave(&pa_dbit_lock, flags); \
4751        set_pte(ptep, pteval); \
4852        purge_tlb_entries(mm, addr); \
53        spin_unlock_irqrestore(&pa_dbit_lock, flags); \
4954    } while (0)
5055
5156#endif /* !__ASSEMBLY__ */
...... 
435440
436441static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
437442{
438#ifdef CONFIG_SMP
443    pte_t pte;
444    unsigned long flags;
445
439446    if (!pte_young(*ptep))
440447        return 0;
441    return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
442#else
443    pte_t pte = *ptep;
444    if (!pte_young(pte))
448
449    spin_lock_irqsave(&pa_dbit_lock, flags);
450    pte = *ptep;
451    if (!pte_young(pte)) {
452        spin_unlock_irqrestore(&pa_dbit_lock, flags);
445453        return 0;
446    set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
454    }
455    set_pte(ptep, pte_mkold(pte));
456    purge_tlb_entries(vma->vm_mm, addr);
457    spin_unlock_irqrestore(&pa_dbit_lock, flags);
447458    return 1;
448#endif
449459}
450460
451extern spinlock_t pa_dbit_lock;
452
453461struct mm_struct;
454462static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
455463{
456464    pte_t old_pte;
465    unsigned long flags;
457466
458    spin_lock(&pa_dbit_lock);
467    spin_lock_irqsave(&pa_dbit_lock, flags);
459468    old_pte = *ptep;
460469    pte_clear(mm,addr,ptep);
461    spin_unlock(&pa_dbit_lock);
470    purge_tlb_entries(mm, addr);
471    spin_unlock_irqrestore(&pa_dbit_lock, flags);
462472
463473    return old_pte;
464474}
465475
466476static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
467477{
468#ifdef CONFIG_SMP
469    unsigned long new, old;
470
471    do {
472        old = pte_val(*ptep);
473        new = pte_val(pte_wrprotect(__pte (old)));
474    } while (cmpxchg((unsigned long *) ptep, old, new) != old);
478    unsigned long flags;
479    spin_lock_irqsave(&pa_dbit_lock, flags);
480    set_pte(ptep, pte_wrprotect(*ptep));
475481    purge_tlb_entries(mm, addr);
476#else
477    pte_t old_pte = *ptep;
478    set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
479#endif
482    spin_unlock_irqrestore(&pa_dbit_lock, flags);
480483}
481484
482485#define pte_same(A,B) (pte_val(A) == pte_val(B))
arch/parisc/kernel/cache.c
421421    /* Note: purge_tlb_entries can be called at startup with
422422       no context. */
423423
424    /* Disable preemption while we play with %sr1. */
425    preempt_disable();
426    mtsp(mm->context, 1);
427424    purge_tlb_start(flags);
425    mtsp(mm->context, 1);
428426    pdtlb(addr);
429427    pitlb(addr);
430428    purge_tlb_end(flags);
431    preempt_enable();
432429}
433430EXPORT_SYMBOL(purge_tlb_entries);
434431

Archive Download the corresponding diff file



interactive