/*
 * linux/arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
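
/*
 * Note on the ASID layout (assuming the ARMv6/v7 definitions of
 * ASID_BITS, ASID_MASK and ASID_FIRST_VERSION from asm/mmu_context.h):
 * the low ASID_BITS bits of mm->context.id hold the hardware ASID
 * programmed into CONTEXTIDR, while the upper bits hold the allocation
 * "version".  cpu_last_asid is the last version+ASID value handed out
 * and is only advanced under cpu_asid_lock.
 */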

/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.  We reserve version 0 for initial tasks so we will
 * always allocate an ASID. The ASID 0 is reserved for the TTBR
 * register changing sequence.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	spin_lock_init(&mm->context.id_lock);
}

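/*
 * Switch the running CPU to the reserved ASID 0 (via the CONTEXTIDR
 * write below, CP15 c13, c0, 1) and invalidate its TLB, so that no
 * stale translations tagged with a recycled ASID survive a rollover.
 * VIVT ASID-tagged instruction caches must be flushed as well.
 */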
static void flush_context(void)
{
	/* set the reserved ASID before flushing the TLB */
	asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
	isb();
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	/*
	 * Check if a current_mm was set on this CPU as it might still
	 * be in the early booting stages and using the reserved ASID.
	 */
	if (!mm)
		return;

	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
	isb();
}

#else

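/*
 * UP: no other CPU can hold a stale translation for this mm, so there
 * is no rollover broadcast; just record the new ASID.
 */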
static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

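/*
 * Allocate a new ASID (and possibly a new version) for @mm.  A sketch
 * of the rollover scheme as implemented here: ASIDs are handed out by
 * incrementing cpu_last_asid; when the low ASID_BITS bits wrap to zero
 * a new version begins, the local TLB is flushed and, on SMP, every
 * other CPU is told via IPI (reset_context) to flush and give its
 * current mm the ASID cpu_last_asid + cpu + 1.  cpu_last_asid is then
 * advanced by NR_CPUS so those per-CPU values are never handed out
 * again within this version.
 */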
void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	spin_unlock(&cpu_asid_lock);
}