arch/sh/kernel/smp.c at commit 0de2b2b3be81048189a32f7a3d3ba0ba9ec817b6
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

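/*
 * Platforms select their SMP implementation by registering a
 * plat_smp_ops instance early in setup, before smp_prepare_cpus()
 * runs.  An illustrative call (the ops instance is platform-specific,
 * e.g. the SH-X3 code registers its own):
 *
 *	register_smp_ops(&shx3_smp_ops);
 */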
void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

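/*
 * Seed this CPU's cpu_data entry from the boot CPU and record the
 * delay-loop calibration done in start_secondary().
 */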
static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

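/*
 * Runs once on the boot CPU before secondaries are started; gives the
 * platform a hook to probe CPUs and, in the non-hotplug case, fixes
 * the present mask to the possible mask.
 */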
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

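/*
 * Record the boot CPU in the physical<->logical maps (it always takes
 * logical slot 0) and mark it online and possible.
 */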
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
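/*
 * Default hotplug helpers for platforms without a special power-down
 * sequence: native_cpu_die() polls cpu_state, which the dying CPU
 * sets to CPU_DEAD in play_dead_common(), for up to a second.
 */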
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

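/*
 * Runs on the CPU going down: retire the idle task and the IRQ
 * stacks, then advertise CPU_DEAD so native_cpu_die() on a surviving
 * CPU can observe the transition; the mb() orders the teardown before
 * the cpu_state store seen by the poller's smp_rmb().
 */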
void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__get_cpu_var(cpu_state) = CPU_DEAD;
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

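/*
 * Called on the CPU being unplugged by the generic hotplug code:
 * mark it offline, migrate its IRQs away and stop its local timer,
 * then drop it from every mm's cpumask.
 */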
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

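/*
 * Entry point for secondary CPUs, reached from the head.S startup
 * code: adopt init_mm, initialize per-CPU traps and timers, calibrate
 * the delay loop, then mark the CPU online so __cpu_up() can return.
 */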
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_idle();
}

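/*
 * Parameter block shared with the head.S startup code; the field
 * layout here must stay in sync with the assembly side.
 */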
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

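/*
 * Bring one secondary CPU online: (re)use its idle thread, point the
 * head.S parameter block at the new stack, kick the CPU through the
 * platform's start_cpu() hook, then wait up to a second (HZ jiffies)
 * for it to appear in the online mask.
 */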
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = cpu_data[cpu].idle;
	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk)) {
			pr_err("Failed forking idle task for cpu %d\n", cpu);
			return PTR_ERR(tsk);
		}

		cpu_data[cpu].idle = tsk;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

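/*
 * Summarize bring-up.  loops_per_jiffy * HZ is delay loops per second
 * and 500000 loops/sec is one BogoMIPS, so bogosum / (500000/HZ)
 * yields the integer part and the second divide extracts two
 * fractional digits.
 */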
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

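/*
 * All cross-CPU signalling below funnels through the platform's
 * send_ipi() hook; the message types are demultiplexed again on the
 * receiving side in smp_message_recv().
 */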
void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

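/*
 * Demultiplex an incoming IPI.  The platform's interrupt code decodes
 * the message number and calls in here, e.g. (illustrative):
 *
 *	smp_message_recv(SMP_MSG_RESCHEDULE);
 */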
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

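/*
 * Cross-CPU TLB shootdown: the local_flush_tlb_*() primitives only
 * act on the executing CPU, so the global variants below broadcast an
 * IPI and run the same primitive everywhere that needs it.
 */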
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

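/*
 * Argument block handed by pointer to the IPI helpers below; the
 * callers pass wait=1 to smp_call_function()/on_each_cpu(), so stack
 * allocation is safe.
 */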
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}