/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                     Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

#define BFIN_IPI_RESCHEDULE	0
#define BFIN_IPI_CALL_FUNC	1
#define BFIN_IPI_CPU_STOP	2

struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t waitmask;
};

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

struct ipi_message {
	struct list_head list;
	unsigned long type;
	struct smp_call_struct call_struct;
};

struct ipi_message_queue {
	struct list_head head;
	spinlock_t lock;
	unsigned long count;
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);

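/*
 * Stop IPI: log the event, take this CPU out of the online map, and
 * spin with interrupts off until the core is reset.
 */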
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();

	while (1)
		SSYNC();
}

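/*
 * I-cache flush callback, run on each target CPU via
 * smp_icache_flush_range_others() below.
 */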
static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	blackfin_icache_flush_range(fdata->start, fdata->end);
}

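/*
 * Run a queued cross-call on this CPU.  The originator frees the
 * message when @wait is set; otherwise it is freed here after
 * func() has run.
 */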
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;

	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	cpu_clear(cpu, msg->call_struct.pending);
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		cpu_clear(cpu, msg->call_struct.waitmask);
	} else
		kfree(msg);
}

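/*
 * IPI interrupt handler: acknowledge the interrupt with the platform
 * code, then drain this CPU's message queue.  The queue lock is
 * dropped around the cross-call and stop handlers, which can run for
 * a long time (or, for a stop request, never return).
 */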
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
	struct ipi_message *msg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu);

	msg_queue = &__get_cpu_var(ipi_msg_queue);
	msg_queue->count++;

	spin_lock(&msg_queue->lock);
	while (!list_empty(&msg_queue->head)) {
		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
		list_del(&msg->list);
		switch (msg->type) {
		case BFIN_IPI_RESCHEDULE:
			/* That's the easiest one; leave it to
			 * return_from_int. */
			kfree(msg);
			break;
		case BFIN_IPI_CALL_FUNC:
			spin_unlock(&msg_queue->lock);
			ipi_call_function(cpu, msg);
			spin_lock(&msg_queue->lock);
			break;
		case BFIN_IPI_CPU_STOP:
			spin_unlock(&msg_queue->lock);
			/* ipi_cpu_stop() does not return, so free the
			 * message before stopping this core. */
			kfree(msg);
			ipi_cpu_stop(cpu);
			spin_lock(&msg_queue->lock);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			kfree(msg);
			break;
		}
	}
	spin_unlock(&msg_queue->lock);
	return IRQ_HANDLED;
}

static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;

	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		INIT_LIST_HEAD(&msg_queue->head);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
	}
}

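/*
 * Run @func with @info on every other online CPU and, if @wait is
 * set, spin until they have all finished.  A minimal usage sketch
 * (the callback name below is hypothetical, for illustration only):
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	atomic_t hits = ATOMIC_INIT(0);
 *	smp_call_function(bump_counter, &hits, 1);
 *
 * Returns 0 on success (including the case where there is nobody
 * else to call) or -ENOMEM if the message cannot be allocated.
 */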
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

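	/*
	 * Note: the same message (and hence the same list node) is
	 * queued on every target CPU.  This is only safe because
	 * Blackfin SMP (BF561) is dual-core, so at most one other
	 * CPU can ever be in callmap here.
	 */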
	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}

	if (wait) {
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);

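/*
 * Like smp_call_function(), but target a single CPU.  A request for
 * an offline CPU is silently ignored.
 */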
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return 0;
	cpus_clear(callmap);
	cpu_set(cpu, callmap);

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add_tail(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);

	if (wait) {
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);

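/*
 * Kick @cpu with a reschedule IPI.  The receiving core does the
 * actual rescheduling on its interrupt return path, so the handler
 * only has to consume (and free) the message.
 */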
void smp_send_reschedule(int cpu)
{
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_RESCHEDULE;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add_tail(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);
}

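/*
 * Ask every other online CPU to stop itself (see ipi_cpu_stop()
 * above).  As in smp_call_function(), a single message serves all
 * targets, which relies on the dual-core assumption.
 */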
void smp_send_stop(void)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_CPU_STOP;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}
}

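/*
 * Bring up secondary CPU @cpu: hand it a freshly forked idle thread
 * and its stack, then let the platform code release the core.  The
 * idle task is kept in a static so that replugging the CPU does not
 * leak the previous one.
 */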
int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret;
	static struct task_struct *idle;

	if (idle)
		free_task(idle);

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;

	ret = platform_boot_secondary(cpu, idle);

	secondary_stack = NULL;

	return ret;
}

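/*
 * Per-core interrupt setup for the secondary CPU: mask everything,
 * clear any latched interrupts, then unmask the system IVG levels.
 */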
static void __cpuinit setup_secondary(unsigned int cpu)
{
	unsigned long ilat;

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15.  The IARs have already been
	 * programmed by the boot CPU. */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}

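/*
 * C entry point for a secondary core: report any double fault left
 * over from the previous run, set up caches, MM, interrupts and the
 * local timer, then enter the idle loop.
 */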
void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	bfin_setup_caches(cpu);

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	/* setup local core timer */
	bfin_local_timer_setup();

	local_irq_enable();

	/*
	 * Calibrate the loops-per-jiffy value.  IRQs must be enabled
	 * here: the timer interrupt invalidates the D-cache, which is
	 * what lets Core B read an up-to-date jiffies.
	 */
	calibrate_delay();

	cpu_idle();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(&ipi_handler);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

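/*
 * Flush the I-cache range [@start, @end) on every other core.  The
 * bounds are passed through the static smp_flush_data, which is not
 * protected against concurrent callers.
 */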
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);

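/*
 * Software cache-coherence helpers for parts without hardware
 * coherence: invalidate this core's entire I- or D-cache and count
 * how often that happens (the counters are statistics only).
 */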
#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif

#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif

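/*
 * CPU hotplug: only the boot CPU (CPU0) is not removable.  A dying
 * core signals cpu_die() below, and __cpu_die() on the survivor
 * waits (with a timeout) for that handshake.
 */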
#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EPERM;

	set_cpu_online(cpu, false);
	return 0;
}

static DECLARE_COMPLETION(cpu_killed);

int __cpuexit __cpu_die(unsigned int cpu)
{
	/* The timeout argument is in jiffies; wait up to five seconds
	 * for the dying CPU's handshake. */
	return wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(5000));
}

void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	local_irq_disable();
	platform_cpu_die();
}
#endif