/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which delivers interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

union gic_base {
	void __iomem *common_base;
	void __percpu __iomem **percpu_base;
};
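
/*
 * Most GICs expose a single register window that is transparently
 * banked per CPU.  A few SoCs instead lay out a separate copy of the
 * registers for each CPU at a fixed stride (see CONFIG_GIC_NON_BANKED
 * and the "cpu-offset" DT property below), hence the union of base
 * pointers above.
 */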

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

/*
 * Arch-specific extensions to the GIC irq_chip operations.
 * All hooks default to NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;
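
	/*
	 * GIC_DIST_CONFIG holds two config bits per interrupt (sixteen
	 * interrupts per 32-bit register); bit 1 selects edge-triggered
	 * (1) vs level-sensitive (0), hence the 0x2 in confmask.
	 */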

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}

static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	/* the genirq layer expects 0 if we can't retrigger in hardware */
	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;
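
	/*
	 * GIC_DIST_TARGET holds one byte per interrupt (four interrupts
	 * per 32-bit word), so the word is addressed with (hwirq & ~3)
	 * and the byte selected with an 8-bit shift.
	 */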

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif

#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}

#else
#define gic_set_wake	NULL
#endif

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
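		/*
		 * The acknowledge register returns the interrupt ID in
		 * bits [9:0]; for SGIs, bits [12:10] carry the ID of the
		 * signalling CPU, which is masked off here.  IDs 1021-1023
		 * are reserved (1023 means "spurious, nothing pending").
		 */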
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & ~0x1c00;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			irqnr = irq_find_mapping(gic->domain, irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}

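/*
 * Chained handler used when a secondary GIC's output is wired to an
 * interrupt line of the primary GIC (see gic_cascade_irq() below).
 */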
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}

static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

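	/*
	 * The GIC_DIST_TARGET registers for the first 32 interrupts are
	 * banked and read-only: each byte reads back as the CPU
	 * interface mask of the CPU performing the read.
	 */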
	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
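	/* Two config bits per interrupt: sixteen interrupts per register. */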
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}

static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

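	/*
	 * Lower numerical priority values are more urgent: the 0xf0
	 * priority mask lets every interrupt with priority below 0xf0
	 * (including the 0xa0 default set above) through to this CPU.
	 */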
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

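	/* Priorities are not saved; restore the boot-time default. */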
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}

static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

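	/*
	 * Only the first 32 interrupts (SGIs and PPIs) are banked per
	 * CPU: one 32-bit enable word and two config words cover them.
	 */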
	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
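	/*
	 * Per-CPU save areas for the banked PPI/SGI state; the first
	 * argument of __alloc_percpu() is the size in bytes (one u32
	 * per 32 enable bits, per 16 config pairs), the second the
	 * alignment.
	 */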
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

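	/*
	 * GIC_DIST_SOFTINT layout: the target CPU list goes in bits
	 * [23:16] and the SGI number in bits [3:0].
	 */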
	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
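	/* hwirqs 0-31 (SGIs and PPIs) are per-CPU; everything above is an SPI. */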
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};

void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Frankenstein-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	if (gic_nr == 0 && (irq_start & 31) > 0) {
		hwirq_base = 16;
		if (irq_start != -1)
			irq_start = (irq_start & ~31) + 16;
	} else {
		hwirq_base = 32;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
	if (IS_ERR_VALUE(irq_base)) {
		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
		     irq_start);
		irq_base = irq_start;
	}
	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
					    hwirq_base, &gic_irq_domain_ops, gic);
	if (WARN_ON(!gic->domain))
		return;

#ifdef CONFIG_SMP
	set_smp_cross_call(gic_raise_softirq);
#endif

	set_handle_irq(gic_handle_irq);

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}

void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_cpu_init(&gic_data[gic_nr]);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

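	/*
	 * "cpu-offset" gives the per-CPU stride between register copies
	 * on non-banked GICs; it is absent (0) in the usual banked case.
	 */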
	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}
	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);

#endif