/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	int ok = 0, work = 0;

	raw_spin_lock(&desc->lock);
	/* Already running on another processor */
	if (desc->status & IRQ_INPROGRESS) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		if (desc->action && (desc->action->flags & IRQF_SHARED))
			desc->status |= IRQ_PENDING;
		raw_spin_unlock(&desc->lock);
		return ok;
	}
	/* Honour the normal IRQ locking */
	desc->status |= IRQ_INPROGRESS;
	action = desc->action;
	raw_spin_unlock(&desc->lock);

	while (action) {
		/* Only shared IRQ handlers are safe to call */
		if (action->flags & IRQF_SHARED) {
			if (action->handler(irq, action->dev_id) ==
				IRQ_HANDLED)
				ok = 1;
		}
		action = action->next;
	}
	local_irq_disable();
	/* Now clean up the flags */
	raw_spin_lock(&desc->lock);
	action = desc->action;

	/*
	 * While we were looking for a fixup someone queued a real
	 * IRQ clashing with our walk:
	 */
	while ((desc->status & IRQ_PENDING) && action) {
		/*
		 * Perform real IRQ processing for the IRQ we deferred
		 */
		work = 1;
		raw_spin_unlock(&desc->lock);
		handle_IRQ_event(irq, action);
		raw_spin_lock(&desc->lock);
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * If we did actual work for the real IRQ line we must let the
	 * IRQ controller clean up too
	 */
	if (work)
		irq_end(irq, desc);
	raw_spin_unlock(&desc->lock);

	return ok;
}
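
/*
 * Illustrative sketch, not part of this file: try_one_irq() may call a
 * handler although its device never raised the interrupt, which is why
 * only IRQF_SHARED handlers are polled above. A shared handler must
 * check its own hardware and return IRQ_NONE when the interrupt is not
 * its own, so a spurious poll is not miscounted as handled. A
 * hypothetical driver handler ("struct my_dev" and the MY_IRQ_*
 * register bits are made-up names):
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!(readl(dev->regs + MY_IRQ_STATUS) & MY_IRQ_PENDING))
 *			return IRQ_NONE;
 *
 *		writel(MY_IRQ_PENDING, dev->regs + MY_IRQ_ACK);
 *		return IRQ_HANDLED;
 *	}
 */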

static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc))
			ok = 1;
	}
	/* So the caller can adjust the irq error counts */
	return ok;
}

static void poll_spurious_irqs(unsigned long dummy)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		unsigned int status;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		status = desc->status;
		barrier();
		if (!(status & IRQ_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(i, desc);
		local_irq_enable();
	}

	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
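
/*
 * Note added for clarity: POLL_SPURIOUS_IRQ_INTERVAL is HZ/10 jiffies,
 * i.e. roughly 100ms independent of the configured tick rate. With
 * HZ=250, for instance, the timer above rearms itself every 25
 * jiffies, so a line disabled as spurious is repolled about ten times
 * per second.
 */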

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */

static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
}

static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt.. Legacy)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}
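
/*
 * Illustrative sketch, not part of this file: with "irqpoll"
 * (irqfixup == 2), handled interrupts are polled too if their action
 * is marked IRQF_IRQPOLL. A driver opts in when registering its
 * handler; "my_handler", "my_dev" and the "mydev" name are made up:
 *
 *	int ret = request_irq(irq, my_handler,
 *			      IRQF_SHARED | IRQF_IRQPOLL,
 *			      "mydev", my_dev);
 *	if (ret)
 *		return ret;
 */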

void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (unlikely(action_ret != IRQ_HANDLED)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for otherwise
		 * working systems
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
		if (unlikely(action_ret != IRQ_NONE))
			report_bad_irq(irq, desc, action_ret);
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
		desc->depth++;
		desc->irq_data.chip->irq_disable(&desc->irq_data);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}
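
/*
 * Worked example of the thresholds above, added for clarity: the
 * counters are evaluated once every 100,000 interrupts. If more than
 * 99,900 of those (over 99.9%) went unhandled, the line is declared
 * stuck and disabled, even though the remaining ~100 may have come
 * from a healthy device sharing the line. Note also that
 * irqs_unhandled restarts from 1 whenever the previous unhandled
 * event is more than HZ/10 old, so occasional spurious interrupts on
 * a working system never accumulate to the threshold.
 */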

int noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
		"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
		"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);
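
/*
 * Usage summary, added for clarity: the behaviour in this file is
 * selected on the kernel command line:
 *
 *	noirqdebug	disable the unhandled-interrupt detector
 *	irqfixup	when an IRQ goes unhandled, poll the other
 *			handlers for it (irqfixup == 1)
 *	irqpoll		as irqfixup, and additionally poll on each
 *			timer interrupt and on handlers marked
 *			IRQF_IRQPOLL (irqfixup == 2)
 *
 * noirqdebug and irqfixup are also exposed as writable module
 * parameters (mode 0644), so they can be flipped at run time through
 * sysfs.
 */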