/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * set_irq_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int set_irq_chip(unsigned int irq, struct irq_chip *chip)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
                return -EINVAL;
        }

        if (!chip)
                chip = &no_irq_chip;

        raw_spin_lock_irqsave(&desc->lock, flags);
        irq_chip_set_defaults(chip);
        desc->irq_data.chip = chip;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}
EXPORT_SYMBOL(set_irq_chip);
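
/*
 * Usage sketch (hypothetical, not part of this file): a platform
 * typically installs its chip on each of its interrupt lines during
 * early init. "my_chip", MY_FIRST_IRQ and MY_NR_IRQS are illustrative
 * names, not real symbols.
 */
#if 0   /* example only */
static void __init my_init_irqs(void)
{
        unsigned int irq;

        for (irq = MY_FIRST_IRQ; irq < MY_FIRST_IRQ + MY_NR_IRQS; irq++)
                set_irq_chip(irq, &my_chip);
}
#endif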

/**
 * set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int set_irq_type(unsigned int irq, unsigned int type)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret = -ENXIO;

        if (!desc) {
                printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
                return -ENODEV;
        }

        type &= IRQ_TYPE_SENSE_MASK;
        if (type == IRQ_TYPE_NONE)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = __irq_set_trigger(desc, irq, type);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
EXPORT_SYMBOL(set_irq_type);
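
/*
 * Usage sketch (hypothetical): board code selecting falling-edge
 * triggering for a line before a driver requests it. MY_GPIO_IRQ is
 * an illustrative name.
 */
#if 0   /* example only */
        if (set_irq_type(MY_GPIO_IRQ, IRQ_TYPE_EDGE_FALLING))
                pr_err("could not set trigger type for IRQ%d\n", MY_GPIO_IRQ);
#endif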

/**
 * set_irq_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the interrupt handler data for an irq
 */
int set_irq_data(unsigned int irq, void *data)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install controller data for IRQ%d\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.handler_data = data;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}
EXPORT_SYMBOL(set_irq_data);
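
/*
 * Usage sketch (hypothetical): handler_data usually carries the state
 * a demultiplexing flow handler needs, and is read back through the
 * get_irq_data() accessor (assumed from include/linux/irq.h).
 * PARENT_IRQ and "my_demux_state" are illustrative names.
 */
#if 0   /* example only */
        set_irq_data(PARENT_IRQ, &my_demux_state);
        /* later, e.g. inside the parent's flow handler: */
        struct my_demux_state *state = get_irq_data(PARENT_IRQ);
#endif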

/**
 * set_irq_msi - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int set_irq_msi(unsigned int irq, struct msi_desc *entry)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install msi data for IRQ%d\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.msi_desc = entry;
        if (entry)
                entry->irq = irq;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

/**
 * set_irq_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int set_irq_chip_data(unsigned int irq, void *data)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install chip data for IRQ%d\n", irq);
                return -EINVAL;
        }

        if (!desc->irq_data.chip) {
                printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
                return -EINVAL;
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->irq_data.chip_data = data;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}
EXPORT_SYMBOL(set_irq_chip_data);
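
/*
 * Usage sketch (hypothetical): chip_data typically points at
 * per-controller state the irq_chip callbacks need, e.g. a register
 * base. "struct my_ctrl", MY_MASK_REG, MY_FIRST_IRQ and my_controller
 * are illustrative; real code would also include <linux/io.h> for
 * writel().
 */
#if 0   /* example only */
struct my_ctrl {
        void __iomem *base;
};

static void my_mask(struct irq_data *data)
{
        struct my_ctrl *ctrl = data->chip_data;

        /* each line has a mask bit indexed by its irq offset */
        writel(1 << (data->irq - MY_FIRST_IRQ), ctrl->base + MY_MASK_REG);
}

        /* at setup time: */
        set_irq_chip_data(irq, &my_controller);
#endif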

struct irq_data *irq_get_irq_data(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

/**
 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
 *
 * @irq: Interrupt number
 * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag
 *
 * The IRQ_NESTED_THREAD flag indicates that on
 * request_threaded_irq() no separate interrupt thread should be
 * created for the irq, as the handlers are called nested in the
 * context of a demultiplexing interrupt handler thread.
 */
void set_irq_nested_thread(unsigned int irq, int nest)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (nest)
                desc->status |= IRQ_NESTED_THREAD;
        else
                desc->status &= ~IRQ_NESTED_THREAD;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);
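
/*
 * Usage sketch (hypothetical): an I2C GPIO expander marks its child
 * interrupts as nested, so request_threaded_irq() on them does not
 * spawn extra threads; the expander's own threaded handler invokes
 * the child handlers via handle_nested_irq() below. "expander_chip",
 * child_irq_base and nr_children are illustrative names.
 */
#if 0   /* example only */
        for (irq = child_irq_base; irq < child_irq_base + nr_children; irq++) {
                set_irq_chip_and_handler(irq, &expander_chip,
                                         handle_simple_irq);
                set_irq_nested_thread(irq, 1);
        }
#endif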

/*
 * default enable function
 */
static void default_enable(struct irq_data *data)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        desc->irq_data.chip->irq_unmask(&desc->irq_data);
        desc->status &= ~IRQ_MASKED;
}

/*
 * default disable function
 */
static void default_disable(struct irq_data *data)
{
}

/*
 * default startup function
 */
static unsigned int default_startup(struct irq_data *data)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        desc->irq_data.chip->irq_enable(data);
        return 0;
}

/*
 * default shutdown function
 */
static void default_shutdown(struct irq_data *data)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        desc->irq_data.chip->irq_mask(&desc->irq_data);
        desc->status |= IRQ_MASKED;
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
/* Temporary migration helpers */
static void compat_irq_mask(struct irq_data *data)
{
        data->chip->mask(data->irq);
}

static void compat_irq_unmask(struct irq_data *data)
{
        data->chip->unmask(data->irq);
}

static void compat_irq_ack(struct irq_data *data)
{
        data->chip->ack(data->irq);
}

static void compat_irq_mask_ack(struct irq_data *data)
{
        data->chip->mask_ack(data->irq);
}

static void compat_irq_eoi(struct irq_data *data)
{
        data->chip->eoi(data->irq);
}

static void compat_irq_enable(struct irq_data *data)
{
        data->chip->enable(data->irq);
}

static void compat_irq_disable(struct irq_data *data)
{
        data->chip->disable(data->irq);
}

static void compat_irq_shutdown(struct irq_data *data)
{
        data->chip->shutdown(data->irq);
}

static unsigned int compat_irq_startup(struct irq_data *data)
{
        return data->chip->startup(data->irq);
}

static int compat_irq_set_affinity(struct irq_data *data,
                                   const struct cpumask *dest, bool force)
{
        return data->chip->set_affinity(data->irq, dest);
}

static int compat_irq_set_type(struct irq_data *data, unsigned int type)
{
        return data->chip->set_type(data->irq, type);
}

static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
{
        return data->chip->set_wake(data->irq, on);
}

static int compat_irq_retrigger(struct irq_data *data)
{
        return data->chip->retrigger(data->irq);
}

static void compat_bus_lock(struct irq_data *data)
{
        data->chip->bus_lock(data->irq);
}

static void compat_bus_sync_unlock(struct irq_data *data)
{
        data->chip->bus_sync_unlock(data->irq);
}
#endif

/*
 * Fixup enable/disable function pointers
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
        /*
         * Compat fixup functions need to be installed before we set
         * the defaults for enable/disable/startup/shutdown
         */
        if (chip->enable)
                chip->irq_enable = compat_irq_enable;
        if (chip->disable)
                chip->irq_disable = compat_irq_disable;
        if (chip->shutdown)
                chip->irq_shutdown = compat_irq_shutdown;
        if (chip->startup)
                chip->irq_startup = compat_irq_startup;
#endif
        /*
         * The real defaults
         */
        if (!chip->irq_enable)
                chip->irq_enable = default_enable;
        if (!chip->irq_disable)
                chip->irq_disable = default_disable;
        if (!chip->irq_startup)
                chip->irq_startup = default_startup;
        /*
         * Use chip->irq_disable as shutdown when the user provided
         * their own disable function. When chip->irq_disable is still
         * default_disable, we must use default_shutdown instead;
         * otherwise the irq line is not disabled on free_irq():
         */
        if (!chip->irq_shutdown)
                chip->irq_shutdown = chip->irq_disable != default_disable ?
                        chip->irq_disable : default_shutdown;

#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
        if (!chip->end)
                chip->end = dummy_irq_chip.end;

        /*
         * Now fix up the remaining compat handlers
         */
        if (chip->bus_lock)
                chip->irq_bus_lock = compat_bus_lock;
        if (chip->bus_sync_unlock)
                chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
        if (chip->mask)
                chip->irq_mask = compat_irq_mask;
        if (chip->unmask)
                chip->irq_unmask = compat_irq_unmask;
        if (chip->ack)
                chip->irq_ack = compat_irq_ack;
        if (chip->mask_ack)
                chip->irq_mask_ack = compat_irq_mask_ack;
        if (chip->eoi)
                chip->irq_eoi = compat_irq_eoi;
        if (chip->set_affinity)
                chip->irq_set_affinity = compat_irq_set_affinity;
        if (chip->set_type)
                chip->irq_set_type = compat_irq_set_type;
        if (chip->set_wake)
                chip->irq_set_wake = compat_irq_set_wake;
        if (chip->retrigger)
                chip->irq_retrigger = compat_irq_retrigger;
#endif
}
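
/*
 * Sketch (hypothetical): with the defaults above, a minimal new-style
 * chip only has to provide mask/unmask/ack callbacks;
 * irq_chip_set_defaults() then fills in irq_enable, irq_disable,
 * irq_startup and irq_shutdown around them. my_ack, my_mask and
 * my_unmask are illustrative callbacks.
 */
#if 0   /* example only */
static struct irq_chip my_chip = {
        .name           = "MYCHIP",
        .irq_ack        = my_ack,
        .irq_mask       = my_mask,
        .irq_unmask     = my_unmask,
};
#endif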

static inline void mask_ack_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask_ack)
                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
        else {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
        desc->status |= IRQ_MASKED;
}

static inline void mask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask) {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                desc->status |= IRQ_MASKED;
        }
}

static inline void unmask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_unmask) {
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
                desc->status &= ~IRQ_MASKED;
        }
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
                goto out_unlock;

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock_irq(&desc->lock);

        action_ret = action->thread_fn(action->irq, action->dev_id);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
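
/*
 * Usage sketch (hypothetical): a demultiplexing expander's threaded
 * handler reads a status register and hands each pending child line
 * to handle_nested_irq(), which runs that child's thread_fn in the
 * current thread. "struct my_expander" and my_read_status() are
 * illustrative.
 */
#if 0   /* example only */
static irqreturn_t expander_demux_thread(int irq, void *dev_id)
{
        struct my_expander *exp = dev_id;
        unsigned long pending = my_read_status(exp);
        int bit;

        for_each_set_bit(bit, &pending, exp->nr_children)
                handle_nested_irq(exp->child_irq_base + bit);

        return IRQ_HANDLED;
}
#endif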

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
                goto out_unlock;

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;
out_unlock:
        raw_spin_unlock(&desc->lock);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so that the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out_unlock;
        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it's disabled or no action is available,
         * keep it masked and get out of here
         */
        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED)))
                goto out_unlock;

        desc->status |= IRQ_INPROGRESS;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;

        if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
                unmask_irq(desc);
out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
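
/*
 * Usage sketch: level-triggered lines are typically wired up with this
 * flow handler at setup time, so the core keeps the line masked while
 * the action runs and unmasks it afterwards. "my_chip" is the
 * illustrative chip from the earlier sketches.
 */
#if 0   /* example only */
        set_irq_chip_and_handler(irq, &my_chip, handle_level_irq);
#endif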

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irqaction *action;
        irqreturn_t action_ret;

        raw_spin_lock(&desc->lock);

        if (unlikely(desc->status & IRQ_INPROGRESS))
                goto out;

        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        action = desc->action;
        if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
                desc->status |= IRQ_PENDING;
                mask_irq(desc);
                goto out;
        }

        desc->status |= IRQ_INPROGRESS;
        desc->status &= ~IRQ_PENDING;
        raw_spin_unlock(&desc->lock);

        action_ret = handle_IRQ_event(irq, action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_INPROGRESS;
out:
        desc->irq_data.chip->irq_eoi(&desc->irq_data);

        raw_spin_unlock(&desc->lock);
}

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt, depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts that arrived while the
 * handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

        /*
         * If we're currently running this IRQ, or it's disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out
         */
        if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
                     !desc->action)) {
                desc->status |= (IRQ_PENDING | IRQ_MASKED);
                mask_ack_irq(desc);
                goto out_unlock;
        }
        kstat_incr_irqs_this_cpu(irq, desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        /* Mark the IRQ currently in progress. */
        desc->status |= IRQ_INPROGRESS;

        do {
                struct irqaction *action = desc->action;
                irqreturn_t action_ret;

                if (unlikely(!action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }

                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Re-enable it, if it was not disabled in the meantime.
                 */
                if (unlikely((desc->status &
                              (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
                             (IRQ_PENDING | IRQ_MASKED))) {
                        unmask_irq(desc);
                }

                desc->status &= ~IRQ_PENDING;
                raw_spin_unlock(&desc->lock);
                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);
                raw_spin_lock(&desc->lock);

        } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

        desc->status &= ~IRQ_INPROGRESS;
out_unlock:
        raw_spin_unlock(&desc->lock);
}

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
        irqreturn_t action_ret;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (desc->irq_data.chip->irq_ack)
                desc->irq_data.chip->irq_ack(&desc->irq_data);

        action_ret = handle_IRQ_event(irq, desc->action);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        if (desc->irq_data.chip->irq_eoi)
                desc->irq_data.chip->irq_eoi(&desc->irq_data);
}

void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc) {
                printk(KERN_ERR
                       "Trying to install type control for IRQ%d\n", irq);
                return;
        }

        if (!handle)
                handle = handle_bad_irq;
        else if (desc->irq_data.chip == &no_irq_chip) {
                printk(KERN_WARNING "Trying to install %sinterrupt handler "
                       "for IRQ%d\n", is_chained ? "chained " : "", irq);
                /*
                 * Some ARM implementations install a handler for really dumb
                 * interrupt hardware without setting an irq_chip. This worked
                 * with the ARM no_irq_chip but the check in setup_irq would
                 * prevent us from setting up the interrupt at all. Switch it
                 * to dummy_irq_chip for an easy transition.
                 */
                desc->irq_data.chip = &dummy_irq_chip;
        }

        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);

        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                desc->status |= IRQ_DISABLED;
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;

        if (handle != handle_bad_irq && is_chained) {
                desc->status &= ~IRQ_DISABLED;
                desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
                desc->depth = 0;
                desc->irq_data.chip->irq_startup(&desc->irq_data);
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
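
/*
 * Usage sketch (hypothetical): a cascaded controller installs a
 * chained flow handler on its parent line (is_chained = 1 marks the
 * line IRQ_NOREQUEST | IRQ_NOPROBE and starts it up) and dispatches
 * the decoded child via generic_handle_irq(). set_irq_chained_handler()
 * is the usual wrapper around __set_irq_handler(irq, handle, 1, NULL);
 * my_decode_pending(), MY_CHILD_IRQ_BASE and PARENT_IRQ are
 * illustrative names.
 */
#if 0   /* example only */
static void my_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
        unsigned int child = my_decode_pending();

        generic_handle_irq(MY_CHILD_IRQ_BASE + child);
        /* signal completion on the parent, as its chip requires */
        desc->irq_data.chip->irq_eoi(&desc->irq_data);
}

        /* at setup time: */
        set_irq_chained_handler(PARENT_IRQ, my_cascade_handler);
#endif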

void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
                         irq_flow_handler_t handle)
{
        set_irq_chip(irq, chip);
        __set_irq_handler(irq, handle, 0, NULL);
}
EXPORT_SYMBOL_GPL(set_irq_chip_and_handler);

void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
{
        set_irq_chip(irq, chip);
        __set_irq_handler(irq, handle, 0, name);
}

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        /* Sanitize flags */
        set &= IRQF_MODIFY_MASK;
        clr &= IRQF_MODIFY_MASK;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->status &= ~clr;
        desc->status |= set;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
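
/*
 * Usage sketch (hypothetical): clear IRQ_NOREQUEST so drivers may
 * request the line, while setting IRQ_NOPROBE to keep it out of
 * autoprobing; both flags are assumed to be part of IRQF_MODIFY_MASK.
 */
#if 0   /* example only */
        irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
#endif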