/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
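
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * for a hypothetical interrupt controller "foo" binds its irq_chip to a
 * Linux irq number before installing a flow handler. All foo_* names and
 * registers are invented for the example.
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		writel(BIT(d->hwirq), foo_base + FOO_MASK_SET);
 *	}
 *
 *	static void foo_unmask(struct irq_data *d)
 *	{
 *		writel(BIT(d->hwirq), foo_base + FOO_MASK_CLR);
 *	}
 *
 *	static struct irq_chip foo_chip = {
 *		.name		= "foo",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	irq_set_chip(irq, &foo_chip);
 */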

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
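
/*
 * Example (sketch): a driver that knows its device signals with a rising
 * edge can configure the line before requesting it. Bits outside
 * IRQ_TYPE_SENSE_MASK are stripped above, so callers need not mask them.
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		pr_err("cannot set trigger type for irq %u\n", irq);
 */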

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
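
/*
 * Example (sketch): chip_data usually carries the per-instance state of the
 * interrupt controller, so the irq_chip callbacks can recover it from the
 * irq_data they are handed. foo_priv and FOO_MASK_SET are invented names.
 *
 *	struct foo_priv {
 *		void __iomem *base;
 *	};
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		struct foo_priv *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), priv->base + FOO_MASK_SET);
 *	}
 *
 *	irq_set_chip_data(irq, priv);
 */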

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
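
/*
 * Example (sketch): a typical user is an I2C GPIO expander whose
 * demultiplexing handler must sleep for the bus transfer. It runs as a
 * threaded handler and forwards the child interrupts, which were set up
 * with irq_set_nested_thread(), via handle_nested_irq(). foo_* names are
 * invented.
 *
 *	static irqreturn_t foo_demux_thread(int irq, void *data)
 *	{
 *		struct foo_chip *chip = data;
 *		unsigned long pending = foo_read_status(chip);	(may sleep)
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->ngpio)
 *			handle_nested_irq(chip->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */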

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
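
/*
 * Example (sketch): a chained demultiplexing handler typically installs
 * handle_simple_irq for its child interrupts (no hardware control needed)
 * and kicks them with generic_handle_irq(); the demux itself is installed
 * with irq_set_chained_handler(parent_irq, foo_demux). foo_* names and
 * registers are invented.
 *
 *	irq_set_chip_and_handler(child, &dummy_irq_chip, handle_simple_irq);
 *
 *	static void foo_demux(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = readl(foo_base + FOO_PENDING);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, FOO_NR_IRQS)
 *			generic_handle_irq(foo_irq_base + bit);
 *	}
 */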

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so that the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
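
/*
 * Example (sketch): flow handlers are not called by drivers directly; an
 * irq controller driver installs them together with its chip, e.g.:
 *
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_level_irq);
 *
 * The chip then needs to provide irq_mask (or irq_mask_ack) and irq_unmask,
 * which mask_ack_irq() and cond_unmask_irq() above rely on.
 */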

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
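
/*
 * Example (sketch): handle_edge_irq pairs with a chip that can ack the
 * latched edge; irq_ack is called unconditionally above, so the chip must
 * provide it. foo_chip is an invented name.
 *
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_edge_irq);
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
 */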

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
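
/*
 * Example (sketch): per cpu dev_id interrupts, e.g. a per-cpu timer, are
 * requested with request_percpu_irq() and a percpu pointer; the handler
 * above resolves the current cpu's instance before calling the action.
 * foo_* names are invented; the irq must have been marked with
 * irq_set_percpu_devid() and each cpu enables it via enable_percpu_irq().
 *
 *	static DEFINE_PER_CPU(struct foo_evt, foo_evt);
 *
 *	static irqreturn_t foo_timer_handler(int irq, void *dev_id)
 *	{
 *		struct foo_evt *evt = dev_id;	(this cpu's instance)
 *
 *		evt->event_handler(evt);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
 *				 &foo_evt);
 */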

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
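
/*
 * Example (sketch): most callers use the irq_set_chip_and_handler() inline
 * from <linux/irq.h>, which simply passes NULL for the name:
 *
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_level_irq);
 *
 * is equivalent to:
 *
 *	irq_set_chip_and_handler_name(irq, &foo_chip, handle_level_irq, NULL);
 */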

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
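
/*
 * Example (sketch): mark an irq as unsuitable for probing and request(),
 * e.g. because it is consumed by a chained handler:
 *
 *	irq_modify_status(irq, 0, IRQ_NOPROBE | IRQ_NOREQUEST);
 *
 * The irq_set_status_flags()/irq_clear_status_flags() helpers in
 * <linux/irq.h> wrap this function for the set-only and clear-only cases.
 */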

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}