/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
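
/*
 * Illustrative sketch (not part of the original header): a typical driver
 * pattern for request_threaded_irq() with IRQF_ONESHOT, as described in the
 * flag comments above. The device name "foo", struct foo_dev and the status
 * check are hypothetical placeholders; only the declarations above are
 * assumed.
 */
#if 0	/* example only, never compiled as part of this header */
struct foo_dev {
	void __iomem *regs;
	int irq;
};

/* Primary handler: runs in hardirq context, keep it short. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	/* Check and acknowledge the (hypothetical) hardware status here;
	 * return IRQ_NONE if the device did not raise this interrupt. */
	return IRQ_WAKE_THREAD;	/* defer the real work to foo_thread_fn() */
}

/* Threaded handler: runs in process context, may sleep. */
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* talk to the (hypothetical) hardware here */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *fd)
{
	/* IRQF_ONESHOT keeps the line masked until foo_thread_fn() returns. */
	return request_threaded_irq(fd->irq, foo_hardirq, foo_thread_fn,
				    IRQF_ONESHOT, "foo", fd);
}

static void foo_teardown_irq(struct foo_dev *fd)
{
	free_irq(fd->irq, fd);	/* dev_id must match the one passed above */
}
#endif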

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change.  This will be
 *		called in process context.
 * @release:	Function to be called on release.  This will be
 *		called in process context.  Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP */
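
/*
 * Illustrative sketch (not part of the original header): steering an
 * interrupt to a single CPU with irq_set_affinity(). The irq number and
 * CPU id are hypothetical; on !CONFIG_SMP the stubs above make this
 * return -EINVAL.
 */
#if 0	/* example only, never compiled as part of this header */
static int foo_pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
	if (!irq_can_set_affinity(irq))
		return -EINVAL;

	/* cpumask_of(cpu) is a mask containing exactly this one CPU. */
	return irq_set_affinity(irq, cpumask_of(cpu));
}
#endif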

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that
 * a particular irq context is disabled, and which are the only
 * irq-context users of a lock, so that it's safe to take the lock
 * in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
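
/*
 * Illustrative sketch (not part of the original header): using the IRQ
 * wakeup control above so that a device interrupt can wake the system from
 * suspend. The foo_* names are hypothetical suspend/resume hooks, and
 * struct foo_dev is the hypothetical device from the earlier sketch.
 */
#if 0	/* example only, never compiled as part of this header */
static int foo_suspend(struct foo_dev *fd)
{
	/* Keep this IRQ armed as a system wakeup source while suspended. */
	return enable_irq_wake(fd->irq);
}

static int foo_resume(struct foo_dev *fd)
{
	return disable_irq_wake(fd->irq);
}
#endif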


#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   very high frequency threaded job scheduling. For almost all
   purposes tasklets are more than enough; e.g. all serial device
   BHs et al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
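
/*
 * Illustrative sketch (not part of the original header): how a softirq is
 * wired up and raised, in the spirit of kernel/softirq.c. The action
 * function name is hypothetical, and TASKLET_SOFTIRQ is used purely as an
 * example index -- per the comment above, new softirq slots should not be
 * added, and each existing slot is owned by its subsystem.
 */
#if 0	/* example only, never compiled as part of this header */
static void foo_softirq_action(struct softirq_action *a)
{
	/* Runs in softirq context with hardirqs enabled; must not sleep. */
}

static void foo_softirq_setup(void)
{
	open_softirq(TASKLET_SOFTIRQ, foo_softirq_action);
}

static void foo_softirq_kick(void)
{
	/* raise_softirq() may be called with hardirqs enabled;
	 * raise_softirq_irqoff() requires them to be disabled already. */
	raise_softirq(TASKLET_SOFTIRQ);
}
#endif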

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- the multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If the tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not with
     respect to other tasklets. If a client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
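
/*
 * Illustrative sketch (not part of the original header): the usual tasklet
 * life cycle using DECLARE_TASKLET()/tasklet_schedule()/tasklet_kill(). All
 * foo_* names are hypothetical; the tasklet function runs in softirq
 * context and must not sleep.
 */
#if 0	/* example only, never compiled as part of this header */
static void foo_tasklet_fn(unsigned long data);

DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn, 0);

static void foo_tasklet_fn(unsigned long data)
{
	/* deferred, non-sleeping work goes here */
}

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	tasklet_schedule(&foo_tasklet);	/* defer the heavy lifting */
	return IRQ_HANDLED;
}

static void foo_cleanup(void)
{
	/* make sure the tasklet is neither scheduled nor running */
	tasklet_kill(&foo_tasklet);
}
#endif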

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
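
/*
 * Illustrative sketch (not part of the original header): a tasklet_hrtimer.
 * The hrtimer fires in hard interrupt context and hands off to the embedded
 * tasklet, which then invokes the supplied function in softirq context. The
 * foo_* names and the 10 ms delay are hypothetical.
 */
#if 0	/* example only, never compiled as part of this header */
static struct tasklet_hrtimer foo_thrt;

static enum hrtimer_restart foo_thrt_fn(struct hrtimer *timer)
{
	/* deferred work runs here, in softirq context */
	return HRTIMER_NORESTART;
}

static void foo_thrt_setup(void)
{
	tasklet_hrtimer_init(&foo_thrt, foo_thrt_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&foo_thrt, ktime_set(0, 10 * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
}

static void foo_thrt_teardown(void)
{
	tasklet_hrtimer_cancel(&foo_thrt);
}
#endif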

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
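
/*
 * Illustrative sketch (not part of the original header): the probing recipe
 * documented above, condensed into code. The foo_* device steps are
 * hypothetical stubs; only probe_irq_on()/probe_irq_off() come from the
 * declarations above.
 */
#if 0	/* example only, never compiled as part of this header */
static int foo_autoprobe_irq(struct foo_dev *fd)
{
	unsigned long irqs;
	int irq;

	foo_mask_device_irq(fd);	/* step 1: quiesce the device */
	irqs = probe_irq_on();		/* step 3: take over idle IRQs */
	foo_trigger_device_irq(fd);	/* step 4: make the device interrupt */
	mdelay(10);			/* step 5: give it time to fire */
	irq = probe_irq_off(irqs);	/* step 6: 0 = none, <0 = multiple */
	foo_ack_device_irq(fd);		/* step 7: clear the pending irq */

	return irq;
}
#endif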

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif