/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
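/*
 * Per-descriptor cpumask handling for SMP: alloc_masks() allocates the
 * affinity mask (and, with CONFIG_GENERIC_PENDING_IRQ, the pending mask)
 * on the requested node, desc_smp_init() resets both to their default
 * state.
 */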
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
        desc->irq_data.node = node;
        cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
        return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

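/*
 * desc_set_defaults() - bring a descriptor back to its pristine state:
 * no chip, no handler data, handle_bad_irq as flow handler and the
 * per-cpu statistics cleared.  Used when a descriptor is first allocated
 * and again from dynamic_irq_cleanup().
 */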
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
        int cpu;

        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        desc->irq_data.handler_data = NULL;
        desc->irq_data.msi_desc = NULL;
        desc->status = IRQ_DEFAULT_INIT_FLAGS;
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

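/*
 * allocated_irqs tracks which interrupt numbers are currently in use;
 * sparse_irq_lock serializes updates to the bitmap and to the descriptor
 * store below.
 */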
static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

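/*
 * With CONFIG_SPARSE_IRQ the descriptors are allocated on demand and kept
 * in a radix tree indexed by irq number, so only interrupts that actually
 * exist consume memory.
 */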
static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL_GPL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

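/*
 * alloc_desc() - allocate one descriptor on @node together with its
 * per-cpu kstat counters and cpumasks, then initialize it through
 * desc_set_defaults().
 */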
static struct irq_desc *alloc_desc(int irq, int node)
{
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);

        desc_set_defaults(irq, desc, node);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

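/*
 * free_desc() - remove the descriptor from proc and the radix tree, then
 * release its masks, statistics and the descriptor itself.  Clearing the
 * corresponding allocated_irqs bit is left to the callers.
 */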
static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        unregister_irq_proc(irq, desc);

        mutex_lock(&sparse_irq_lock);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < cnt; i++) {
                desc = alloc_desc(start + i, node);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        int res = irq_alloc_descs(irq, irq, 1, node);

        if (res == -EEXIST || res == irq)
                return irq_to_desc(irq);
        return NULL;
}

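/*
 * early_irq_init() - called early during boot, before any interrupt can
 * fire; pre-allocates the descriptors the architecture asked for via
 * arch_probe_nr_irqs() and then hands over to arch_early_irq_init().
 */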
int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

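/*
 * Without CONFIG_SPARSE_IRQ all NR_IRQS descriptors live in one statically
 * sized array and irq_to_desc() becomes a simple bounds-checked lookup.
 */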
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status         = IRQ_DEFAULT_INIT_FLAGS,
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq_data.irq = i;
                desc[i].irq_data.chip = &no_irq_chip;
                /* TODO : do this allocation on-demand ... */
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(desc + i, GFP_KERNEL, node);
                desc_smp_init(desc + i, node);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL_GPL(irq_to_desc);

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
        return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
        dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
        struct irq_desc *desc;
        unsigned int i;

        for (i = 0; i < cnt; i++) {
                desc = irq_to_desc(start + i);
                if (desc && !desc->kstat_irqs) {
                        unsigned int __percpu *stats = alloc_percpu(unsigned int);

                        if (!stats)
                                return -1;
                        if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
                                free_percpu(stats);
                }
        }
#endif
        return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate.
 * @node: Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        ret = -ENOMEM;
        if (start >= nr_irqs)
                goto err;

        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return alloc_descs(start, cnt, node);

err:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(irq_alloc_descs);

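/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a driver that needs a block of four dynamically numbered interrupts on
 * the local node could do
 *
 *      int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *      if (base < 0)
 *              return base;
 *      ...
 *      irq_free_descs(base, 4);
 *
 * Passing irq >= 0 (with from == irq) requests that exact number and
 * returns -EEXIST if it is already in use.
 */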
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from: mark from irq number
 * @cnt: number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
        unsigned int start;
        int ret = 0;

        if (!cnt || (from + cnt) > nr_irqs)
                return -EINVAL;

        mutex_lock(&sparse_irq_lock);
        start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
        if (start == from)
                bitmap_set(allocated_irqs, start, cnt);
        else
                ret = -EEXIST;
        mutex_unlock(&sparse_irq_lock);
        return ret;
}

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset: where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq: irq number to initialize
 */
void dynamic_irq_cleanup(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, desc_node(desc));
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

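/*
 * The accessors below are used by the rest of the kernel (e.g. the
 * /proc/interrupts output) to read the per-cpu interrupt counts; both
 * return 0 for interrupts that have no descriptor or no statistics
 * allocated yet.
 */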
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}
#endif /* CONFIG_GENERIC_HARDIRQS */