Root/
1 | /* |
2 | * linux/kernel/irq/proc.c |
3 | * |
4 | * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar |
5 | * |
6 | * This file contains the /proc/irq/ handling code. |
7 | */ |
8 | |
9 | #include <linux/irq.h> |
10 | #include <linux/gfp.h> |
11 | #include <linux/proc_fs.h> |
12 | #include <linux/seq_file.h> |
13 | #include <linux/interrupt.h> |
14 | |
15 | #include "internals.h" |
16 | |
/* The /proc/irq directory; parent of every per-IRQ subdirectory. */
static struct proc_dir_entry *root_irq_dir;
18 | |
19 | #ifdef CONFIG_SMP |
20 | |
/*
 * Show handler for /proc/irq/<irq>/smp_affinity: print the IRQ's current
 * CPU affinity mask — or the pending one, if an affinity change is still
 * in flight — followed by a newline.
 */
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	/* m->private carries the IRQ number, stashed as a pointer. */
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask = desc->irq_data.affinity;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* A move is queued: report the mask that will take effect. */
	if (desc->status & IRQ_MOVE_PENDING)
		mask = desc->pending_mask;
#endif
	seq_cpumask(m, mask);
	seq_putc(m, '\n');
	return 0;
}
34 | |
/*
 * Show handler for /proc/irq/<irq>/affinity_hint: print the driver's
 * suggested affinity mask, or an empty mask if no hint was supplied.
 */
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	/* zalloc: the mask reads as empty if no hint is copied below. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Snapshot the hint under the descriptor lock... */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/* ...and format the copy outside the lock. */
	seq_cpumask(m, mask);
	seq_putc(m, '\n');
	free_cpumask_var(mask);

	return 0;
}
55 | |
/*
 * Architectures may provide their own validator for user-supplied
 * affinity masks; default to accepting any mask when none is defined.
 */
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

/* When non-zero, affinity writes are rejected with -EIO below.
 * NOTE(review): set elsewhere (presumably arch/boot code) — confirm. */
int no_irq_affinity;
/*
 * Write handler for /proc/irq/<irq>/smp_affinity: parse a cpumask from
 * user space and retarget the interrupt to it.
 *
 * Returns the number of bytes consumed (count) on success, or a
 * negative errno (-EIO, -ENOMEM, -EINVAL, or a parse error).
 */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	/* The IRQ number is stashed in the proc entry's data pointer. */
	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
	cpumask_var_t new_value;
	int err;

	/* Refuse if the chip cannot change affinity, affinity handling is
	 * globally disabled, or this IRQ is excluded from balancing. */
	if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity ||
	    irq_balancing_disabled(irq))
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/* Give the architecture a chance to veto the mask. */
	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		irq_set_affinity(irq, new_value);
		err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}
102 | |
103 | static int irq_affinity_proc_open(struct inode *inode, struct file *file) |
104 | { |
105 | return single_open(file, irq_affinity_proc_show, PDE(inode)->data); |
106 | } |
107 | |
108 | static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file) |
109 | { |
110 | return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data); |
111 | } |
112 | |
/* File operations for /proc/irq/<irq>/smp_affinity (read + write). */
static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};
120 | |
/* File operations for /proc/irq/<irq>/affinity_hint (read-only). */
static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
127 | |
/* Show handler for /proc/irq/default_smp_affinity: print the global
 * default IRQ affinity mask followed by a newline. */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, irq_default_affinity);
	seq_putc(m, '\n');
	return 0;
}
134 | |
/*
 * Write handler for /proc/irq/default_smp_affinity: parse a cpumask from
 * user space and install it as irq_default_affinity.
 *
 * Returns the number of bytes consumed (count) on success, or a
 * negative errno (-ENOMEM, -EINVAL, or a parse error).
 */
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/* Give the architecture a chance to veto the mask. */
	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}
170 | |
171 | static int default_affinity_open(struct inode *inode, struct file *file) |
172 | { |
173 | return single_open(file, default_affinity_show, PDE(inode)->data); |
174 | } |
175 | |
/* File operations for /proc/irq/default_smp_affinity (read + write). */
static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};
183 | |
/* Show handler for /proc/irq/<irq>/node: print the IRQ's node number. */
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", desc->irq_data.node);
	return 0;
}
191 | |
192 | static int irq_node_proc_open(struct inode *inode, struct file *file) |
193 | { |
194 | return single_open(file, irq_node_proc_show, PDE(inode)->data); |
195 | } |
196 | |
/* File operations for /proc/irq/<irq>/node (read-only). */
static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
203 | #endif |
204 | |
/*
 * Show handler for /proc/irq/<irq>/spurious: print the interrupt count,
 * the number of unhandled occurrences, and the timestamp of the last
 * unhandled one (converted from jiffies to milliseconds).
 */
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}
214 | |
215 | static int irq_spurious_proc_open(struct inode *inode, struct file *file) |
216 | { |
217 | return single_open(file, irq_spurious_proc_show, PDE(inode)->data); |
218 | } |
219 | |
/* File operations for /proc/irq/<irq>/spurious (read-only). */
static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
226 | |
227 | #define MAX_NAMELEN 128 |
228 | |
229 | static int name_unique(unsigned int irq, struct irqaction *new_action) |
230 | { |
231 | struct irq_desc *desc = irq_to_desc(irq); |
232 | struct irqaction *action; |
233 | unsigned long flags; |
234 | int ret = 1; |
235 | |
236 | raw_spin_lock_irqsave(&desc->lock, flags); |
237 | for (action = desc->action ; action; action = action->next) { |
238 | if ((action != new_action) && action->name && |
239 | !strcmp(new_action->name, action->name)) { |
240 | ret = 0; |
241 | break; |
242 | } |
243 | } |
244 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
245 | return ret; |
246 | } |
247 | |
248 | void register_handler_proc(unsigned int irq, struct irqaction *action) |
249 | { |
250 | char name [MAX_NAMELEN]; |
251 | struct irq_desc *desc = irq_to_desc(irq); |
252 | |
253 | if (!desc->dir || action->dir || !action->name || |
254 | !name_unique(irq, action)) |
255 | return; |
256 | |
257 | memset(name, 0, MAX_NAMELEN); |
258 | snprintf(name, MAX_NAMELEN, "%s", action->name); |
259 | |
260 | /* create /proc/irq/1234/handler/ */ |
261 | action->dir = proc_mkdir(name, desc->dir); |
262 | } |
263 | |
264 | #undef MAX_NAMELEN |
265 | |
266 | #define MAX_NAMELEN 10 |
267 | |
268 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) |
269 | { |
270 | char name [MAX_NAMELEN]; |
271 | |
272 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) |
273 | return; |
274 | |
275 | memset(name, 0, MAX_NAMELEN); |
276 | sprintf(name, "%d", irq); |
277 | |
278 | /* create /proc/irq/1234 */ |
279 | desc->dir = proc_mkdir(name, root_irq_dir); |
280 | if (!desc->dir) |
281 | return; |
282 | |
283 | #ifdef CONFIG_SMP |
284 | /* create /proc/irq/<irq>/smp_affinity */ |
285 | proc_create_data("smp_affinity", 0600, desc->dir, |
286 | &irq_affinity_proc_fops, (void *)(long)irq); |
287 | |
288 | /* create /proc/irq/<irq>/affinity_hint */ |
289 | proc_create_data("affinity_hint", 0400, desc->dir, |
290 | &irq_affinity_hint_proc_fops, (void *)(long)irq); |
291 | |
292 | proc_create_data("node", 0444, desc->dir, |
293 | &irq_node_proc_fops, (void *)(long)irq); |
294 | #endif |
295 | |
296 | proc_create_data("spurious", 0444, desc->dir, |
297 | &irq_spurious_proc_fops, (void *)(long)irq); |
298 | } |
299 | |
300 | void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) |
301 | { |
302 | char name [MAX_NAMELEN]; |
303 | |
304 | if (!root_irq_dir || !desc->dir) |
305 | return; |
306 | #ifdef CONFIG_SMP |
307 | remove_proc_entry("smp_affinity", desc->dir); |
308 | remove_proc_entry("affinity_hint", desc->dir); |
309 | remove_proc_entry("node", desc->dir); |
310 | #endif |
311 | remove_proc_entry("spurious", desc->dir); |
312 | |
313 | memset(name, 0, MAX_NAMELEN); |
314 | sprintf(name, "%u", irq); |
315 | remove_proc_entry(name, root_irq_dir); |
316 | } |
317 | |
318 | #undef MAX_NAMELEN |
319 | |
320 | void unregister_handler_proc(unsigned int irq, struct irqaction *action) |
321 | { |
322 | if (action->dir) { |
323 | struct irq_desc *desc = irq_to_desc(irq); |
324 | |
325 | remove_proc_entry(action->dir->name, desc->dir); |
326 | } |
327 | } |
328 | |
/* Create /proc/irq/default_smp_affinity (SMP builds only). */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0600, NULL,
		    &default_affinity_proc_fops);
#endif
}
336 | |
337 | void init_irq_proc(void) |
338 | { |
339 | unsigned int irq; |
340 | struct irq_desc *desc; |
341 | |
342 | /* create /proc/irq */ |
343 | root_irq_dir = proc_mkdir("irq", NULL); |
344 | if (!root_irq_dir) |
345 | return; |
346 | |
347 | register_default_affinity_proc(); |
348 | |
349 | /* |
350 | * Create entries for all existing IRQs. |
351 | */ |
352 | for_each_irq_desc(irq, desc) { |
353 | if (!desc) |
354 | continue; |
355 | |
356 | register_irq_proc(irq, desc); |
357 | } |
358 | } |
359 | |
360 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9