Root/
1 | /* Industrial I/O event handling |
2 | * |
3 | * Copyright (c) 2008 Jonathan Cameron |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published by |
7 | * the Free Software Foundation. |
8 | * |
9 | * Based on elements of hwmon and input subsystems. |
10 | */ |
11 | |
12 | #include <linux/anon_inodes.h> |
13 | #include <linux/device.h> |
14 | #include <linux/fs.h> |
15 | #include <linux/kernel.h> |
16 | #include <linux/kfifo.h> |
17 | #include <linux/module.h> |
18 | #include <linux/poll.h> |
19 | #include <linux/sched.h> |
20 | #include <linux/slab.h> |
21 | #include <linux/uaccess.h> |
22 | #include <linux/wait.h> |
23 | #include <linux/iio/iio.h> |
24 | #include "iio_core.h" |
25 | #include <linux/iio/sysfs.h> |
26 | #include <linux/iio/events.h> |
27 | |
/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:          wait queue to allow blocking reads of events
 * @det_events:    kfifo holding up to 16 detected events awaiting userspace
 * @dev_attr_list: list of event interface sysfs attributes
 * @flags:         file operations related flags, including the busy bit
 *                 (IIO_BUSY_BIT_POS) that marks the chrdev as open
 * @group:         event interface sysfs attribute group
 *
 * One instance per device; the wait queue's own spinlock (wait.lock) is
 * used to protect both the kfifo and @flags throughout this file.
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
};
44 | |
45 | int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) |
46 | { |
47 | struct iio_event_interface *ev_int = indio_dev->event_interface; |
48 | struct iio_event_data ev; |
49 | unsigned long flags; |
50 | int copied; |
51 | |
52 | /* Does anyone care? */ |
53 | spin_lock_irqsave(&ev_int->wait.lock, flags); |
54 | if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { |
55 | |
56 | ev.id = ev_code; |
57 | ev.timestamp = timestamp; |
58 | |
59 | copied = kfifo_put(&ev_int->det_events, &ev); |
60 | if (copied != 0) |
61 | wake_up_locked_poll(&ev_int->wait, POLLIN); |
62 | } |
63 | spin_unlock_irqrestore(&ev_int->wait.lock, flags); |
64 | |
65 | return 0; |
66 | } |
67 | EXPORT_SYMBOL(iio_push_event); |
68 | |
69 | /** |
70 | * iio_event_poll() - poll the event queue to find out if it has data |
71 | */ |
72 | static unsigned int iio_event_poll(struct file *filep, |
73 | struct poll_table_struct *wait) |
74 | { |
75 | struct iio_dev *indio_dev = filep->private_data; |
76 | struct iio_event_interface *ev_int = indio_dev->event_interface; |
77 | unsigned int events = 0; |
78 | |
79 | poll_wait(filep, &ev_int->wait, wait); |
80 | |
81 | spin_lock_irq(&ev_int->wait.lock); |
82 | if (!kfifo_is_empty(&ev_int->det_events)) |
83 | events = POLLIN | POLLRDNORM; |
84 | spin_unlock_irq(&ev_int->wait.lock); |
85 | |
86 | return events; |
87 | } |
88 | |
/*
 * iio_event_chrdev_read() - read queued events into a userspace buffer.
 *
 * Copies as many whole struct iio_event_data records as fit in @count
 * bytes.  Blocks until at least one event is available unless O_NONBLOCK
 * is set.  Returns the number of bytes copied or a negative errno.
 */
static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int copied;
	int ret;

	/* Refuse buffers too small for even a single event record. */
	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	spin_lock_irq(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		/*
		 * Drops ev_int->wait.lock while sleeping and reacquires it
		 * before returning; non-zero means we were interrupted by
		 * a signal.
		 */
		ret = wait_event_interruptible_locked_irq(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
			goto error_unlock;
		/* Single access device so no one else can get the data */
	}

	/* Copy out whole events; kfifo_to_user reports bytes copied. */
	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	spin_unlock_irq(&ev_int->wait.lock);

	return ret ? ret : copied;
}
123 | |
/*
 * iio_event_chrdev_release() - release the event chrdev.
 *
 * Clears the busy bit so the interface can be reopened, discards any
 * queued events, and drops the device reference taken in
 * iio_event_getfd().
 */
static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;

	spin_lock_irq(&ev_int->wait.lock);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The mask will prevent
	 * any new __iio_push_event calls running.
	 */
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock_irq(&ev_int->wait.lock);

	/* Balances the iio_device_get() in iio_event_getfd(). */
	iio_device_put(indio_dev);

	return 0;
}
143 | |
144 | static const struct file_operations iio_event_chrdev_fileops = { |
145 | .read = iio_event_chrdev_read, |
146 | .poll = iio_event_poll, |
147 | .release = iio_event_chrdev_release, |
148 | .owner = THIS_MODULE, |
149 | .llseek = noop_llseek, |
150 | }; |
151 | |
/*
 * iio_event_getfd() - hand userspace an fd for the device's event queue.
 *
 * Only one open event fd per device is permitted at a time (enforced via
 * IIO_BUSY_BIT_POS).  Returns the new fd, -ENODEV when the device has no
 * event interface, -EBUSY when one is already open, or another negative
 * errno from anon_inode_getfd().
 */
int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	/* Atomically claim the single-open slot under the wait lock. */
	spin_lock_irq(&ev_int->wait.lock);
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock_irq(&ev_int->wait.lock);
		return -EBUSY;
	}
	spin_unlock_irq(&ev_int->wait.lock);
	/* Hold a device reference for the lifetime of the fd. */
	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
			      indio_dev, O_RDONLY);
	if (fd < 0) {
		/* Roll back the busy claim and the reference on failure. */
		spin_lock_irq(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock_irq(&ev_int->wait.lock);
		iio_device_put(indio_dev);
	}
	return fd;
}
178 | |
/* sysfs name fragments for each event type, indexed by IIO_EV_TYPE_*. */
static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};
186 | |
/* sysfs name fragments for each event direction, indexed by IIO_EV_DIR_*. */
static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};
192 | |
193 | static ssize_t iio_ev_state_store(struct device *dev, |
194 | struct device_attribute *attr, |
195 | const char *buf, |
196 | size_t len) |
197 | { |
198 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
199 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); |
200 | int ret; |
201 | bool val; |
202 | |
203 | ret = strtobool(buf, &val); |
204 | if (ret < 0) |
205 | return ret; |
206 | |
207 | ret = indio_dev->info->write_event_config(indio_dev, |
208 | this_attr->address, |
209 | val); |
210 | return (ret < 0) ? ret : len; |
211 | } |
212 | |
213 | static ssize_t iio_ev_state_show(struct device *dev, |
214 | struct device_attribute *attr, |
215 | char *buf) |
216 | { |
217 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
218 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); |
219 | int val = indio_dev->info->read_event_config(indio_dev, |
220 | this_attr->address); |
221 | |
222 | if (val < 0) |
223 | return val; |
224 | else |
225 | return sprintf(buf, "%d\n", val); |
226 | } |
227 | |
228 | static ssize_t iio_ev_value_show(struct device *dev, |
229 | struct device_attribute *attr, |
230 | char *buf) |
231 | { |
232 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
233 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); |
234 | int val, ret; |
235 | |
236 | ret = indio_dev->info->read_event_value(indio_dev, |
237 | this_attr->address, &val); |
238 | if (ret < 0) |
239 | return ret; |
240 | |
241 | return sprintf(buf, "%d\n", val); |
242 | } |
243 | |
244 | static ssize_t iio_ev_value_store(struct device *dev, |
245 | struct device_attribute *attr, |
246 | const char *buf, |
247 | size_t len) |
248 | { |
249 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
250 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); |
251 | int val; |
252 | int ret; |
253 | |
254 | if (!indio_dev->info->write_event_value) |
255 | return -EINVAL; |
256 | |
257 | ret = kstrtoint(buf, 10, &val); |
258 | if (ret) |
259 | return ret; |
260 | |
261 | ret = indio_dev->info->write_event_value(indio_dev, this_attr->address, |
262 | val); |
263 | if (ret < 0) |
264 | return ret; |
265 | |
266 | return len; |
267 | } |
268 | |
/*
 * iio_device_add_event_sysfs() - create event attributes for one channel.
 *
 * For every bit set in @chan->event_mask, creates a pair of sysfs
 * attributes: <type>_<dir>_en (enable) and <type>_<dir>_value
 * (threshold).  Returns the number of attributes created, or a
 * negative errno on failure.
 */
static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	/* Bit i encodes type = i / IIO_EV_DIR_MAX, dir = i % IIO_EV_DIR_MAX. */
	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		/* Build the event code the driver sees as the address. */
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			/*
			 * NOTE(review): the diff argument to IIO_EVENT_CODE is
			 * passed as 0 here even though the channel is
			 * differential — confirm against the macro definition
			 * in <linux/iio/events.h>.
			 */
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		/* Enable attribute; ownership of the name is taken by sysfs. */
		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		/* Matching value attribute for the same event code. */
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}
341 | |
342 | static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev) |
343 | { |
344 | struct iio_dev_attr *p, *n; |
345 | list_for_each_entry_safe(p, n, |
346 | &indio_dev->event_interface-> |
347 | dev_attr_list, l) { |
348 | kfree(p->dev_attr.attr.name); |
349 | kfree(p); |
350 | } |
351 | } |
352 | |
353 | static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev) |
354 | { |
355 | int j, ret, attrcount = 0; |
356 | |
357 | /* Dynically created from the channels array */ |
358 | for (j = 0; j < indio_dev->num_channels; j++) { |
359 | ret = iio_device_add_event_sysfs(indio_dev, |
360 | &indio_dev->channels[j]); |
361 | if (ret < 0) |
362 | return ret; |
363 | attrcount += ret; |
364 | } |
365 | return attrcount; |
366 | } |
367 | |
368 | static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev) |
369 | { |
370 | int j; |
371 | |
372 | for (j = 0; j < indio_dev->num_channels; j++) |
373 | if (indio_dev->channels[j].event_mask != 0) |
374 | return true; |
375 | return false; |
376 | } |
377 | |
378 | static void iio_setup_ev_int(struct iio_event_interface *ev_int) |
379 | { |
380 | INIT_KFIFO(ev_int->det_events); |
381 | init_waitqueue_head(&ev_int->wait); |
382 | } |
383 | |
/* Name of the sysfs group all event attributes appear under. */
static const char *iio_event_group_name = "events";
/*
 * iio_device_register_eventset() - build and register the "events" group.
 *
 * Allocates the event interface, collects the driver's static event
 * attributes plus the dynamically generated per-channel ones into one
 * NULL-terminated attribute array, and hooks the resulting group into
 * indio_dev->groups.  Returns 0 on success or a negative errno.
 */
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	/* Nothing to register when no events exist at all. */
	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

	iio_setup_ev_int(indio_dev->event_interface);
	/* Count the static attributes supplied by the driver. */
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/* Generate per-channel event attributes and add them to the count. */
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	/* One flat array holds static then dynamic attrs, NULL-terminated. */
	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
			sizeof(indio_dev->event_interface->group.attrs[0]),
			GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	/* Static attrs copied first, keeping the driver's ordering. */
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}
450 | |
451 | void iio_device_unregister_eventset(struct iio_dev *indio_dev) |
452 | { |
453 | if (indio_dev->event_interface == NULL) |
454 | return; |
455 | __iio_remove_event_config_attrs(indio_dev); |
456 | kfree(indio_dev->event_interface->group.attrs); |
457 | kfree(indio_dev->event_interface); |
458 | } |
459 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9