Root/
1 | /* |
2 | * linux/drivers/mfd/ucb1x00-core.c |
3 | * |
4 | * Copyright (C) 2001 Russell King, All Rights Reserved. |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License. |
9 | * |
10 | * The UCB1x00 core driver provides basic services for handling IO, |
11 | * the ADC, interrupts, and accessing registers. It is designed |
12 | * such that everything goes through this layer, thereby providing |
13 | * a consistent locking methodology, as well as allowing the drivers |
14 | * to be used on other non-MCP-enabled hardware platforms. |
15 | * |
16 | * Note that all locks are private to this file. Nothing else may |
17 | * touch them. |
18 | */ |
19 | #include <linux/module.h> |
20 | #include <linux/kernel.h> |
21 | #include <linux/sched.h> |
22 | #include <linux/slab.h> |
23 | #include <linux/init.h> |
24 | #include <linux/errno.h> |
25 | #include <linux/interrupt.h> |
26 | #include <linux/irq.h> |
27 | #include <linux/device.h> |
28 | #include <linux/mutex.h> |
29 | #include <linux/mfd/ucb1x00.h> |
30 | #include <linux/pm.h> |
31 | #include <linux/gpio.h> |
32 | |
33 | static DEFINE_MUTEX(ucb1x00_mutex); |
34 | static LIST_HEAD(ucb1x00_drivers); |
35 | static LIST_HEAD(ucb1x00_devices); |
36 | |
37 | /** |
38 | * ucb1x00_io_set_dir - set IO direction |
39 | * @ucb: UCB1x00 structure describing chip |
40 | * @in: bitfield of IO pins to be set as inputs |
41 | * @out: bitfield of IO pins to be set as outputs |
42 | * |
43 | * Set the IO direction of the ten general purpose IO pins on |
44 | * the UCB1x00 chip. The @in bitfield has priority over the |
45 | * @out bitfield, in that if you specify a pin as both input |
46 | * and output, it will end up as an input. |
47 | * |
48 | * ucb1x00_enable must have been called to enable the comms |
49 | * before using this function. |
50 | * |
51 | * This function takes a spinlock, disabling interrupts. |
52 | */ |
53 | void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out) |
54 | { |
55 | unsigned long flags; |
56 | |
57 | spin_lock_irqsave(&ucb->io_lock, flags); |
58 | ucb->io_dir |= out; |
59 | ucb->io_dir &= ~in; |
60 | |
61 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); |
62 | spin_unlock_irqrestore(&ucb->io_lock, flags); |
63 | } |
64 | |
65 | /** |
66 | * ucb1x00_io_write - set or clear IO outputs |
67 | * @ucb: UCB1x00 structure describing chip |
68 | * @set: bitfield of IO pins to set to logic '1' |
69 | * @clear: bitfield of IO pins to set to logic '0' |
70 | * |
71 | * Set the IO output state of the specified IO pins. The value |
72 | * is retained if the pins are subsequently configured as inputs. |
73 | * The @clear bitfield has priority over the @set bitfield - |
74 | * outputs will be cleared. |
75 | * |
76 | * ucb1x00_enable must have been called to enable the comms |
77 | * before using this function. |
78 | * |
79 | * This function takes a spinlock, disabling interrupts. |
80 | */ |
81 | void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear) |
82 | { |
83 | unsigned long flags; |
84 | |
85 | spin_lock_irqsave(&ucb->io_lock, flags); |
86 | ucb->io_out |= set; |
87 | ucb->io_out &= ~clear; |
88 | |
89 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); |
90 | spin_unlock_irqrestore(&ucb->io_lock, flags); |
91 | } |
92 | |
93 | /** |
94 | * ucb1x00_io_read - read the current state of the IO pins |
95 | * @ucb: UCB1x00 structure describing chip |
96 | * |
97 | * Return a bitfield describing the logic state of the ten |
98 | * general purpose IO pins. |
99 | * |
100 | * ucb1x00_enable must have been called to enable the comms |
101 | * before using this function. |
102 | * |
103 | * This function does not take any mutexes or spinlocks. |
104 | */ |
105 | unsigned int ucb1x00_io_read(struct ucb1x00 *ucb) |
106 | { |
107 | return ucb1x00_reg_read(ucb, UCB_IO_DATA); |
108 | } |
109 | |
110 | static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value) |
111 | { |
112 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); |
113 | unsigned long flags; |
114 | |
115 | spin_lock_irqsave(&ucb->io_lock, flags); |
116 | if (value) |
117 | ucb->io_out |= 1 << offset; |
118 | else |
119 | ucb->io_out &= ~(1 << offset); |
120 | |
121 | ucb1x00_enable(ucb); |
122 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); |
123 | ucb1x00_disable(ucb); |
124 | spin_unlock_irqrestore(&ucb->io_lock, flags); |
125 | } |
126 | |
127 | static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset) |
128 | { |
129 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); |
130 | unsigned val; |
131 | |
132 | ucb1x00_enable(ucb); |
133 | val = ucb1x00_reg_read(ucb, UCB_IO_DATA); |
134 | ucb1x00_disable(ucb); |
135 | |
136 | return val & (1 << offset); |
137 | } |
138 | |
139 | static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset) |
140 | { |
141 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); |
142 | unsigned long flags; |
143 | |
144 | spin_lock_irqsave(&ucb->io_lock, flags); |
145 | ucb->io_dir &= ~(1 << offset); |
146 | ucb1x00_enable(ucb); |
147 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); |
148 | ucb1x00_disable(ucb); |
149 | spin_unlock_irqrestore(&ucb->io_lock, flags); |
150 | |
151 | return 0; |
152 | } |
153 | |
154 | static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset |
155 | , int value) |
156 | { |
157 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); |
158 | unsigned long flags; |
159 | unsigned old, mask = 1 << offset; |
160 | |
161 | spin_lock_irqsave(&ucb->io_lock, flags); |
162 | old = ucb->io_out; |
163 | if (value) |
164 | ucb->io_out |= mask; |
165 | else |
166 | ucb->io_out &= ~mask; |
167 | |
168 | ucb1x00_enable(ucb); |
169 | if (old != ucb->io_out) |
170 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); |
171 | |
172 | if (!(ucb->io_dir & mask)) { |
173 | ucb->io_dir |= mask; |
174 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); |
175 | } |
176 | ucb1x00_disable(ucb); |
177 | spin_unlock_irqrestore(&ucb->io_lock, flags); |
178 | |
179 | return 0; |
180 | } |
181 | |
182 | static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset) |
183 | { |
184 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); |
185 | |
186 | return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO; |
187 | } |
188 | |
189 | /* |
190 | * UCB1300 data sheet says we must: |
191 | * 1. enable ADC => 5us (including reference startup time) |
192 | * 2. select input => 51*tsibclk => 4.3us |
193 | * 3. start conversion => 102*tsibclk => 8.5us |
194 | * (tsibclk = 1/11981000) |
195 | * Period between SIB 128-bit frames = 10.7us |
196 | */ |
197 | |
198 | /** |
199 | * ucb1x00_adc_enable - enable the ADC converter |
200 | * @ucb: UCB1x00 structure describing chip |
201 | * |
202 | * Enable the ucb1x00 and ADC converter on the UCB1x00 for use. |
203 | * Any code wishing to use the ADC converter must call this |
204 | * function prior to using it. |
205 | * |
206 | * This function takes the ADC mutex to prevent two or more |
207 | * concurrent uses, and therefore may sleep. As a result, it |
208 | * can only be called from process context, not interrupt |
209 | * context. |
210 | * |
211 | * You should release the ADC as soon as possible using |
212 | * ucb1x00_adc_disable. |
213 | */ |
214 | void ucb1x00_adc_enable(struct ucb1x00 *ucb) |
215 | { |
216 | mutex_lock(&ucb->adc_mutex); |
217 | |
218 | ucb->adc_cr |= UCB_ADC_ENA; |
219 | |
220 | ucb1x00_enable(ucb); |
221 | ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr); |
222 | } |
223 | |
224 | /** |
225 | * ucb1x00_adc_read - read the specified ADC channel |
226 | * @ucb: UCB1x00 structure describing chip |
227 | * @adc_channel: ADC channel mask |
228 | * @sync: wait for syncronisation pulse. |
229 | * |
230 | * Start an ADC conversion and wait for the result. Note that |
231 | * synchronised ADC conversions (via the ADCSYNC pin) must wait |
232 | * until the trigger is asserted and the conversion is finished. |
233 | * |
234 | * This function currently spins waiting for the conversion to |
235 | * complete (2 frames max without sync). |
236 | * |
237 | * If called for a synchronised ADC conversion, it may sleep |
238 | * with the ADC mutex held. |
239 | */ |
240 | unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync) |
241 | { |
242 | unsigned int val; |
243 | |
244 | if (sync) |
245 | adc_channel |= UCB_ADC_SYNC_ENA; |
246 | |
247 | ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel); |
248 | ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START); |
249 | |
250 | for (;;) { |
251 | val = ucb1x00_reg_read(ucb, UCB_ADC_DATA); |
252 | if (val & UCB_ADC_DAT_VAL) |
253 | break; |
254 | /* yield to other processes */ |
255 | set_current_state(TASK_INTERRUPTIBLE); |
256 | schedule_timeout(1); |
257 | } |
258 | |
259 | return UCB_ADC_DAT(val); |
260 | } |
261 | |
262 | /** |
263 | * ucb1x00_adc_disable - disable the ADC converter |
264 | * @ucb: UCB1x00 structure describing chip |
265 | * |
266 | * Disable the ADC converter and release the ADC mutex. |
267 | */ |
268 | void ucb1x00_adc_disable(struct ucb1x00 *ucb) |
269 | { |
270 | ucb->adc_cr &= ~UCB_ADC_ENA; |
271 | ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr); |
272 | ucb1x00_disable(ucb); |
273 | |
274 | mutex_unlock(&ucb->adc_mutex); |
275 | } |
276 | |
277 | /* |
278 | * UCB1x00 Interrupt handling. |
279 | * |
280 | * The UCB1x00 can generate interrupts when the SIBCLK is stopped. |
281 | * Since we need to read an internal register, we must re-enable |
282 | * SIBCLK to talk to the chip. We leave the clock running until |
283 | * we have finished processing all interrupts from the chip. |
284 | */ |
285 | static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc) |
286 | { |
287 | struct ucb1x00 *ucb = irq_desc_get_handler_data(desc); |
288 | unsigned int isr, i; |
289 | |
290 | ucb1x00_enable(ucb); |
291 | isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS); |
292 | ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr); |
293 | ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0); |
294 | |
295 | for (i = 0; i < 16 && isr; i++, isr >>= 1, irq++) |
296 | if (isr & 1) |
297 | generic_handle_irq(ucb->irq_base + i); |
298 | ucb1x00_disable(ucb); |
299 | } |
300 | |
301 | static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask) |
302 | { |
303 | ucb1x00_enable(ucb); |
304 | if (ucb->irq_ris_enbl & mask) |
305 | ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl & |
306 | ucb->irq_mask); |
307 | if (ucb->irq_fal_enbl & mask) |
308 | ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl & |
309 | ucb->irq_mask); |
310 | ucb1x00_disable(ucb); |
311 | } |
312 | |
/* No-op irq_ack: the status bits are cleared in the chained handler. */
static void ucb1x00_irq_noop(struct irq_data *data)
{
}
316 | |
317 | static void ucb1x00_irq_mask(struct irq_data *data) |
318 | { |
319 | struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data); |
320 | unsigned mask = 1 << (data->irq - ucb->irq_base); |
321 | |
322 | raw_spin_lock(&ucb->irq_lock); |
323 | ucb->irq_mask &= ~mask; |
324 | ucb1x00_irq_update(ucb, mask); |
325 | raw_spin_unlock(&ucb->irq_lock); |
326 | } |
327 | |
328 | static void ucb1x00_irq_unmask(struct irq_data *data) |
329 | { |
330 | struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data); |
331 | unsigned mask = 1 << (data->irq - ucb->irq_base); |
332 | |
333 | raw_spin_lock(&ucb->irq_lock); |
334 | ucb->irq_mask |= mask; |
335 | ucb1x00_irq_update(ucb, mask); |
336 | raw_spin_unlock(&ucb->irq_lock); |
337 | } |
338 | |
339 | static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type) |
340 | { |
341 | struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data); |
342 | unsigned mask = 1 << (data->irq - ucb->irq_base); |
343 | |
344 | raw_spin_lock(&ucb->irq_lock); |
345 | if (type & IRQ_TYPE_EDGE_RISING) |
346 | ucb->irq_ris_enbl |= mask; |
347 | else |
348 | ucb->irq_ris_enbl &= ~mask; |
349 | |
350 | if (type & IRQ_TYPE_EDGE_FALLING) |
351 | ucb->irq_fal_enbl |= mask; |
352 | else |
353 | ucb->irq_fal_enbl &= ~mask; |
354 | if (ucb->irq_mask & mask) { |
355 | ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl & |
356 | ucb->irq_mask); |
357 | ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl & |
358 | ucb->irq_mask); |
359 | } |
360 | raw_spin_unlock(&ucb->irq_lock); |
361 | |
362 | return 0; |
363 | } |
364 | |
365 | static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on) |
366 | { |
367 | struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data); |
368 | struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data; |
369 | unsigned mask = 1 << (data->irq - ucb->irq_base); |
370 | |
371 | if (!pdata || !pdata->can_wakeup) |
372 | return -EINVAL; |
373 | |
374 | raw_spin_lock(&ucb->irq_lock); |
375 | if (on) |
376 | ucb->irq_wake |= mask; |
377 | else |
378 | ucb->irq_wake &= ~mask; |
379 | raw_spin_unlock(&ucb->irq_lock); |
380 | |
381 | return 0; |
382 | } |
383 | |
384 | static struct irq_chip ucb1x00_irqchip = { |
385 | .name = "ucb1x00", |
386 | .irq_ack = ucb1x00_irq_noop, |
387 | .irq_mask = ucb1x00_irq_mask, |
388 | .irq_unmask = ucb1x00_irq_unmask, |
389 | .irq_set_type = ucb1x00_irq_set_type, |
390 | .irq_set_wake = ucb1x00_irq_set_wake, |
391 | }; |
392 | |
393 | static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv) |
394 | { |
395 | struct ucb1x00_dev *dev; |
396 | int ret = -ENOMEM; |
397 | |
398 | dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL); |
399 | if (dev) { |
400 | dev->ucb = ucb; |
401 | dev->drv = drv; |
402 | |
403 | ret = drv->add(dev); |
404 | |
405 | if (ret == 0) { |
406 | list_add_tail(&dev->dev_node, &ucb->devs); |
407 | list_add_tail(&dev->drv_node, &drv->devs); |
408 | } else { |
409 | kfree(dev); |
410 | } |
411 | } |
412 | return ret; |
413 | } |
414 | |
415 | static void ucb1x00_remove_dev(struct ucb1x00_dev *dev) |
416 | { |
417 | dev->drv->remove(dev); |
418 | list_del(&dev->dev_node); |
419 | list_del(&dev->drv_node); |
420 | kfree(dev); |
421 | } |
422 | |
423 | /* |
424 | * Try to probe our interrupt, rather than relying on lots of |
425 | * hard-coded machine dependencies. For reference, the expected |
426 | * IRQ mappings are: |
427 | * |
428 | * Machine Default IRQ |
429 | * adsbitsy IRQ_GPCIN4 |
430 | * cerf IRQ_GPIO_UCB1200_IRQ |
431 | * flexanet IRQ_GPIO_GUI |
432 | * freebird IRQ_GPIO_FREEBIRD_UCB1300_IRQ |
433 | * graphicsclient ADS_EXT_IRQ(8) |
434 | * graphicsmaster ADS_EXT_IRQ(8) |
435 | * lart LART_IRQ_UCB1200 |
436 | * omnimeter IRQ_GPIO23 |
437 | * pfs168 IRQ_GPIO_UCB1300_IRQ |
438 | * simpad IRQ_GPIO_UCB1300_IRQ |
439 | * shannon SHANNON_IRQ_GPIO_IRQ_CODEC |
440 | * yopy IRQ_GPIO_UCB1200_IRQ |
441 | */ |
442 | static int ucb1x00_detect_irq(struct ucb1x00 *ucb) |
443 | { |
444 | unsigned long mask; |
445 | |
446 | mask = probe_irq_on(); |
447 | if (!mask) { |
448 | probe_irq_off(mask); |
449 | return NO_IRQ; |
450 | } |
451 | |
452 | /* |
453 | * Enable the ADC interrupt. |
454 | */ |
455 | ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC); |
456 | ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC); |
457 | ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff); |
458 | ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0); |
459 | |
460 | /* |
461 | * Cause an ADC interrupt. |
462 | */ |
463 | ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA); |
464 | ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START); |
465 | |
466 | /* |
467 | * Wait for the conversion to complete. |
468 | */ |
469 | while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0); |
470 | ucb1x00_reg_write(ucb, UCB_ADC_CR, 0); |
471 | |
472 | /* |
473 | * Disable and clear interrupt. |
474 | */ |
475 | ucb1x00_reg_write(ucb, UCB_IE_RIS, 0); |
476 | ucb1x00_reg_write(ucb, UCB_IE_FAL, 0); |
477 | ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff); |
478 | ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0); |
479 | |
480 | /* |
481 | * Read triggered interrupt. |
482 | */ |
483 | return probe_irq_off(mask); |
484 | } |
485 | |
/* Class device release: frees the ucb1x00 state on the final put. */
static void ucb1x00_release(struct device *dev)
{
	struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);

	kfree(ucb);
}
491 | |
492 | static struct class ucb1x00_class = { |
493 | .name = "ucb1x00", |
494 | .dev_release = ucb1x00_release, |
495 | }; |
496 | |
497 | static int ucb1x00_probe(struct mcp *mcp) |
498 | { |
499 | struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data; |
500 | struct ucb1x00_driver *drv; |
501 | struct ucb1x00 *ucb; |
502 | unsigned id, i, irq_base; |
503 | int ret = -ENODEV; |
504 | |
505 | /* Tell the platform to deassert the UCB1x00 reset */ |
506 | if (pdata && pdata->reset) |
507 | pdata->reset(UCB_RST_PROBE); |
508 | |
509 | mcp_enable(mcp); |
510 | id = mcp_reg_read(mcp, UCB_ID); |
511 | mcp_disable(mcp); |
512 | |
513 | if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) { |
514 | printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id); |
515 | goto out; |
516 | } |
517 | |
518 | ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL); |
519 | ret = -ENOMEM; |
520 | if (!ucb) |
521 | goto out; |
522 | |
523 | device_initialize(&ucb->dev); |
524 | ucb->dev.class = &ucb1x00_class; |
525 | ucb->dev.parent = &mcp->attached_device; |
526 | dev_set_name(&ucb->dev, "ucb1x00"); |
527 | |
528 | raw_spin_lock_init(&ucb->irq_lock); |
529 | spin_lock_init(&ucb->io_lock); |
530 | mutex_init(&ucb->adc_mutex); |
531 | |
532 | ucb->id = id; |
533 | ucb->mcp = mcp; |
534 | |
535 | ret = device_add(&ucb->dev); |
536 | if (ret) |
537 | goto err_dev_add; |
538 | |
539 | ucb1x00_enable(ucb); |
540 | ucb->irq = ucb1x00_detect_irq(ucb); |
541 | ucb1x00_disable(ucb); |
542 | if (ucb->irq == NO_IRQ) { |
543 | dev_err(&ucb->dev, "IRQ probe failed\n"); |
544 | ret = -ENODEV; |
545 | goto err_no_irq; |
546 | } |
547 | |
548 | ucb->gpio.base = -1; |
549 | irq_base = pdata ? pdata->irq_base : 0; |
550 | ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1); |
551 | if (ucb->irq_base < 0) { |
552 | dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n", |
553 | ucb->irq_base); |
554 | goto err_irq_alloc; |
555 | } |
556 | |
557 | for (i = 0; i < 16; i++) { |
558 | unsigned irq = ucb->irq_base + i; |
559 | |
560 | irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq); |
561 | irq_set_chip_data(irq, ucb); |
562 | set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST); |
563 | } |
564 | |
565 | irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING); |
566 | irq_set_handler_data(ucb->irq, ucb); |
567 | irq_set_chained_handler(ucb->irq, ucb1x00_irq); |
568 | |
569 | if (pdata && pdata->gpio_base) { |
570 | ucb->gpio.label = dev_name(&ucb->dev); |
571 | ucb->gpio.dev = &ucb->dev; |
572 | ucb->gpio.owner = THIS_MODULE; |
573 | ucb->gpio.base = pdata->gpio_base; |
574 | ucb->gpio.ngpio = 10; |
575 | ucb->gpio.set = ucb1x00_gpio_set; |
576 | ucb->gpio.get = ucb1x00_gpio_get; |
577 | ucb->gpio.direction_input = ucb1x00_gpio_direction_input; |
578 | ucb->gpio.direction_output = ucb1x00_gpio_direction_output; |
579 | ucb->gpio.to_irq = ucb1x00_to_irq; |
580 | ret = gpiochip_add(&ucb->gpio); |
581 | if (ret) |
582 | goto err_gpio_add; |
583 | } else |
584 | dev_info(&ucb->dev, "gpio_base not set so no gpiolib support"); |
585 | |
586 | mcp_set_drvdata(mcp, ucb); |
587 | |
588 | if (pdata) |
589 | device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup); |
590 | |
591 | INIT_LIST_HEAD(&ucb->devs); |
592 | mutex_lock(&ucb1x00_mutex); |
593 | list_add_tail(&ucb->node, &ucb1x00_devices); |
594 | list_for_each_entry(drv, &ucb1x00_drivers, node) { |
595 | ucb1x00_add_dev(ucb, drv); |
596 | } |
597 | mutex_unlock(&ucb1x00_mutex); |
598 | |
599 | return ret; |
600 | |
601 | err_gpio_add: |
602 | irq_set_chained_handler(ucb->irq, NULL); |
603 | err_irq_alloc: |
604 | if (ucb->irq_base > 0) |
605 | irq_free_descs(ucb->irq_base, 16); |
606 | err_no_irq: |
607 | device_del(&ucb->dev); |
608 | err_dev_add: |
609 | put_device(&ucb->dev); |
610 | out: |
611 | if (pdata && pdata->reset) |
612 | pdata->reset(UCB_RST_PROBE_FAIL); |
613 | return ret; |
614 | } |
615 | |
616 | static void ucb1x00_remove(struct mcp *mcp) |
617 | { |
618 | struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data; |
619 | struct ucb1x00 *ucb = mcp_get_drvdata(mcp); |
620 | struct list_head *l, *n; |
621 | int ret; |
622 | |
623 | mutex_lock(&ucb1x00_mutex); |
624 | list_del(&ucb->node); |
625 | list_for_each_safe(l, n, &ucb->devs) { |
626 | struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node); |
627 | ucb1x00_remove_dev(dev); |
628 | } |
629 | mutex_unlock(&ucb1x00_mutex); |
630 | |
631 | if (ucb->gpio.base != -1) { |
632 | ret = gpiochip_remove(&ucb->gpio); |
633 | if (ret) |
634 | dev_err(&ucb->dev, "Can't remove gpio chip: %d\n", ret); |
635 | } |
636 | |
637 | irq_set_chained_handler(ucb->irq, NULL); |
638 | irq_free_descs(ucb->irq_base, 16); |
639 | device_unregister(&ucb->dev); |
640 | |
641 | if (pdata && pdata->reset) |
642 | pdata->reset(UCB_RST_REMOVE); |
643 | } |
644 | |
645 | int ucb1x00_register_driver(struct ucb1x00_driver *drv) |
646 | { |
647 | struct ucb1x00 *ucb; |
648 | |
649 | INIT_LIST_HEAD(&drv->devs); |
650 | mutex_lock(&ucb1x00_mutex); |
651 | list_add_tail(&drv->node, &ucb1x00_drivers); |
652 | list_for_each_entry(ucb, &ucb1x00_devices, node) { |
653 | ucb1x00_add_dev(ucb, drv); |
654 | } |
655 | mutex_unlock(&ucb1x00_mutex); |
656 | return 0; |
657 | } |
658 | |
659 | void ucb1x00_unregister_driver(struct ucb1x00_driver *drv) |
660 | { |
661 | struct list_head *n, *l; |
662 | |
663 | mutex_lock(&ucb1x00_mutex); |
664 | list_del(&drv->node); |
665 | list_for_each_safe(l, n, &drv->devs) { |
666 | struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node); |
667 | ucb1x00_remove_dev(dev); |
668 | } |
669 | mutex_unlock(&ucb1x00_mutex); |
670 | } |
671 | |
672 | static int ucb1x00_suspend(struct device *dev) |
673 | { |
674 | struct ucb1x00_plat_data *pdata = dev->platform_data; |
675 | struct ucb1x00 *ucb = dev_get_drvdata(dev); |
676 | struct ucb1x00_dev *udev; |
677 | |
678 | mutex_lock(&ucb1x00_mutex); |
679 | list_for_each_entry(udev, &ucb->devs, dev_node) { |
680 | if (udev->drv->suspend) |
681 | udev->drv->suspend(udev); |
682 | } |
683 | mutex_unlock(&ucb1x00_mutex); |
684 | |
685 | if (ucb->irq_wake) { |
686 | unsigned long flags; |
687 | |
688 | raw_spin_lock_irqsave(&ucb->irq_lock, flags); |
689 | ucb1x00_enable(ucb); |
690 | ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl & |
691 | ucb->irq_wake); |
692 | ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl & |
693 | ucb->irq_wake); |
694 | ucb1x00_disable(ucb); |
695 | raw_spin_unlock_irqrestore(&ucb->irq_lock, flags); |
696 | |
697 | enable_irq_wake(ucb->irq); |
698 | } else if (pdata && pdata->reset) |
699 | pdata->reset(UCB_RST_SUSPEND); |
700 | |
701 | return 0; |
702 | } |
703 | |
704 | static int ucb1x00_resume(struct device *dev) |
705 | { |
706 | struct ucb1x00_plat_data *pdata = dev->platform_data; |
707 | struct ucb1x00 *ucb = dev_get_drvdata(dev); |
708 | struct ucb1x00_dev *udev; |
709 | |
710 | if (!ucb->irq_wake && pdata && pdata->reset) |
711 | pdata->reset(UCB_RST_RESUME); |
712 | |
713 | ucb1x00_enable(ucb); |
714 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); |
715 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); |
716 | |
717 | if (ucb->irq_wake) { |
718 | unsigned long flags; |
719 | |
720 | raw_spin_lock_irqsave(&ucb->irq_lock, flags); |
721 | ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl & |
722 | ucb->irq_mask); |
723 | ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl & |
724 | ucb->irq_mask); |
725 | raw_spin_unlock_irqrestore(&ucb->irq_lock, flags); |
726 | |
727 | disable_irq_wake(ucb->irq); |
728 | } |
729 | ucb1x00_disable(ucb); |
730 | |
731 | mutex_lock(&ucb1x00_mutex); |
732 | list_for_each_entry(udev, &ucb->devs, dev_node) { |
733 | if (udev->drv->resume) |
734 | udev->drv->resume(udev); |
735 | } |
736 | mutex_unlock(&ucb1x00_mutex); |
737 | return 0; |
738 | } |
739 | |
740 | static const struct dev_pm_ops ucb1x00_pm_ops = { |
741 | SET_SYSTEM_SLEEP_PM_OPS(ucb1x00_suspend, ucb1x00_resume) |
742 | }; |
743 | |
744 | static struct mcp_driver ucb1x00_driver = { |
745 | .drv = { |
746 | .name = "ucb1x00", |
747 | .owner = THIS_MODULE, |
748 | .pm = &ucb1x00_pm_ops, |
749 | }, |
750 | .probe = ucb1x00_probe, |
751 | .remove = ucb1x00_remove, |
752 | }; |
753 | |
754 | static int __init ucb1x00_init(void) |
755 | { |
756 | int ret = class_register(&ucb1x00_class); |
757 | if (ret == 0) { |
758 | ret = mcp_driver_register(&ucb1x00_driver); |
759 | if (ret) |
760 | class_unregister(&ucb1x00_class); |
761 | } |
762 | return ret; |
763 | } |
764 | |
765 | static void __exit ucb1x00_exit(void) |
766 | { |
767 | mcp_driver_unregister(&ucb1x00_driver); |
768 | class_unregister(&ucb1x00_class); |
769 | } |
770 | |
771 | module_init(ucb1x00_init); |
772 | module_exit(ucb1x00_exit); |
773 | |
774 | EXPORT_SYMBOL(ucb1x00_io_set_dir); |
775 | EXPORT_SYMBOL(ucb1x00_io_write); |
776 | EXPORT_SYMBOL(ucb1x00_io_read); |
777 | |
778 | EXPORT_SYMBOL(ucb1x00_adc_enable); |
779 | EXPORT_SYMBOL(ucb1x00_adc_read); |
780 | EXPORT_SYMBOL(ucb1x00_adc_disable); |
781 | |
782 | EXPORT_SYMBOL(ucb1x00_register_driver); |
783 | EXPORT_SYMBOL(ucb1x00_unregister_driver); |
784 | |
785 | MODULE_ALIAS("mcp:ucb1x00"); |
786 | MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); |
787 | MODULE_DESCRIPTION("UCB1x00 core driver"); |
788 | MODULE_LICENSE("GPL"); |
789 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9