/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>

#include "hwspinlock_internal.h"

/* radix tree tags */
#define HWSPINLOCK_UNUSED       (0) /* tags an hwspinlock as unused */
/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which keeps the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption (and possibly
 * interrupts) is disabled, so the caller must not sleep, and is advised to
 * release the hwspinlock as soon as possible. This is required in order to
 * minimize remote cores polling on the hardware interconnect.
 *
 * The caller decides whether local interrupts are disabled or not, and,
 * if so, whether their previous state should be saved. It is up to the
 * caller to choose the appropriate @mode of operation, exactly the same way
 * users should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        int ret;

        BUG_ON(!hwlock);
        BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

        /*
         * This spin_trylock{_irq, _irqsave} serves three purposes:
         *
         * 1. Disable preemption, in order to minimize the period of time
         *    in which the hwspinlock is taken. This is important in order
         *    to minimize the possible polling on the hardware interconnect
         *    by a remote user of this lock.
         * 2. Make the hwspinlock SMP-safe (so we can take it from
         *    additional contexts on the local host).
         * 3. Ensure that in_atomic/might_sleep checks catch potential
         *    problems with hwspinlock usage (e.g. scheduler checks like
         *    'scheduling while atomic' etc.)
         */
        if (mode == HWLOCK_IRQSTATE)
                ret = spin_trylock_irqsave(&hwlock->lock, *flags);
        else if (mode == HWLOCK_IRQ)
                ret = spin_trylock_irq(&hwlock->lock);
        else
                ret = spin_trylock(&hwlock->lock);

        /* is lock already taken by another context on the local CPU? */
        if (!ret)
                return -EBUSY;

        /* try to take the hwspinlock device */
        ret = hwlock->bank->ops->trylock(hwlock);

        /* if hwlock is already taken, undo spin_trylock_* and exit */
        if (!ret) {
                if (mode == HWLOCK_IRQSTATE)
                        spin_unlock_irqrestore(&hwlock->lock, *flags);
                else if (mode == HWLOCK_IRQ)
                        spin_unlock_irq(&hwlock->lock);
                else
                        spin_unlock(&hwlock->lock);

                return -EBUSY;
        }

        /*
         * We can be sure the other core's memory operations
         * are observable to us only _after_ we successfully take
         * the hwspinlock, and we must make sure that subsequent memory
         * operations (both reads and writes) will not be reordered before
         * we actually take the hwspinlock.
         *
         * Note: the implicit memory barrier of the spinlock above is too
         * early, so we need this additional explicit memory barrier.
         */
        mb();

        return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
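
/*
 * Usage example (an illustrative sketch, not part of this file's API):
 * drivers normally reach this function through the static inline wrappers
 * in <linux/hwspinlock.h> (hwspin_trylock(), hwspin_trylock_irq(),
 * hwspin_trylock_irqsave()) rather than calling __hwspin_trylock()
 * directly. A minimal sketch, assuming "hwlock" was obtained earlier via
 * hwspin_lock_request():
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_trylock_irqsave(hwlock, &flags);
 *      if (ret == -EBUSY)
 *              return ret;     // lock is held elsewhere, try again later
 *
 *      // ... touch the shared resource, briefly and without sleeping ...
 *
 *      hwspin_unlock_irqrestore(hwlock, &flags);
 */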

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The caller decides whether local interrupts are disabled or not, and,
 * if so, whether their previous state should be saved. It is up to the
 * caller to choose the appropriate @mode of operation, exactly the same way
 * users should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                        int mode, unsigned long *flags)
{
        int ret;
        unsigned long expire;

        expire = msecs_to_jiffies(to) + jiffies;

        for (;;) {
                /* Try to take the hwspinlock */
                ret = __hwspin_trylock(hwlock, mode, flags);
                if (ret != -EBUSY)
                        break;

                /*
                 * The lock is already taken, let's check if the user wants
                 * us to try again
                 */
                if (time_is_before_eq_jiffies(expire))
                        return -ETIMEDOUT;

                /*
                 * Allow platform-specific relax handlers to prevent
                 * hogging the interconnect (no sleeping, though)
                 */
                if (hwlock->bank->ops->relax)
                        hwlock->bank->ops->relax(hwlock);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
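
/*
 * Usage example (a hedged sketch, not part of the framework itself):
 * callers typically use the <linux/hwspinlock.h> wrappers
 * hwspin_lock_timeout(), hwspin_lock_timeout_irq() or
 * hwspin_lock_timeout_irqsave() instead of calling __hwspin_lock_timeout()
 * directly. Assuming "hwlock" was previously requested and the critical
 * section must run with local interrupts disabled:
 *
 *      int ret;
 *
 *      ret = hwspin_lock_timeout_irq(hwlock, 100);     // wait up to 100 msecs
 *      if (ret)
 *              return ret;                             // e.g. -ETIMEDOUT
 *
 *      // ... short, non-sleeping critical section ...
 *
 *      hwspin_unlock_irq(hwlock);
 */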

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The caller decides whether local interrupts should be enabled or not, and,
 * if so, whether their previous state should be restored. It is up to the
 * caller to choose the appropriate @mode of operation, exactly the same way
 * users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        BUG_ON(!hwlock);
        BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

        /*
         * We must make sure that memory operations (both reads and writes),
         * done before unlocking the hwspinlock, will not be reordered
         * after the lock is released.
         *
         * That's the purpose of this explicit memory barrier.
         *
         * Note: the memory barrier induced by the spin_unlock below is too
         * late; the other core is going to access memory soon after it takes
         * the hwspinlock, and by then we want to be sure our memory
         * operations are already observable.
         */
        mb();

        hwlock->bank->ops->unlock(hwlock);

        /* Undo the spin_trylock{_irq, _irqsave} called while locking */
        if (mode == HWLOCK_IRQSTATE)
                spin_unlock_irqrestore(&hwlock->lock, *flags);
        else if (mode == HWLOCK_IRQ)
                spin_unlock_irq(&hwlock->lock);
        else
                spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
        struct hwspinlock *tmp;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
        if (ret) {
                if (ret == -EEXIST)
                        pr_err("hwspinlock id %d already exists!\n", id);
                goto out;
        }

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

        /* self-sanity check which should never fail */
        WARN_ON(tmp != hwlock);

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
        struct hwspinlock *hwlock = NULL;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is not in use (tag is set) */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_err("hwspinlock %d still in use (or not present)\n", id);
                goto out;
        }

        hwlock = radix_tree_delete(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_err("failed to delete hwspinlock %d\n", id);
                goto out;
        }

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
        struct hwspinlock *hwlock;
        int ret = 0, i;

        if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
                                                        !ops->unlock) {
                pr_err("invalid parameters\n");
                return -EINVAL;
        }

        bank->dev = dev;
        bank->ops = ops;
        bank->base_id = base_id;
        bank->num_locks = num_locks;

        for (i = 0; i < num_locks; i++) {
                hwlock = &bank->lock[i];

                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;

                ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }

        return 0;

reg_failed:
        while (--i >= 0)
                hwspin_lock_unregister_single(base_id + i);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
        struct hwspinlock *hwlock, *tmp;
        int i;

        for (i = 0; i < bank->num_locks; i++) {
                hwlock = &bank->lock[i];

                tmp = hwspin_lock_unregister_single(bank->base_id + i);
                if (!tmp)
                        return -EBUSY;

                /* self-sanity check that should never fail */
                WARN_ON(tmp != hwlock);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
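
/*
 * Usage example (an illustrative sketch of a hypothetical platform driver;
 * the names my_hwspinlock_*, pdev, base_id and num_locks are assumptions of
 * this illustration): the underlying implementation allocates one
 * hwspinlock_device with room for its locks and registers it from probe(),
 * then unregisters it in remove():
 *
 *      static const struct hwspinlock_ops my_hwspinlock_ops = {
 *              .trylock        = my_hwspinlock_trylock,
 *              .unlock         = my_hwspinlock_unlock,
 *              .relax          = my_hwspinlock_relax,
 *      };
 *
 *      // in probe(), with num_locks read from hardware or platform data:
 *      bank = kzalloc(sizeof(*bank) + num_locks * sizeof(struct hwspinlock),
 *                     GFP_KERNEL);
 *      ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
 *                                 base_id, num_locks);
 *
 *      // in remove():
 *      ret = hwspin_lock_unregister(bank);     // -EBUSY if any lock is
 *                                              // still requested by a user
 */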

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the hwspinlock instance to prepare
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
        struct device *dev = hwlock->bank->dev;
        struct hwspinlock *tmp;
        int ret;

        /* prevent underlying implementation from being removed */
        if (!try_module_get(dev->driver->owner)) {
                dev_err(dev, "%s: can't get owner\n", __func__);
                return -EINVAL;
        }

        /* notify PM core that power is now needed */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "%s: can't power on device\n", __func__);
                return ret;
        }

        /* mark hwspinlock as used, should not fail */
        tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                        HWSPINLOCK_UNUSED);

        /* self-sanity check that should never fail */
        WARN_ON(tmp != hwlock);

        return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
        struct hwspinlock *hwlock;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* look for an unused lock */
        ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
                                                0, 1, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("a free hwspinlock is not available\n");
                hwlock = NULL;
                goto out;
        }

        /* sanity check that should never fail */
        WARN_ON(ret > 1);

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
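
/*
 * Usage example (a minimal sketch; the way the id is published to the remote
 * core is an assumption of this illustration): a host-side driver grabs a
 * free lock dynamically and tells the remote core which id it picked:
 *
 *      struct hwspinlock *hwlock;
 *      int id;
 *
 *      hwlock = hwspin_lock_request();
 *      if (!hwlock)
 *              return -EBUSY;
 *
 *      id = hwspin_lock_get_id(hwlock);
 *      // publish "id" to the remote core, e.g. via shared memory or a
 *      // mailbox message, before both sides start using the lock
 */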

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        struct hwspinlock *hwlock;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* make sure this hwspinlock exists */
        hwlock = radix_tree_lookup(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_warn("hwspinlock %u does not exist\n", id);
                goto out;
        }

        /* sanity check (this shouldn't happen) */
        WARN_ON(hwlock_to_id(hwlock) != id);

        /* make sure this hwspinlock is unused */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("hwspinlock %u is already in use\n", id);
                hwlock = NULL;
                goto out;
        }

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
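
/*
 * Usage example (illustrative only; the id value 0 and the error handling
 * are assumptions of this sketch): board or platform code that has
 * pre-assigned lock 0 for a known purpose reserves it explicitly and
 * releases it with hwspin_lock_free() when it is no longer needed:
 *
 *      struct hwspinlock *hwlock;
 *
 *      hwlock = hwspin_lock_request_specific(0);
 *      if (!hwlock)
 *              return -EBUSY;
 *
 *      // ... use hwspin_lock_timeout()/hwspin_unlock() as usual ...
 *
 *      hwspin_lock_free(hwlock);
 */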

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
        struct device *dev;
        struct hwspinlock *tmp;
        int ret;

        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        dev = hwlock->bank->dev;
        mutex_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is used */
        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                        HWSPINLOCK_UNUSED);
        if (ret == 1) {
                dev_err(dev, "%s: hwlock is already free\n", __func__);
                dump_stack();
                ret = -EINVAL;
                goto out;
        }

        /* notify the underlying device that power is not needed */
        ret = pm_runtime_put(dev);
        if (ret < 0)
                goto out;

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                        HWSPINLOCK_UNUSED);

        /* sanity check (this shouldn't happen) */
        WARN_ON(tmp != hwlock);

        module_put(dev->driver->owner);

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");