/*
 * linux/mm/mlock.c
 *
 * (C) Copyright 1995 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable. As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it. However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU. If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance. If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * Some other task has removed the page from the LRU.
			 * putback_lru_page() will take care of removing the
			 * page from the unevictable list, if necessary.
			 * vmscan [page_referenced()] will move the page back
			 * to the unevictable list if some other vma has it
			 * mlocked.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

/**
 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
 * @vma: target vma
 * @start: start address
 * @end: end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	int nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current->mm))) {

		__mlock_vma_pages_range(vma, start, end, NULL);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit. huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared. Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru. In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them. This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(),
			 * because we lock page here and migration is
			 * blocked by the elevated reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op. However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;	/* don't set VM_LOCKED, don't count */

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	int ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	if (!error)
		error = do_mlock_pages(start, len, 0);
	return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
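
/*
 * Illustrative userspace sketch (example only, not kernel code): a
 * typical caller pairs the two syscalls above roughly like this,
 * assuming a page-aligned buffer "buf" of "len" bytes and <sys/mman.h>:
 *
 *	if (mlock(buf, len) != 0)
 *		...handle EPERM, EAGAIN or ENOMEM, matching the
 *		   capability/rlimit checks and error mapping above...
 *	...work on buf; its pages stay resident and unevictable...
 *	munlock(buf, len);
 *
 * Both syscalls round "start" down and "len" up to page boundaries,
 * as done with PAGE_MASK/PAGE_ALIGN() above.
 */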

static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT)) {
		/* Ignore errors */
		do_mlock_pages(0, TASK_SIZE, 1);
	}
out:
	return ret;
}
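
/*
 * Illustrative userspace sketch (example only, not kernel code): a
 * latency-sensitive task usually locks current and future mappings in
 * one call:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *		...handle EPERM or ENOMEM...
 *	...
 *	munlockall();
 *
 * With MCL_FUTURE, do_mlockall() above sets VM_LOCKED in
 * current->mm->def_flags so that later mappings inherit it.
 */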

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}