/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice-versa, and
 * also providing a mechanism to have contiguous pages for device driver
 * operations (say, DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */
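/*
 * Overview: a software IOTLB (bounce buffer) is reserved at boot and its
 * backing pages are exchanged with the hypervisor so that they are
 * machine-contiguous and sit below the 4GB boundary. Any streaming DMA
 * whose target buffer is not machine-contiguous, or does not fit within
 * the device's DMA mask, is then bounced through that buffer.
 */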

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

u64 start_dma_addr;

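/*
 * Address translation helpers: convert between the guest's pseudo-physical
 * addresses and machine (bus) addresses via the PFN<->MFN lookups described
 * above.
 */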
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

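/*
 * Returns 1 if the [offset, offset + length) range starting at @pfn is
 * backed by consecutive MFNs, i.e. is machine-contiguous.
 */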
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

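/*
 * Returns 1 if the physical range [p, p + size) crosses a page boundary
 * without being machine-contiguous, in which case it cannot be handed to
 * the device as-is.
 */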
static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}

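/*
 * Check whether a bus (machine) address lies within the Xen-SWIOTLB bounce
 * buffer, i.e. whether it was handed out by this API.
 */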
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(pfn)) {
		paddr = PFN_PHYS(pfn);
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

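/*
 * Exchange the pages backing the IOTLB with the hypervisor, one
 * IO_TLB_SEGSIZE segment at a time, so that each segment becomes
 * machine-contiguous. The required address width starts at the size of a
 * segment and is relaxed up to max_dma_bits if the hypervisor cannot
 * satisfy the tighter constraint.
 */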
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				(unsigned long)buf + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

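/*
 * Allocate the bounce buffer (64MB by default, or the slab count reported
 * by swiotlb_nr_tbl()), make it machine-contiguous below 4GB and hand it
 * to the generic SWIOTLB code. On failure the size is halved (down to a
 * 2MB minimum) and the allocation retried up to three times before
 * panicking.
 */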
void __init xen_swiotlb_init(int verbose)
{
	unsigned long bytes;
	int rc = -ENOMEM;
	unsigned long nr_tbl;
	char *m = NULL;
	unsigned int repeat = 3;

	nr_tbl = swiotlb_nr_tbl();
	if (nr_tbl)
		xen_io_tlb_nslabs = nr_tbl;
	else {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
retry:
	bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from any location.
	 */
	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	if (!xen_io_tlb_start) {
		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		m = "Failed to get contiguous memory for DMA from Xen!\n"\
		    "You either: don't have the permissions, do not have"\
		    " enough free memory under 4GB, or the hypervisor memory"\
		    " is too fragmented!";
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);

	return;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
		       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	xen_raw_printk("%s (rc:%d)", m, rc);
	panic("%s (rc:%d)", m, rc);
}

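/*
 * Allocate coherent memory for a device. The pages come from the normal
 * kernel allocator; if they are not already machine-contiguous and within
 * the device's coherent DMA mask, they are exchanged with the hypervisor
 * via xen_create_contiguous_region().
 */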
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   struct dma_attrs *attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	unsigned long vstart;
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of the
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
		return ret;

	vstart = __get_free_pages(flags, order);
	ret = (void *)vstart;

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	phys = virt_to_phys(ret);
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(vstart, order,
						 fls64(dma_mask)) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		*dma_handle = virt_to_machine(ret).maddr;
	}
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

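/*
 * Free memory obtained from xen_swiotlb_alloc_coherent(), breaking up any
 * hypervisor-provided contiguous region first.
 */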
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, struct dma_attrs *attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (dma_release_from_coherent(hwdev, order, vaddr))
		return;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	phys = virt_to_phys(vaddr);

	if ((dev_addr + size - 1 > dma_mask) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region((unsigned long)vaddr, order);

	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (!map)
		return DMA_ERROR_CODE;

	dev_addr = xen_virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;
	}
	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is a no-op on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
					target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by a scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter-gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			void *map = swiotlb_tbl_map_single(hwdev,
							   start_dma_addr,
							   sg_phys(sg),
							   sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sgl[0].dma_length = 0;
				return DMA_ERROR_CODE;
			}
			sg->dma_address = xen_virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

int
xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		   enum dma_data_direction dir)
{
	return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir)
{
	return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

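/*
 * A zero DMA address indicates a failed mapping: xen_swiotlb_map_page()
 * clears dev_addr when the bounce buffer is not DMA'ble for the device.
 */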
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);