/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory. On TILEPro this is
 * uncached memory; on TILE-Gx it is hash-for-home memory.
 */
#ifdef __tilepro__
#define PAGE_HOME_DMA PAGE_HOME_UNCACHED
#else
#define PAGE_HOME_DMA PAGE_HOME_HASH
#endif

static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t gfp,
				     struct dma_attrs *attrs)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	/*
	 * If the mask specifies that the memory be in the first 4 GB, then
	 * we force the allocation to come from the DMA zone. We also
	 * force the node to 0 since that's the only node where the DMA
	 * zone isn't empty. If the mask size is smaller than 32 bits, we
	 * may still not be able to guarantee a suitable memory address, in
	 * which case we will return NULL. But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32)) {
		gfp |= GFP_DMA;
		node = 0;
	}

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		__homecache_free_pages(pg, order);
		return NULL;
	}

	*dma_handle = addr;

	return page_address(pg);
}

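/*
 * Example (hypothetical driver code, not part of this file): a driver
 * reaches the allocator above through the generic dma_alloc_coherent()
 * wrapper, which dispatches to the .alloc op registered for the device.
 * The names RING_BYTES, ring and ring_dma are illustrative only.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	...use ring (CPU) and ring_dma (device) concurrently...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
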
/*
 * Free memory that was allocated with tile_dma_alloc_coherent.
 */
static void tile_dma_free_coherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}

85 | /* |
86 | * The map routines "map" the specified address range for DMA |
87 | * accesses. The memory belongs to the device after this call is |
88 | * issued, until it is unmapped with dma_unmap_single. |
89 | * |
90 | * We don't need to do any mapping, we just flush the address range |
91 | * out of the cache and return a DMA address. |
92 | * |
93 | * The unmap routines do whatever is necessary before the processor |
94 | * accesses the memory again, and must be called before the driver |
95 | * touches the memory. We can get away with a cache invalidate if we |
96 | * can count on nothing having been touched. |
97 | */ |
98 | |
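/*
 * Example (hypothetical driver code): the streaming contract described
 * above, as seen from a driver. The names buf and len are illustrative.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...device writes into buf; the CPU must not touch it yet...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *	...buf is now safe for the CPU to read...
 */
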
/* Set up a single page for DMA access. */
static void __dma_prep_page(struct page *page, unsigned long offset,
			    size_t size, enum dma_data_direction direction)
{
	/*
	 * Flush the page from cache if necessary.
	 * On tilegx, data is delivered to hash-for-home L3; on tilepro,
	 * data is delivered direct to memory.
	 *
	 * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
	 * this to be a "flush" not a "finv" and keep some of the
	 * state in cache across the DMA operation, but it doesn't seem
	 * worth creating the necessary flush_buffer_xxx() infrastructure.
	 */
	int home = page_home(page);
	switch (home) {
	case PAGE_HOME_HASH:
#ifdef __tilegx__
		return;
#endif
		break;
	case PAGE_HOME_UNCACHED:
#ifdef __tilepro__
		return;
#endif
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Should be going to the device only. */
		BUG_ON(direction == DMA_FROM_DEVICE ||
		       direction == DMA_BIDIRECTIONAL);
		return;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		return;
	default:
		BUG_ON(home < 0 || home >= NR_CPUS);
		break;
	}
	homecache_finv_page(page);

#ifdef DEBUG_ALIGNMENT
	/* Warn if the region isn't cacheline aligned. */
	if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
		pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
			PFN_PHYS(page_to_pfn(page)) + offset, size);
#endif
}

/* Make the page ready to be read by the core. */
static void __dma_complete_page(struct page *page, unsigned long offset,
				size_t size, enum dma_data_direction direction)
{
#ifdef __tilegx__
	switch (page_home(page)) {
	case PAGE_HOME_HASH:
		/* I/O device delivered data the way the cpu wanted it. */
		break;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Extra read-only copies are not a problem. */
		break;
	default:
		/* Flush the bogus hash-for-home I/O entries to memory. */
		homecache_finv_map_page(page, PAGE_HOME_HASH);
		break;
	}
#endif
}

static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size,
				enum dma_data_direction direction)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	unsigned long offset = dma_addr & (PAGE_SIZE - 1);
	size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

	while (size != 0) {
		__dma_prep_page(page, offset, bytes, direction);
		size -= bytes;
		++page;
		offset = 0;
		bytes = min((size_t)PAGE_SIZE, size);
	}
}

static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
				    enum dma_data_direction direction)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	unsigned long offset = dma_addr & (PAGE_SIZE - 1);
	size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

	while (size != 0) {
		__dma_complete_page(page, offset, bytes, direction);
		size -= bytes;
		++page;
		offset = 0;
		bytes = min((size_t)PAGE_SIZE, size);
	}
}

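/*
 * Worked example for the two walkers above (assuming 64 KB pages for
 * concreteness): for a dma_addr with page offset 0xff00 and size
 * 0x18000, the loop visits three pages with chunk sizes 0x100 (the
 * tail of the first page), 0x10000 (one whole page), and 0x7f00 (the
 * head of the last page); offset is zero for every page after the
 * first, and the chunks sum to the full 0x18000 bytes.
 */
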
static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			   int nents, enum dma_data_direction direction,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}

	return nents;
}

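/*
 * Example (hypothetical driver code): building and mapping a two-entry
 * scatterlist. The names sg, pg0, pg1, len0 and len1 are illustrative.
 *
 *	struct scatterlist sg[2];
 *	int count;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_page(&sg[0], pg0, len0, 0);
 *	sg_set_page(&sg[1], pg1, len1, 0);
 *	count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	...program the device from sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */
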
static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			      int nents, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_complete_pa_range(sg->dma_address, sg->length,
					direction);
	}
}

static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	BUG_ON(!valid_dma_direction(direction));

	BUG_ON(offset + size > PAGE_SIZE);
	__dma_prep_page(page, offset, size, direction);

	return page_to_pa(page) + offset;
}

static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	BUG_ON(!valid_dma_direction(direction));

	/* Mask with PAGE_SIZE - 1 to recover the offset within the page. */
	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
			    dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_dma_sync_single_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 size_t size,
					 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	__dma_complete_pa_range(dma_handle, size, direction);
}

static void tile_dma_sync_single_for_device(struct device *dev,
					    dma_addr_t dma_handle, size_t size,
					    enum dma_data_direction direction)
{
	__dma_prep_pa_range(dma_handle, size, direction);
}

static void tile_dma_sync_sg_for_cpu(struct device *dev,
				     struct scatterlist *sglist, int nelems,
				     enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_cpu(dev, sg->dma_address,
					sg_dma_len(sg), direction);
	}
}

static void tile_dma_sync_sg_for_device(struct device *dev,
					struct scatterlist *sglist, int nelems,
					enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}

static inline int
tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int
tile_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static struct dma_map_ops tile_default_dma_map_ops = {
	.alloc = tile_dma_alloc_coherent,
	.free = tile_dma_free_coherent,
	.map_page = tile_dma_map_page,
	.unmap_page = tile_dma_unmap_page,
	.map_sg = tile_dma_map_sg,
	.unmap_sg = tile_dma_unmap_sg,
	.sync_single_for_cpu = tile_dma_sync_single_for_cpu,
	.sync_single_for_device = tile_dma_sync_single_for_device,
	.sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
	.sync_sg_for_device = tile_dma_sync_sg_for_device,
	.mapping_error = tile_dma_mapping_error,
	.dma_supported = tile_dma_supported
};

struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
EXPORT_SYMBOL(tile_dma_map_ops);

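/*
 * A minimal sketch of how the generic DMA API reaches the ops table
 * above (paraphrasing asm-generic/dma-mapping-common.h of this era,
 * not a verbatim copy):
 *
 *	static inline dma_addr_t dma_map_single(struct device *dev,
 *						void *ptr, size_t size,
 *						enum dma_data_direction dir)
 *	{
 *		struct dma_map_ops *ops = get_dma_ops(dev);
 *
 *		return ops->map_page(dev, virt_to_page(ptr),
 *				     offset_in_page(ptr), size, dir, NULL);
 *	}
 */
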
/* Generic PCI DMA mapping functions */

static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t gfp,
					 struct dma_attrs *attrs)
{
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);

	*dma_handle = phys_to_dma(dev, addr);

	return page_address(pg);
}

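/*
 * Unlike tile_dma_alloc_coherent() above, the PCI variant translates
 * the CPU physical address into a bus address with phys_to_dma().
 * A sketch of that translation, assuming a simple per-device offset
 * as in this arch's asm/dma-mapping.h (illustrative, not verbatim):
 *
 *	dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 *	{
 *		return paddr + get_dma_offset(dev);
 *	}
 */
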
/*
 * Free memory that was allocated with tile_pci_dma_alloc_coherent.
 */
static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_handle,
				       struct dma_attrs *attrs)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}

static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_prep_pa_range(sg->dma_address, sg->length, direction);

		sg->dma_address = phys_to_dma(dev, sg->dma_address);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}

	return nents;
}

static void tile_pci_dma_unmap_sg(struct device *dev,
				  struct scatterlist *sglist, int nents,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_complete_pa_range(sg->dma_address, sg->length,
					direction);
	}
}

static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
					unsigned long offset, size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	BUG_ON(!valid_dma_direction(direction));

	BUG_ON(offset + size > PAGE_SIZE);
	__dma_prep_page(page, offset, size, direction);

	return phys_to_dma(dev, page_to_pa(page) + offset);
}

static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				    size_t size,
				    enum dma_data_direction direction,
				    struct dma_attrs *attrs)
{
	BUG_ON(!valid_dma_direction(direction));

	dma_address = dma_to_phys(dev, dma_address);

	/* Mask with PAGE_SIZE - 1 to recover the offset within the page. */
	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
			    dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
					     dma_addr_t dma_handle,
					     size_t size,
					     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	dma_handle = dma_to_phys(dev, dma_handle);

	__dma_complete_pa_range(dma_handle, size, direction);
}

static void tile_pci_dma_sync_single_for_device(struct device *dev,
						dma_addr_t dma_handle,
						size_t size,
						enum dma_data_direction
						direction)
{
	dma_handle = dma_to_phys(dev, dma_handle);

	__dma_prep_pa_range(dma_handle, size, direction);
}

static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
					 struct scatterlist *sglist,
					 int nelems,
					 enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_cpu(dev, sg->dma_address,
					sg_dma_len(sg), direction);
	}
}

static void tile_pci_dma_sync_sg_for_device(struct device *dev,
					    struct scatterlist *sglist,
					    int nelems,
					    enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}

static inline int
tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int
tile_pci_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static struct dma_map_ops tile_pci_default_dma_map_ops = {
	.alloc = tile_pci_dma_alloc_coherent,
	.free = tile_pci_dma_free_coherent,
	.map_page = tile_pci_dma_map_page,
	.unmap_page = tile_pci_dma_unmap_page,
	.map_sg = tile_pci_dma_map_sg,
	.unmap_sg = tile_pci_dma_unmap_sg,
	.sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
	.sync_single_for_device = tile_pci_dma_sync_single_for_device,
	.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
	.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
	.mapping_error = tile_pci_dma_mapping_error,
	.dma_supported = tile_pci_dma_supported
};

struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
EXPORT_SYMBOL(gx_pci_dma_map_ops);

/* PCI DMA mapping functions for legacy PCI devices */

#ifdef CONFIG_SWIOTLB
static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t gfp,
					 struct dma_attrs *attrs)
{
	gfp |= GFP_DMA;
	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}

static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_addr,
				       struct dma_attrs *attrs)
{
	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}

static struct dma_map_ops pci_swiotlb_dma_ops = {
	.alloc = tile_swiotlb_alloc_coherent,
	.free = tile_swiotlb_free_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
#else
struct dma_map_ops *gx_legacy_pci_dma_map_ops;
#endif
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);

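/*
 * A minimal sketch of how the ops tables above are chosen per device,
 * under the assumption that the arch PCI setup picks ops based on
 * addressing capability (the condition below is illustrative only):
 * devices that can address all of memory use the direct
 * gx_pci_dma_map_ops, while 32-bit-only devices fall back to the
 * swiotlb bounce-buffer ops.
 *
 *	if (device_can_address_all_memory)
 *		set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
 *	else
 *		set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
 */
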
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	/* Handle legacy PCI devices with limited memory addressability. */
	if (((dma_ops == gx_pci_dma_map_ops) ||
	     (dma_ops == gx_legacy_pci_dma_map_ops)) &&
	    (mask <= DMA_BIT_MASK(32))) {
		if (mask > dev->archdata.max_direct_dma_addr)
			mask = dev->archdata.max_direct_dma_addr;
	}

	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
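
/*
 * Example (hypothetical driver code): capping the coherent mask for a
 * device that can only address 32 bits; the override above then clamps
 * it further to max_direct_dma_addr when needed.
 *
 *	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		dev_warn(&pdev->dev, "no suitable coherent DMA mask\n");
 */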