Root/
1 | /* |
2 | * Remote Processor Framework |
3 | * |
4 | * Copyright (C) 2011 Texas Instruments, Inc. |
5 | * Copyright (C) 2011 Google, Inc. |
6 | * |
7 | * Ohad Ben-Cohen <ohad@wizery.com> |
8 | * Brian Swetland <swetland@google.com> |
9 | * Mark Grosen <mgrosen@ti.com> |
10 | * Fernando Guzman Lugo <fernando.lugo@ti.com> |
11 | * Suman Anna <s-anna@ti.com> |
12 | * Robert Tivy <rtivy@ti.com> |
13 | * Armando Uribe De Leon <x0095078@ti.com> |
14 | * |
15 | * This program is free software; you can redistribute it and/or |
16 | * modify it under the terms of the GNU General Public License |
17 | * version 2 as published by the Free Software Foundation. |
18 | * |
19 | * This program is distributed in the hope that it will be useful, |
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
22 | * GNU General Public License for more details. |
23 | */ |
24 | |
25 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
26 | |
27 | #include <linux/kernel.h> |
28 | #include <linux/module.h> |
29 | #include <linux/device.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/mutex.h> |
32 | #include <linux/dma-mapping.h> |
33 | #include <linux/firmware.h> |
34 | #include <linux/string.h> |
35 | #include <linux/debugfs.h> |
36 | #include <linux/remoteproc.h> |
37 | #include <linux/iommu.h> |
38 | #include <linux/idr.h> |
39 | #include <linux/elf.h> |
40 | #include <linux/virtio_ids.h> |
41 | #include <linux/virtio_ring.h> |
42 | #include <asm/byteorder.h> |
43 | |
44 | #include "remoteproc_internal.h" |
45 | |
/*
 * Handler that walks and processes an entire firmware resource table
 * (one each for the boot-time and registration-time passes below).
 */
typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
				struct resource_table *table, int len);
/* Handler for a single resource entry; 'avail' bounds the entry's size */
typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);

/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);
52 | |
53 | /* |
54 | * This is the IOMMU fault handler we register with the IOMMU API |
55 | * (when relevant; not all remote processors access memory through |
56 | * an IOMMU). |
57 | * |
58 | * IOMMU core will invoke this handler whenever the remote processor |
59 | * will try to access an unmapped device address. |
60 | * |
61 | * Currently this is mostly a stub, but it will be later used to trigger |
62 | * the recovery of the remote processor. |
63 | */ |
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *token)
{
	/* 'token' is the rproc handle given to iommu_set_fault_handler() */
	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just plan to use this as a recovery trigger.
	 */
	return -ENOSYS;
}
75 | |
76 | static int rproc_enable_iommu(struct rproc *rproc) |
77 | { |
78 | struct iommu_domain *domain; |
79 | struct device *dev = rproc->dev.parent; |
80 | int ret; |
81 | |
82 | /* |
83 | * We currently use iommu_present() to decide if an IOMMU |
84 | * setup is needed. |
85 | * |
86 | * This works for simple cases, but will easily fail with |
87 | * platforms that do have an IOMMU, but not for this specific |
88 | * rproc. |
89 | * |
90 | * This will be easily solved by introducing hw capabilities |
91 | * that will be set by the remoteproc driver. |
92 | */ |
93 | if (!iommu_present(dev->bus)) { |
94 | dev_dbg(dev, "iommu not found\n"); |
95 | return 0; |
96 | } |
97 | |
98 | domain = iommu_domain_alloc(dev->bus); |
99 | if (!domain) { |
100 | dev_err(dev, "can't alloc iommu domain\n"); |
101 | return -ENOMEM; |
102 | } |
103 | |
104 | iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); |
105 | |
106 | ret = iommu_attach_device(domain, dev); |
107 | if (ret) { |
108 | dev_err(dev, "can't attach iommu device: %d\n", ret); |
109 | goto free_domain; |
110 | } |
111 | |
112 | rproc->domain = domain; |
113 | |
114 | return 0; |
115 | |
116 | free_domain: |
117 | iommu_domain_free(domain); |
118 | return ret; |
119 | } |
120 | |
121 | static void rproc_disable_iommu(struct rproc *rproc) |
122 | { |
123 | struct iommu_domain *domain = rproc->domain; |
124 | struct device *dev = rproc->dev.parent; |
125 | |
126 | if (!domain) |
127 | return; |
128 | |
129 | iommu_detach_device(domain, dev); |
130 | iommu_domain_free(domain); |
131 | |
132 | return; |
133 | } |
134 | |
135 | /* |
136 | * Some remote processors will ask us to allocate them physically contiguous |
137 | * memory regions (which we call "carveouts"), and map them to specific |
138 | * device addresses (which are hardcoded in the firmware). |
139 | * |
140 | * They may then ask us to copy objects into specific device addresses (e.g. |
141 | * code/data sections) or expose us certain symbols in other device address |
142 | * (e.g. their trace buffer). |
143 | * |
144 | * This function is an internal helper with which we can go over the allocated |
145 | * carveouts and translate specific device address to kernel virtual addresses |
146 | * so we can access the referenced memory. |
147 | * |
148 | * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too, |
149 | * but only on kernel direct mapped RAM memory. Instead, we're just using |
150 | * here the output of the DMA API, which should be more correct. |
151 | */ |
152 | void *rproc_da_to_va(struct rproc *rproc, u64 da, int len) |
153 | { |
154 | struct rproc_mem_entry *carveout; |
155 | void *ptr = NULL; |
156 | |
157 | list_for_each_entry(carveout, &rproc->carveouts, node) { |
158 | int offset = da - carveout->da; |
159 | |
160 | /* try next carveout if da is too small */ |
161 | if (offset < 0) |
162 | continue; |
163 | |
164 | /* try next carveout if da is too large */ |
165 | if (offset + len > carveout->len) |
166 | continue; |
167 | |
168 | ptr = carveout->va + offset; |
169 | |
170 | break; |
171 | } |
172 | |
173 | return ptr; |
174 | } |
175 | EXPORT_SYMBOL(rproc_da_to_va); |
176 | |
177 | int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) |
178 | { |
179 | struct rproc *rproc = rvdev->rproc; |
180 | struct device *dev = &rproc->dev; |
181 | struct rproc_vring *rvring = &rvdev->vring[i]; |
182 | dma_addr_t dma; |
183 | void *va; |
184 | int ret, size, notifyid; |
185 | |
186 | /* actual size of vring (in bytes) */ |
187 | size = PAGE_ALIGN(vring_size(rvring->len, rvring->align)); |
188 | |
189 | if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) { |
190 | dev_err(dev, "idr_pre_get failed\n"); |
191 | return -ENOMEM; |
192 | } |
193 | |
194 | /* |
195 | * Allocate non-cacheable memory for the vring. In the future |
196 | * this call will also configure the IOMMU for us |
197 | * TODO: let the rproc know the da of this vring |
198 | */ |
199 | va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL); |
200 | if (!va) { |
201 | dev_err(dev->parent, "dma_alloc_coherent failed\n"); |
202 | return -EINVAL; |
203 | } |
204 | |
205 | /* |
206 | * Assign an rproc-wide unique index for this vring |
207 | * TODO: assign a notifyid for rvdev updates as well |
208 | * TODO: let the rproc know the notifyid of this vring |
209 | * TODO: support predefined notifyids (via resource table) |
210 | */ |
211 | ret = idr_get_new(&rproc->notifyids, rvring, ¬ifyid); |
212 | if (ret) { |
213 | dev_err(dev, "idr_get_new failed: %d\n", ret); |
214 | dma_free_coherent(dev->parent, size, va, dma); |
215 | return ret; |
216 | } |
217 | |
218 | dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va, |
219 | dma, size, notifyid); |
220 | |
221 | rvring->va = va; |
222 | rvring->dma = dma; |
223 | rvring->notifyid = notifyid; |
224 | |
225 | return 0; |
226 | } |
227 | |
/*
 * rproc_parse_vring() - validate vring @i of a vdev resource entry and
 * record its length/alignment in the corresponding rproc_vring.
 *
 * Returns 0 on success, or -EINVAL if the firmware-provided entry is
 * malformed (non-zero reserved bytes, zero queue size or alignment).
 */
static int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	struct rproc_vring *rvring = &rvdev->vring[i];

	dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
				i, vring->da, vring->num, vring->align);

	/* make sure reserved bytes are zeroes */
	if (vring->reserved) {
		dev_err(dev, "vring rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	/* verify queue size and vring alignment are sane */
	if (!vring->num || !vring->align) {
		dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
						vring->num, vring->align);
		return -EINVAL;
	}

	/* stash the validated parameters; allocation happens later */
	rvring->len = vring->num;
	rvring->align = vring->align;
	rvring->rvdev = rvdev;

	return 0;
}
258 | |
/*
 * rproc_free_vring() - reverse rproc_alloc_vring(): release the vring's
 * DMA memory and return its notifyid to the idr.
 */
void rproc_free_vring(struct rproc_vring *rvring)
{
	int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
	struct rproc *rproc = rvring->rvdev->rproc;

	dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
	idr_remove(&rproc->notifyids, rvring->notifyid);
}
267 | |
268 | /** |
269 | * rproc_handle_vdev() - handle a vdev fw resource |
270 | * @rproc: the remote processor |
271 | * @rsc: the vring resource descriptor |
272 | * @avail: size of available data (for sanity checking the image) |
273 | * |
274 | * This resource entry requests the host to statically register a virtio |
275 | * device (vdev), and setup everything needed to support it. It contains |
276 | * everything needed to make it possible: the virtio device id, virtio |
277 | * device features, vrings information, virtio config space, etc... |
278 | * |
279 | * Before registering the vdev, the vrings are allocated from non-cacheable |
280 | * physically contiguous memory. Currently we only support two vrings per |
281 | * remote processor (temporary limitation). We might also want to consider |
282 | * doing the vring allocation only later when ->find_vqs() is invoked, and |
283 | * then release them upon ->del_vqs(). |
284 | * |
285 | * Note: @da is currently not really handled correctly: we dynamically |
286 | * allocate it using the DMA API, ignoring requested hard coded addresses, |
287 | * and we don't take care of any required IOMMU programming. This is all |
288 | * going to be taken care of when the generic iommu-based DMA API will be |
289 | * merged. Meanwhile, statically-addressed iommu-based firmware images should |
290 | * use RSC_DEVMEM resource entries to map their required @da to the physical |
291 | * address of their base CMA region (ouch, hacky!). |
292 | * |
293 | * Returns 0 on success, or an appropriate error code otherwise |
294 | */ |
295 | static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, |
296 | int avail) |
297 | { |
298 | struct device *dev = &rproc->dev; |
299 | struct rproc_vdev *rvdev; |
300 | int i, ret; |
301 | |
302 | /* make sure resource isn't truncated */ |
303 | if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) |
304 | + rsc->config_len > avail) { |
305 | dev_err(dev, "vdev rsc is truncated\n"); |
306 | return -EINVAL; |
307 | } |
308 | |
309 | /* make sure reserved bytes are zeroes */ |
310 | if (rsc->reserved[0] || rsc->reserved[1]) { |
311 | dev_err(dev, "vdev rsc has non zero reserved bytes\n"); |
312 | return -EINVAL; |
313 | } |
314 | |
315 | dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n", |
316 | rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings); |
317 | |
318 | /* we currently support only two vrings per rvdev */ |
319 | if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) { |
320 | dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings); |
321 | return -EINVAL; |
322 | } |
323 | |
324 | rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL); |
325 | if (!rvdev) |
326 | return -ENOMEM; |
327 | |
328 | rvdev->rproc = rproc; |
329 | |
330 | /* parse the vrings */ |
331 | for (i = 0; i < rsc->num_of_vrings; i++) { |
332 | ret = rproc_parse_vring(rvdev, rsc, i); |
333 | if (ret) |
334 | goto free_rvdev; |
335 | } |
336 | |
337 | /* remember the device features */ |
338 | rvdev->dfeatures = rsc->dfeatures; |
339 | |
340 | list_add_tail(&rvdev->node, &rproc->rvdevs); |
341 | |
342 | /* it is now safe to add the virtio device */ |
343 | ret = rproc_add_virtio_dev(rvdev, rsc->id); |
344 | if (ret) |
345 | goto free_rvdev; |
346 | |
347 | return 0; |
348 | |
349 | free_rvdev: |
350 | kfree(rvdev); |
351 | return ret; |
352 | } |
353 | |
354 | /** |
355 | * rproc_handle_trace() - handle a shared trace buffer resource |
356 | * @rproc: the remote processor |
357 | * @rsc: the trace resource descriptor |
358 | * @avail: size of available data (for sanity checking the image) |
359 | * |
360 | * In case the remote processor dumps trace logs into memory, |
361 | * export it via debugfs. |
362 | * |
363 | * Currently, the 'da' member of @rsc should contain the device address |
364 | * where the remote processor is dumping the traces. Later we could also |
365 | * support dynamically allocating this address using the generic |
366 | * DMA API (but currently there isn't a use case for that). |
367 | * |
368 | * Returns 0 on success, or an appropriate error code otherwise |
369 | */ |
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
							int avail)
{
	struct rproc_mem_entry *trace;
	struct device *dev = &rproc->dev;
	void *ptr;
	char name[15];

	/* make sure the entry fits in the remaining table bytes */
	if (sizeof(*rsc) > avail) {
		dev_err(dev, "trace rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	/* what's the kernel address of this resource ? */
	ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
	if (!ptr) {
		/* 'da' must fall inside a previously allocated carveout */
		dev_err(dev, "erroneous trace resource entry\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "kzalloc trace failed\n");
		return -ENOMEM;
	}

	/* set the trace buffer dma properties */
	trace->len = rsc->len;
	trace->va = ptr;

	/* make sure snprintf always null terminates, even if truncating */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->priv = rproc_create_trace_file(name, rproc, trace);
	if (!trace->priv) {
		trace->va = NULL;
		kfree(trace);
		return -EINVAL;
	}

	/* track the entry so it can be torn down on shutdown/failure */
	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr,
						rsc->da, rsc->len);

	return 0;
}
426 | |
427 | /** |
428 | * rproc_handle_devmem() - handle devmem resource entry |
429 | * @rproc: remote processor handle |
430 | * @rsc: the devmem resource entry |
431 | * @avail: size of available data (for sanity checking the image) |
432 | * |
433 | * Remote processors commonly need to access certain on-chip peripherals. |
434 | * |
435 | * Some of these remote processors access memory via an iommu device, |
436 | * and might require us to configure their iommu before they can access |
437 | * the on-chip peripherals they need. |
438 | * |
439 | * This resource entry is a request to map such a peripheral device. |
440 | * |
441 | * These devmem entries will contain the physical address of the device in |
442 | * the 'pa' member. If a specific device address is expected, then 'da' will |
443 | * contain it (currently this is the only use case supported). 'len' will |
444 | * contain the size of the physical region we need to map. |
445 | * |
446 | * Currently we just "trust" those devmem entries to contain valid physical |
447 | * addresses, but this is going to change: we want the implementations to |
448 | * tell us ranges of physical addresses the firmware is allowed to request, |
449 | * and not allow firmwares to request access to physical addresses that |
450 | * are outside those ranges. |
451 | */ |
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
							int avail)
{
	struct rproc_mem_entry *mapping;
	struct device *dev = &rproc->dev;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return -EINVAL;

	/* make sure the entry fits in the remaining table bytes */
	if (sizeof(*rsc) > avail) {
		dev_err(dev, "devmem rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		dev_err(dev, "kzalloc mapping failed\n");
		return -ENOMEM;
	}

	/* map the firmware-requested pa at the requested da */
	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
	if (ret) {
		dev_err(dev, "failed to map devmem: %d\n", ret);
		goto out;
	}

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
					rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}
506 | |
507 | /** |
508 | * rproc_handle_carveout() - handle phys contig memory allocation requests |
509 | * @rproc: rproc handle |
510 | * @rsc: the resource entry |
511 | * @avail: size of available data (for image validation) |
512 | * |
513 | * This function will handle firmware requests for allocation of physically |
514 | * contiguous memory regions. |
515 | * |
516 | * These request entries should come first in the firmware's resource table, |
517 | * as other firmware entries might request placing other data objects inside |
518 | * these memory regions (e.g. data/code segments, trace resource entries, ...). |
519 | * |
520 | * Allocating memory this way helps utilizing the reserved physical memory |
521 | * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries |
522 | * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB |
523 | * pressure is important; it may have a substantial impact on performance. |
524 | */ |
static int rproc_handle_carveout(struct rproc *rproc,
				struct fw_rsc_carveout *rsc, int avail)
{
	struct rproc_mem_entry *carveout, *mapping;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	/* make sure the entry fits in the remaining table bytes */
	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n",
			rsc->da, rsc->pa, rsc->len, rsc->flags);

	/* allocated up front so the post-iommu_map path can't fail on OOM */
	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		dev_err(dev, "kzalloc mapping failed\n");
		return -ENOMEM;
	}

	carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
	if (!carveout) {
		dev_err(dev, "kzalloc carveout failed\n");
		ret = -ENOMEM;
		goto free_mapping;
	}

	va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent, "dma_alloc_coherent err: %d\n", rsc->len);
		ret = -ENOMEM;
		goto free_carv;
	}

	dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	if (rproc->domain) {
		ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
								rsc->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto dma_free;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = rsc->da;
		mapping->len = rsc->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to 0x%x\n", rsc->da, dma);
	}

	/*
	 * Some remote processors might need to know the pa
	 * even though they are behind an IOMMU. E.g., OMAP4's
	 * remote M3 processor needs this so it can control
	 * on-chip hardware accelerators that are not behind
	 * the IOMMU, and therefor must know the pa.
	 *
	 * Generally we don't want to expose physical addresses
	 * if we don't have to (remote processors are generally
	 * _not_ trusted), so we might want to do this only for
	 * remote processor that _must_ have this (e.g. OMAP4's
	 * dual M3 subsystem).
	 *
	 * Non-IOMMU processors might also want to have this info.
	 * In this case, the device address and the physical address
	 * are the same.
	 */
	rsc->pa = dma;

	/* record the allocation so rproc_da_to_va()/cleanup can find it */
	carveout->va = va;
	carveout->len = rsc->len;
	carveout->dma = dma;
	carveout->da = rsc->da;

	list_add_tail(&carveout->node, &rproc->carveouts);

	return 0;

dma_free:
	dma_free_coherent(dev->parent, rsc->len, va, dma);
free_carv:
	kfree(carveout);
free_mapping:
	kfree(mapping);
	return ret;
}
645 | |
646 | /* |
647 | * A lookup table for resource handlers. The indices are defined in |
648 | * enum fw_resource_type. |
649 | */ |
static rproc_handle_resource_t rproc_handle_rsc[] = {
	[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
	[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
	[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
	[RSC_VDEV] = NULL, /* VDEVs were handled upon registration */
};
656 | |
657 | /* handle firmware resource entries before booting the remote processor */ |
static int
rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len)
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	for (i = 0; i < table->num; i++) {
		int offset = table->offset[i];
		/* each entry is a header followed by a type-specific body */
		struct fw_rsc_hdr *hdr = (void *)table + offset;
		int avail = len - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		/* tolerate unknown types from newer firmwares */
		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		/* NULL handler (e.g. RSC_VDEV) means "not handled here" */
		handler = rproc_handle_rsc[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, avail);
		if (ret)
			break;
	}

	return ret;
}
695 | |
696 | /* handle firmware resource entries while registering the remote processor */ |
static int
rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len)
{
	struct device *dev = &rproc->dev;
	int ret = 0, i;

	for (i = 0; i < table->num; i++) {
		int offset = table->offset[i];
		struct fw_rsc_hdr *hdr = (void *)table + offset;
		int avail = len - offset - sizeof(*hdr);
		struct fw_rsc_vdev *vrsc;

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "%s: rsc type %d\n", __func__, hdr->type);

		/* this pass only cares about vdev entries */
		if (hdr->type != RSC_VDEV)
			continue;

		vrsc = (struct fw_rsc_vdev *)hdr->data;

		ret = rproc_handle_vdev(rproc, vrsc, avail);
		if (ret)
			break;
	}

	return ret;
}
729 | |
730 | /** |
731 | * rproc_resource_cleanup() - clean up and free all acquired resources |
732 | * @rproc: rproc handle |
733 | * |
734 | * This function will free all resources acquired for @rproc, and it |
735 | * is called whenever @rproc either shuts down or fails to boot. |
736 | */ |
static void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct device *dev = &rproc->dev;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
		rproc_remove_trace_file(entry->priv);
		rproc->num_traces--;
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		dma_free_coherent(dev->parent, entry->len, entry->va, entry->dma);
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing much to do besides complaining */
			dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
								unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}
}
772 | |
773 | /* |
774 | * take a firmware and boot a remote processor with it. |
775 | */ |
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	const char *name = rproc->firmware;
	struct resource_table *table;
	int ret, tablesz;

	/* validate image format/arch before touching any hardware */
	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	rproc->bootaddr = rproc_get_boot_addr(rproc, fw);

	/* look for the resource table */
	table = rproc_find_rsc_table(rproc, fw, &tablesz);
	if (!table) {
		ret = -EINVAL;
		goto clean_up;
	}

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_boot_rsc(rproc, table, tablesz);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up;
	}

	/* load the ELF segments to memory (carveouts must exist by now) */
	ret = rproc_load_segments(rproc, fw);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		goto clean_up;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto clean_up;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

clean_up:
	/* release everything acquired above, in reverse order */
	rproc_resource_cleanup(rproc);
	rproc_disable_iommu(rproc);
	return ret;
}
840 | |
841 | /* |
842 | * take a firmware and look for virtio devices to register. |
843 | * |
844 | * Note: this function is called asynchronously upon registration of the |
845 | * remote processor (so we must wait until it completes before we try |
846 | * to unregister the device. one other option is just to use kref here, |
847 | * that might be cleaner). |
848 | */ |
static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
{
	/* 'context' is the rproc handle passed to request_firmware_nowait() */
	struct rproc *rproc = context;
	struct resource_table *table;
	int ret, tablesz;

	/* fw may be NULL if the firmware wasn't found; sanity check covers it */
	if (rproc_fw_sanity_check(rproc, fw) < 0)
		goto out;

	/* look for the resource table */
	table = rproc_find_rsc_table(rproc, fw, &tablesz);
	if (!table)
		goto out;

	/* look for virtio devices and register them */
	ret = rproc_handle_virtio_rsc(rproc, table, tablesz);
	if (ret)
		goto out;

out:
	release_firmware(fw);
	/* allow rproc_del() contexts, if any, to proceed */
	complete_all(&rproc->firmware_loading_complete);
}
873 | |
874 | /** |
875 | * rproc_boot() - boot a remote processor |
876 | * @rproc: handle of a remote processor |
877 | * |
878 | * Boot a remote processor (i.e. load its firmware, power it on, ...). |
879 | * |
880 | * If the remote processor is already powered on, this function immediately |
881 | * returns (successfully). |
882 | * |
883 | * Returns 0 on success, and an appropriate error value otherwise. |
884 | */ |
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	/* serialize against concurrent boot/shutdown */
	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->parent->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/*
	 * skip the boot process if rproc is already powered up;
	 * the module reference taken above is kept until shutdown
	 */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	release_firmware(firmware_p);

downref_rproc:
	/* on failure, undo the power refcount and module reference */
	if (ret) {
		module_put(dev->parent->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);
947 | |
948 | /** |
949 | * rproc_shutdown() - power off the remote processor |
950 | * @rproc: the remote processor |
951 | * |
952 | * Power off a remote processor (previously booted with rproc_boot()). |
953 | * |
954 | * In case @rproc is still being used by an additional user(s), then |
955 | * this function will just decrement the power refcount and exit, |
956 | * without really powering off the device. |
957 | * |
958 | * Every call to rproc_boot() must (eventually) be accompanied by a call |
959 | * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug. |
960 | * |
961 | * Notes: |
962 | * - we're not decrementing the rproc's refcount, only the power refcount. |
963 | * which means that the @rproc handle stays valid even after rproc_shutdown() |
964 | * returns, and users can still use it with a subsequent rproc_boot(), if |
965 | * needed. |
966 | */ |
967 | void rproc_shutdown(struct rproc *rproc) |
968 | { |
969 | struct device *dev = &rproc->dev; |
970 | int ret; |
971 | |
972 | ret = mutex_lock_interruptible(&rproc->lock); |
973 | if (ret) { |
974 | dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); |
975 | return; |
976 | } |
977 | |
978 | /* if the remote proc is still needed, bail out */ |
979 | if (!atomic_dec_and_test(&rproc->power)) |
980 | goto out; |
981 | |
982 | /* power off the remote processor */ |
983 | ret = rproc->ops->stop(rproc); |
984 | if (ret) { |
985 | atomic_inc(&rproc->power); |
986 | dev_err(dev, "can't stop rproc: %d\n", ret); |
987 | goto out; |
988 | } |
989 | |
990 | /* clean up all acquired resources */ |
991 | rproc_resource_cleanup(rproc); |
992 | |
993 | rproc_disable_iommu(rproc); |
994 | |
995 | rproc->state = RPROC_OFFLINE; |
996 | |
997 | dev_info(dev, "stopped remote processor %s\n", rproc->name); |
998 | |
999 | out: |
1000 | mutex_unlock(&rproc->lock); |
1001 | if (!ret) |
1002 | module_put(dev->parent->driver->owner); |
1003 | } |
1004 | EXPORT_SYMBOL(rproc_shutdown); |
1005 | |
1006 | /** |
1007 | * rproc_add() - register a remote processor |
1008 | * @rproc: the remote processor handle to register |
1009 | * |
1010 | * Registers @rproc with the remoteproc framework, after it has been |
1011 | * allocated with rproc_alloc(). |
1012 | * |
1013 | * This is called by the platform-specific rproc implementation, whenever |
1014 | * a new remote processor device is probed. |
1015 | * |
1016 | * Returns 0 on success and an appropriate error code otherwise. |
1017 | * |
1018 | * Note: this function initiates an asynchronous firmware loading |
1019 | * context, which will look for virtio devices supported by the rproc's |
1020 | * firmware. |
1021 | * |
1022 | * If found, those virtio devices will be created and added, so as a result |
1023 | * of registering this remote processor, additional virtio drivers might be |
1024 | * probed. |
1025 | */ |
1026 | int rproc_add(struct rproc *rproc) |
1027 | { |
1028 | struct device *dev = &rproc->dev; |
1029 | int ret = 0; |
1030 | |
1031 | ret = device_add(dev); |
1032 | if (ret < 0) |
1033 | return ret; |
1034 | |
1035 | dev_info(dev, "%s is available\n", rproc->name); |
1036 | |
1037 | dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n"); |
1038 | dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n"); |
1039 | |
1040 | /* create debugfs entries */ |
1041 | rproc_create_debug_dir(rproc); |
1042 | |
1043 | /* rproc_del() calls must wait until async loader completes */ |
1044 | init_completion(&rproc->firmware_loading_complete); |
1045 | |
1046 | /* |
1047 | * We must retrieve early virtio configuration info from |
1048 | * the firmware (e.g. whether to register a virtio device, |
1049 | * what virtio features does it support, ...). |
1050 | * |
1051 | * We're initiating an asynchronous firmware loading, so we can |
1052 | * be built-in kernel code, without hanging the boot process. |
1053 | */ |
1054 | ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, |
1055 | rproc->firmware, dev, GFP_KERNEL, |
1056 | rproc, rproc_fw_config_virtio); |
1057 | if (ret < 0) { |
1058 | dev_err(dev, "request_firmware_nowait failed: %d\n", ret); |
1059 | complete_all(&rproc->firmware_loading_complete); |
1060 | } |
1061 | |
1062 | return ret; |
1063 | } |
1064 | EXPORT_SYMBOL(rproc_add); |
1065 | |
1066 | /** |
1067 | * rproc_type_release() - release a remote processor instance |
1068 | * @dev: the rproc's device |
1069 | * |
1070 | * This function should _never_ be called directly. |
1071 | * |
1072 | * It will be called by the driver core when no one holds a valid pointer |
1073 | * to @dev anymore. |
1074 | */ |
1075 | static void rproc_type_release(struct device *dev) |
1076 | { |
1077 | struct rproc *rproc = container_of(dev, struct rproc, dev); |
1078 | |
1079 | dev_info(&rproc->dev, "releasing %s\n", rproc->name); |
1080 | |
1081 | rproc_delete_debug_dir(rproc); |
1082 | |
1083 | idr_remove_all(&rproc->notifyids); |
1084 | idr_destroy(&rproc->notifyids); |
1085 | |
1086 | if (rproc->index >= 0) |
1087 | ida_simple_remove(&rproc_dev_index, rproc->index); |
1088 | |
1089 | kfree(rproc); |
1090 | } |
1091 | |
/* device type shared by all rproc devices; wires in the release handler */
static struct device_type rproc_type = {
	.name = "remoteproc",
	.release = rproc_type_release,
};
1096 | |
1097 | /** |
1098 | * rproc_alloc() - allocate a remote processor handle |
1099 | * @dev: the underlying device |
1100 | * @name: name of this remote processor |
1101 | * @ops: platform-specific handlers (mainly start/stop) |
1102 | * @firmware: name of firmware file to load |
1103 | * @len: length of private data needed by the rproc driver (in bytes) |
1104 | * |
1105 | * Allocates a new remote processor handle, but does not register |
1106 | * it yet. |
1107 | * |
1108 | * This function should be used by rproc implementations during initialization |
1109 | * of the remote processor. |
1110 | * |
1111 | * After creating an rproc handle using this function, and when ready, |
1112 | * implementations should then call rproc_add() to complete |
1113 | * the registration of the remote processor. |
1114 | * |
1115 | * On success the new rproc is returned, and on failure, NULL. |
1116 | * |
1117 | * Note: _never_ directly deallocate @rproc, even if it was not registered |
1118 | * yet. Instead, when you need to unroll rproc_alloc(), use rproc_put(). |
1119 | */ |
1120 | struct rproc *rproc_alloc(struct device *dev, const char *name, |
1121 | const struct rproc_ops *ops, |
1122 | const char *firmware, int len) |
1123 | { |
1124 | struct rproc *rproc; |
1125 | |
1126 | if (!dev || !name || !ops) |
1127 | return NULL; |
1128 | |
1129 | rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL); |
1130 | if (!rproc) { |
1131 | dev_err(dev, "%s: kzalloc failed\n", __func__); |
1132 | return NULL; |
1133 | } |
1134 | |
1135 | rproc->name = name; |
1136 | rproc->ops = ops; |
1137 | rproc->firmware = firmware; |
1138 | rproc->priv = &rproc[1]; |
1139 | |
1140 | device_initialize(&rproc->dev); |
1141 | rproc->dev.parent = dev; |
1142 | rproc->dev.type = &rproc_type; |
1143 | |
1144 | /* Assign a unique device index and name */ |
1145 | rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); |
1146 | if (rproc->index < 0) { |
1147 | dev_err(dev, "ida_simple_get failed: %d\n", rproc->index); |
1148 | put_device(&rproc->dev); |
1149 | return NULL; |
1150 | } |
1151 | |
1152 | dev_set_name(&rproc->dev, "remoteproc%d", rproc->index); |
1153 | |
1154 | atomic_set(&rproc->power, 0); |
1155 | |
1156 | /* Set ELF as the default fw_ops handler */ |
1157 | rproc->fw_ops = &rproc_elf_fw_ops; |
1158 | |
1159 | mutex_init(&rproc->lock); |
1160 | |
1161 | idr_init(&rproc->notifyids); |
1162 | |
1163 | INIT_LIST_HEAD(&rproc->carveouts); |
1164 | INIT_LIST_HEAD(&rproc->mappings); |
1165 | INIT_LIST_HEAD(&rproc->traces); |
1166 | INIT_LIST_HEAD(&rproc->rvdevs); |
1167 | |
1168 | rproc->state = RPROC_OFFLINE; |
1169 | |
1170 | return rproc; |
1171 | } |
1172 | EXPORT_SYMBOL(rproc_alloc); |
1173 | |
1174 | /** |
1175 | * rproc_put() - unroll rproc_alloc() |
1176 | * @rproc: the remote processor handle |
1177 | * |
1178 | * This function decrements the rproc dev refcount. |
1179 | * |
1180 | * If no one holds any reference to rproc anymore, then its refcount would |
1181 | * now drop to zero, and it would be freed. |
1182 | */ |
1183 | void rproc_put(struct rproc *rproc) |
1184 | { |
1185 | put_device(&rproc->dev); |
1186 | } |
1187 | EXPORT_SYMBOL(rproc_put); |
1188 | |
1189 | /** |
1190 | * rproc_del() - unregister a remote processor |
1191 | * @rproc: rproc handle to unregister |
1192 | * |
1193 | * This function should be called when the platform specific rproc |
1194 | * implementation decides to remove the rproc device. it should |
1195 | * _only_ be called if a previous invocation of rproc_add() |
1196 | * has completed successfully. |
1197 | * |
1198 | * After rproc_del() returns, @rproc isn't freed yet, because |
1199 | * of the outstanding reference created by rproc_alloc. To decrement that |
1200 | * one last refcount, one still needs to call rproc_put(). |
1201 | * |
1202 | * Returns 0 on success and -EINVAL if @rproc isn't valid. |
1203 | */ |
1204 | int rproc_del(struct rproc *rproc) |
1205 | { |
1206 | struct rproc_vdev *rvdev, *tmp; |
1207 | |
1208 | if (!rproc) |
1209 | return -EINVAL; |
1210 | |
1211 | /* if rproc is just being registered, wait */ |
1212 | wait_for_completion(&rproc->firmware_loading_complete); |
1213 | |
1214 | /* clean up remote vdev entries */ |
1215 | list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node) |
1216 | rproc_remove_virtio_dev(rvdev); |
1217 | |
1218 | device_del(&rproc->dev); |
1219 | |
1220 | return 0; |
1221 | } |
1222 | EXPORT_SYMBOL(rproc_del); |
1223 | |
/* module init: create the framework's debugfs root directory */
static int __init remoteproc_init(void)
{
	rproc_init_debugfs();

	return 0;
}
module_init(remoteproc_init);
1231 | |
/* module exit: tear down the framework's debugfs entries */
static void __exit remoteproc_exit(void)
{
	rproc_exit_debugfs();
}
module_exit(remoteproc_exit);
1237 | |
1238 | MODULE_LICENSE("GPL v2"); |
1239 | MODULE_DESCRIPTION("Generic Remote Processor Framework"); |
1240 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9