#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>
#include <linux/dmaengine.h>

/*
 *
 * Each stripe contains one buffer per disc.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under a per-stripe
 * spinlock.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by the spin lock.
 *
 * The flag bits that are used to represent these states are:
 *   R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 * (For illustration, see the r5dev_is_clean() sketch after the R5_*
 * flag definitions below.)
 *
 * The possible state transitions are:
 *
 *  Empty -> Want   - on read or write to get old data for parity calc
 *  Empty -> Dirty  - on compute_parity to satisfy write/sync request (RECONSTRUCT_WRITE)
 *  Empty -> Clean  - on compute_block when computing a block for failed drive
 *  Want  -> Empty  - on failed read
 *  Want  -> Clean  - on successful completion of read request
 *  Dirty -> Clean  - on successful completion of write request
 *  Dirty -> Clean  - on failed write
 *  Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) for read requests, one (bh_write) for write.
 * There should never be more than one buffer on the two lists
 * together, but that is not guaranteed, so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as fifos.  The read list is
 * protected by the device_lock.  The write and written lists are
 * protected by the stripe lock.  The device_lock, which can be
 * claimed while the stripe lock is held, is only for list
 * manipulations and will only be held for a very short time.  It can
 * be claimed from interrupts (see the illustrative locking sketch
 * after struct raid5_private_data below).
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are fifo queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes on the inactive_list never have their stripe_lock held.
 *  - stripes have a reference counter. If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When refcount reaches zero, then if STRIPE_HANDLE it is put on
 *    handle_list else inactive_list
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then
 * the stripe is on inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     lockstripe clrSTRIPE_HANDLE ...
 *		(lockdev check-buffers unlockdev) ..
 *		change-state ..
 *		record io/ops needed unlockstripe schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * Stripe operations are performed outside the stripe lock.
 * The stripe operations are:
 * -copying data between the stripe cache and user application buffers
 * -computing blocks to save a disk access, or to recover a missing block
 * -updating the parity on a write operation (reconstruct write and
 *  read-modify-write)
 * -checking parity correctness
 * -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation, handle_stripe sets the pending bit for the
 * operation and increments the count.  raid5_run_ops is then run whenever
 * the count is non-zero.
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 * 1/ Parity check operations destroy the in cache version of the parity block,
 *    so we prevent parity dependent operations like writes and compute_blocks
 *    from starting while a check is in progress.  Some dma engines can perform
 *    the check without damaging the parity block; in these cases the parity
 *    block is re-marked up to date (assuming the check was successful) and is
 *    not re-read from disk.
 * 2/ When a write operation is requested we immediately lock the affected
 *    blocks, and mark them as not up to date.  This causes new read requests
 *    to be held off, as well as parity checks and compute block operations.
 * 3/ Once a compute block operation has been requested, handle_stripe treats
 *    that block as if it is up to date.  raid5_run_ops guarantees that any
 *    operation that is dependent on the compute block result is initiated after
 *    the compute block completes.
 */

/*
 * Operations state - intermediate states that are visible outside of sh->lock
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon.  For simple operations like biofill and
 * compute that only have an _idle and _run state, the state is indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - xor parity check is running
 * @check_state_run_q - q-parity check is running
 * @check_state_run_pq - pq dual parity check is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
	check_state_idle = 0,
	check_state_run,		/* xor parity check */
	check_state_run_q,		/* q-parity check */
	check_state_run_pq,		/* pq dual parity check */
	check_state_check_result,
	check_state_compute_run,	/* parity repair */
	check_state_compute_result,
};
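
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * debug helper mapping check_states values to names, e.g. for tracing
 * the idle -> run -> check_result -> compute_run -> compute_result
 * progression described above.
 */
static inline const char *check_state_name(enum check_states state)
{
	switch (state) {
	case check_state_idle:		 return "idle";
	case check_state_run:		 return "run";
	case check_state_run_q:		 return "run_q";
	case check_state_run_pq:	 return "run_pq";
	case check_state_check_result:	 return "check_result";
	case check_state_compute_run:	 return "compute_run";
	case check_state_compute_result: return "compute_result";
	}
	return "unknown";
}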

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
	reconstruct_state_idle = 0,
	reconstruct_state_prexor_drain_run,	/* prexor-write */
	reconstruct_state_drain_run,		/* write */
	reconstruct_state_run,			/* expand */
	reconstruct_state_prexor_drain_result,
	reconstruct_state_drain_result,
	reconstruct_state_result,
};

struct stripe_head {
	struct hlist_node	hash;
	struct list_head	lru;		/* inactive_list or handle_list */
	struct raid5_private_data *raid_conf;
	short			generation;	/* increments with every
						 * reshape */
	sector_t		sector;		/* sector of this row */
	short			pd_idx;		/* parity disk index */
	short			qd_idx;		/* 'Q' disk index for raid6 */
	short			ddf_layout;	/* use DDF ordering to calculate Q */
	unsigned long		state;		/* state flags */
	atomic_t		count;		/* nr of active thread/requests */
	spinlock_t		lock;
	int			bm_seq;		/* sequence number for bitmap flushes */
	int			disks;		/* disks in stripe */
	enum check_states	check_state;
	enum reconstruct_states reconstruct_state;
	/**
	 * struct stripe_operations
	 * @target - STRIPE_OP_COMPUTE_BLK target
	 * @target2 - 2nd compute target in the raid6 case
	 * @zero_sum_result - P and Q verification flags
	 * @request - async service request flags for raid_run_ops
	 */
	struct stripe_operations {
		int		     target, target2;
		enum sum_check_flags zero_sum_result;
#ifdef CONFIG_MULTICORE_RAID456
		unsigned long	     request;
		wait_queue_head_t    wait_for_ops;
#endif
	} ops;
	struct r5dev {
		struct bio	req;
		struct bio_vec	vec;
		struct page	*page;
		struct bio	*toread, *read, *towrite, *written;
		sector_t	sector;		/* sector of this page */
		unsigned long	flags;
	} dev[1]; /* allocated with extra space depending on RAID geometry */
};
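
/*
 * Illustrative sketch, not part of the original header: dev[] is
 * declared with a single element and the stripe_head is over-allocated
 * so that it holds one r5dev per disk.  The size needed for 'disks'
 * devices can therefore be computed like this (hypothetical helper):
 */
static inline size_t stripe_head_bytes(int disks)
{
	return sizeof(struct stripe_head) +
		(disks - 1) * sizeof(struct r5dev);
}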

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 *     for handle_stripe.  It is only valid under spin_lock(sh->lock);
 */
struct stripe_head_state {
	int syncing, expanding, expanded;
	int locked, uptodate, to_read, to_write, failed, written;
	int to_fill, compute, req_compute, non_overwrite;
	int failed_num;
	unsigned long ops_request;
};

/* r6_state - extra state data only relevant to r6 */
struct r6_state {
	int p_failed, q_failed, failed_num[2];
};

/* Flags */
#define	R5_UPTODATE	0	/* page contains current data */
#define	R5_LOCKED	1	/* IO has been submitted on "req" */
#define	R5_OVERWRITE	2	/* towrite covers whole page */
/* and some that are internal to handle_stripe */
#define	R5_Insync	3	/* rdev && rdev->in_sync at start */
#define	R5_Wantread	4	/* want to schedule a read */
#define	R5_Wantwrite	5
#define	R5_Overlap	7	/* There is a pending overlapping request on this block */
#define	R5_ReadError	8	/* seen a read error here recently */
#define	R5_ReWrite	9	/* have tried to over-write the readerror */

#define	R5_Expanded	10	/* This block now has post-expand data */
#define	R5_Wantcompute	11	/* compute_block in progress treat as
				 * uptodate
				 */
#define	R5_Wantfill	12	/* dev->toread contains a bio that needs
				 * filling
				 */
#define	R5_Wantdrain	13	/* dev->towrite needs to be drained */
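
/*
 * Illustrative sketch, not part of the original header: the R5_* values
 * are bit numbers in r5dev->flags and are manipulated with the standard
 * atomic bitops.  For example, the "Clean" state from the table at the
 * top of this file (UPTODATE set, LOCKED clear) can be tested like this
 * (hypothetical helper):
 */
static inline int r5dev_is_clean(struct r5dev *dev)
{
	return test_bit(R5_UPTODATE, &dev->flags) &&
		!test_bit(R5_LOCKED, &dev->flags);
}
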
/*
 * Write method
 */
#define RECONSTRUCT_WRITE	1
#define READ_MODIFY_WRITE	2
/* not a write method, but a compute_parity mode */
#define	CHECK_PARITY		3
/* Additional compute_parity mode -- updates the parity w/o LOCKING */
#define UPDATE_PARITY		4
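
/*
 * Illustrative sketch, not part of the original header: handle_stripe
 * summarizes per-device state into struct stripe_head_state by scanning
 * each r5dev.  A minimal, hypothetical version of that accounting loop:
 */
static inline void count_stripe_state(struct stripe_head *sh,
				      struct stripe_head_state *s)
{
	int i;

	for (i = 0; i < sh->disks; i++) {
		struct r5dev *dev = &sh->dev[i];

		if (test_bit(R5_LOCKED, &dev->flags))
			s->locked++;
		if (test_bit(R5_UPTODATE, &dev->flags))
			s->uptodate++;
		if (dev->toread)
			s->to_read++;
		if (dev->towrite)
			s->to_write++;
	}
}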

/*
 * Stripe state
 */
#define STRIPE_HANDLE		2
#define	STRIPE_SYNCING		3
#define	STRIPE_INSYNC		4
#define	STRIPE_PREREAD_ACTIVE	5
#define	STRIPE_DELAYED		6
#define	STRIPE_DEGRADED		7
#define	STRIPE_BIT_DELAY	8
#define	STRIPE_EXPANDING	9
#define	STRIPE_EXPAND_SOURCE	10
#define	STRIPE_EXPAND_READY	11
#define	STRIPE_IO_STARTED	12 /* do not count towards 'bypass_count' */
#define	STRIPE_FULL_WRITE	13 /* all blocks are set to be overwritten */
#define	STRIPE_BIOFILL_RUN	14
#define	STRIPE_COMPUTE_RUN	15
#define	STRIPE_OPS_REQ_PENDING	16

/*
 * Operation request flags
 */
#define STRIPE_OP_BIOFILL	0
#define STRIPE_OP_COMPUTE_BLK	1
#define STRIPE_OP_PREXOR	2
#define STRIPE_OP_BIODRAIN	3
#define STRIPE_OP_RECONSTRUCT	4
#define STRIPE_OP_CHECK		5
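
/*
 * Illustrative sketch, not part of the original header: handle_stripe
 * requests services from raid5_run_ops by setting these bit numbers in
 * the ops_request word of struct stripe_head_state, e.g. (hypothetical
 * helper):
 */
static inline void request_biofill(struct stripe_head_state *s)
{
	set_bit(STRIPE_OP_BIOFILL, &s->ops_request);
}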

/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. until unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
 * it to the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case),
 * we clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag and set
 * PREREAD_ACTIVE (see the illustrative sketch after raid5_conf_t below).
 * In handle_stripe, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
 * HANDLE gets cleared if handle_stripe leaves nothing locked.
 */


struct disk_info {
	mdk_rdev_t	*rdev;
};

struct raid5_private_data {
	struct hlist_head	*stripe_hashtbl;
	mddev_t			*mddev;
	struct disk_info	*spare;
	int			chunk_sectors;
	int			level, algorithm;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;

	/* reshape_progress is the leading edge of a 'reshape'.
	 * It has value MaxSector when no reshape is happening.
	 * If delta_disks < 0, it is the last sector we started work on,
	 * else it is the next sector to work on.
	 */
	sector_t		reshape_progress;
	/* reshape_safe is the trailing edge of a reshape.  We know that
	 * before (or after) this address, all reshape has completed.
	 */
	sector_t		reshape_safe;
	int			previous_raid_disks;
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation; /* increments with every reshape */
	unsigned long		reshape_checkpoint; /* Time we last updated
						     * metadata */

	struct list_head	handle_list; /* stripes needing handling */
	struct list_head	hold_list; /* preread ready stripes */
	struct list_head	delayed_list; /* stripes that have plugged requests */
	struct list_head	bitmap_list; /* stripes delayed, awaiting bitmap update */
	struct bio		*retry_read_aligned; /* currently retrying aligned bios */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes; /* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes; /* full write backlog */
	int			bypass_count; /* bypassed prereads */
	int			bypass_threshold; /* preread nice */
	struct list_head	*last_hold; /* detect hold_list promotions */

	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][20];
	struct kmem_cache	*slab_cache; /* for allocating stripes */

	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	/* per cpu variables */
	struct raid5_percpu {
		struct page	*spare_page; /* Used when checking P/Q in raid6 */
		void		*scribble;   /* space for constructing buffer
					      * lists and performing address
					      * conversions
					      */
	} __percpu *percpu;
	size_t			scribble_len; /* size of scribble region must be
					       * associated with conf to handle
					       * cpu hotplug while reshaping
					       */
#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block	cpu_notify;
#endif

	/*
	 * Free stripes pool
	 */
	atomic_t		active_stripes;
	struct list_head	inactive_list;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	int			inactive_blocked;	/* release of inactive stripes blocked,
							 * waiting for 25% to be free
							 */
	int			pool_size; /* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct mdk_thread_s	*thread;
};
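
/*
 * Illustrative sketch, not part of the original header: per the locking
 * rules described at the top of this file, device_lock nests inside the
 * per-stripe lock and must be irq-safe because it can be claimed from
 * interrupt context.  Hypothetical helpers showing the nesting order:
 */
static inline void stripe_lists_lock(struct raid5_private_data *conf,
				     struct stripe_head *sh)
{
	spin_lock(&sh->lock);
	spin_lock_irq(&conf->device_lock);
}

static inline void stripe_lists_unlock(struct raid5_private_data *conf,
				       struct stripe_head *sh)
{
	spin_unlock_irq(&conf->device_lock);
	spin_unlock(&sh->lock);
}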

typedef struct raid5_private_data raid5_conf_t;
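
/*
 * Illustrative sketch, not part of the original header, of the
 * delayed-queue promotion described in the "Plugging" comment above;
 * the real logic is raid5_activate_delayed() in raid5.c.  Hypothetical
 * helper, assuming device_lock is already held:
 */
static inline void activate_delayed_sketch(raid5_conf_t *conf)
{
	/* only promote delayed stripes once nothing is prereading */
	if (atomic_read(&conf->preread_active_stripes) == 0) {
		while (!list_empty(&conf->delayed_list)) {
			struct stripe_head *sh;

			sh = list_entry(conf->delayed_list.next,
					struct stripe_head, lru);
			list_move_tail(&sh->lru, &conf->handle_list);
			clear_bit(STRIPE_DELAYED, &sh->state);
			if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
					      &sh->state))
				atomic_inc(&conf->preread_active_stripes);
		}
	}
}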
/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC	0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC	1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC	2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC	3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0		4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N		5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the '_N_*' modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'
 */

#define ALGORITHM_ROTATING_ZERO_RESTART	8  /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART	9  /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE	10 /* DDF PRL=6 RLQ=3 */


/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6	16
#define ALGORITHM_RIGHT_ASYMMETRIC_6	17
#define ALGORITHM_LEFT_SYMMETRIC_6	18
#define ALGORITHM_RIGHT_SYMMETRIC_6	19
#define ALGORITHM_PARITY_0_6		20
#define ALGORITHM_PARITY_N_6		ALGORITHM_PARITY_N

static inline int algorithm_valid_raid5(int layout)
{
	return (layout >= 0) &&
		(layout <= 5);
}
static inline int algorithm_valid_raid6(int layout)
{
	return (layout >= 0 && layout <= 5)
		||
		(layout >= 8 && layout <= 10)
		||
		(layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
	return layout >= 8 && layout <= 10;
}
#endif