/*
 * mm.c - Micro Memory(tm) PCI memory board block device driver - v2.3
 *
 * (C) 2001 San Mehat <nettwerk@valinux.com>
 * (C) 2001 Johannes Erdfelt <jerdfelt@valinux.com>
 * (C) 2001 NeilBrown <neilb@cse.unsw.edu.au>
 *
 * This driver for the Micro Memory PCI Memory Module with Battery Backup
 * is Copyright Micro Memory Inc 2001-2002.  All rights reserved.
 *
 * This driver is released to the public under the terms of the
 * GNU GENERAL PUBLIC LICENSE version 2
 * See the file COPYING for details.
 *
 * This driver provides a standard block device interface for Micro Memory(tm)
 * PCI based RAM boards.
 * 10/05/01: Phap Nguyen  - Rebuilt the driver
 * 10/22/01: Phap Nguyen  - v2.1 Added disk partitioning
 * 29oct2001: NeilBrown   - Use make_request_fn instead of request_fn
 *                        - use standard disk partitioning (so fdisk works).
 * 08nov2001: NeilBrown   - change driver name from "mm" to "umem"
 *                        - incorporate into main kernel
 * 08apr2002: NeilBrown   - Move some of the interrupt handling to a tasklet
 *                        - use spin_lock_bh instead of _irq
 *                        - Never block on make_request; queue
 *                          bh's instead.
 *                        - unregister umem from devfs at module unload
 *                        - Change version to 2.3
 * 07Nov2001: Phap Nguyen - Select pci read command: 06, 12, 15 (Decimal)
 * 07Jan2002: P. Nguyen   - Used PCI Memory Write & Invalidate for DMA
 * 15May2002: NeilBrown   - convert to bio for 2.5
 * 17May2002: NeilBrown   - remove init_mem initialisation.  Instead detect
 *                          a sequence of writes that cover the card, and
 *                          set the initialised bit then.
 */

#undef DEBUG	/* #define DEBUG if you want debugging info (pr_debug) */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/gfp.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <linux/fcntl.h>	/* O_ACCMODE */
#include <linux/hdreg.h>	/* HDIO_GETGEO */

#include "umem.h"

#include <asm/uaccess.h>
#include <asm/io.h>
#define MM_MAXCARDS 4
#define MM_RAHEAD 2      /* two sectors */
#define MM_BLKSIZE 1024  /* 1k blocks */
#define MM_HARDSECT 512  /* 512-byte hardware sectors */
#define MM_SHIFT 6       /* max 64 partitions on 4 cards */

/*
 * Version Information
 */

#define DRIVER_NAME	"umem"
#define DRIVER_VERSION	"v2.3"
#define DRIVER_AUTHOR	"San Mehat, Johannes Erdfelt, NeilBrown"
#define DRIVER_DESC	"Micro Memory(tm) PCI memory board block driver"

static int debug;
/* #define HW_TRACE(x) writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */
#define HW_TRACE(x)

#define DEBUG_LED_ON_TRANSFER	0x01
#define DEBUG_BATTERY_POLLING	0x02

module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug bitmask");

static int pci_read_cmd = 0x0C;		/* Read Multiple */
module_param(pci_read_cmd, int, 0);
MODULE_PARM_DESC(pci_read_cmd, "PCI read command");

static int pci_write_cmd = 0x0F;	/* Write and Invalidate */
module_param(pci_write_cmd, int, 0);
MODULE_PARM_DESC(pci_write_cmd, "PCI write command");
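
/*
 * The defaults above are PCI bus command codes: 0x0C is Memory Read
 * Multiple and 0x0F is Memory Write and Invalidate (0x07, plain Memory
 * Write, is the fallback forced in mm_pci_probe() when anything other
 * than 0x0F is requested).  As a sketch, assuming the module is loaded
 * as "umem", these can be overridden at load time, e.g.:
 *
 *	modprobe umem pci_read_cmd=6 debug=1
 */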

static int pci_cmds;

static int major_nr;

#include <linux/blkdev.h>
#include <linux/blkpg.h>

struct cardinfo {
	struct pci_dev *dev;

	unsigned char __iomem *csr_remap;
	unsigned int mm_size; /* size in kbytes */

	unsigned int init_size; /* initial segment, in sectors,
				 * that we know to
				 * have been written
				 */
	struct bio *bio, *currentbio, **biotail;
	int current_idx;
	sector_t current_sector;

	struct request_queue *queue;

	struct mm_page {
		dma_addr_t page_dma;
		struct mm_dma_desc *desc;
		int cnt, headcnt;
		struct bio *bio, **biotail;
		int idx;
	} mm_pages[2];
#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))

	int Active, Ready;

	struct tasklet_struct tasklet;
	unsigned int dma_status;

	struct {
		int good;
		int warned;
		unsigned long last_change;
	} battery[2];

	spinlock_t lock;
	int check_batteries;

	int flags;
};

static struct cardinfo cards[MM_MAXCARDS];
static struct timer_list battery_timer;

static int num_cards;

static struct gendisk *mm_gendisk[MM_MAXCARDS];

static void check_batteries(struct cardinfo *card);

static int get_userbit(struct cardinfo *card, int bit)
{
	unsigned char led;

	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
	return led & bit;
}

static int set_userbit(struct cardinfo *card, int bit, unsigned char state)
{
	unsigned char led;

	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
	if (state)
		led |= bit;
	else
		led &= ~bit;
	writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);

	return 0;
}

/*
 * NOTE: For the power LED, use the LED_POWER_* macros since they differ
 */
static void set_led(struct cardinfo *card, int shift, unsigned char state)
{
	unsigned char led;

	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
	if (state == LED_FLIP)
		led ^= (1<<shift);
	else {
		led &= ~(0x03 << shift);
		led |= (state << shift);
	}
	writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);

}
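
/*
 * Note: the LED control register appears to pack a 2-bit field per LED
 * (hence the ~(0x03 << shift) mask above); "shift" selects the LED and
 * "state" is one of the LED_* values from umem.h, with LED_FLIP toggling
 * only the low bit of that field.
 */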

#ifdef MM_DIAG
static void dump_regs(struct cardinfo *card)
{
	unsigned char *p;
	int i, i1;

	p = card->csr_remap;
	for (i = 0; i < 8; i++) {
		printk(KERN_DEBUG "%p   ", p);

		for (i1 = 0; i1 < 16; i1++)
			printk("%02x ", *p++);

		printk("\n");
	}
}
#endif

static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
{
	dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - ");
	if (dmastat & DMASCR_ANY_ERR)
		printk(KERN_CONT "ANY_ERR ");
	if (dmastat & DMASCR_MBE_ERR)
		printk(KERN_CONT "MBE_ERR ");
	if (dmastat & DMASCR_PARITY_ERR_REP)
		printk(KERN_CONT "PARITY_ERR_REP ");
	if (dmastat & DMASCR_PARITY_ERR_DET)
		printk(KERN_CONT "PARITY_ERR_DET ");
	if (dmastat & DMASCR_SYSTEM_ERR_SIG)
		printk(KERN_CONT "SYSTEM_ERR_SIG ");
	if (dmastat & DMASCR_TARGET_ABT)
		printk(KERN_CONT "TARGET_ABT ");
	if (dmastat & DMASCR_MASTER_ABT)
		printk(KERN_CONT "MASTER_ABT ");
	if (dmastat & DMASCR_CHAIN_COMPLETE)
		printk(KERN_CONT "CHAIN_COMPLETE ");
	if (dmastat & DMASCR_DMA_COMPLETE)
		printk(KERN_CONT "DMA_COMPLETE ");
	printk("\n");
}

/*
 * Theory of request handling
 *
 * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME
 * We have two pages of mm_dma_desc, holding about 64 descriptors
 * each.  These are allocated at init time.
 * One page is "Ready" and is either full, or can have requests added.
 * The other page might be "Active", meaning DMA is happening on it.
 *
 * Whenever IO on the active page completes, the Ready page is activated
 * and the ex-Active page is cleaned out and made Ready.
 * Otherwise the Ready page is only activated when it becomes full.
 *
 * If a request arrives while both pages are full, it is queued, and b_rdev is
 * overloaded to record whether it was a read or a write.
 *
 * The interrupt handler only polls the device to clear the interrupt.
 * The processing of the result is done in a tasklet.
 */
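
/*
 * In the code below, card->Active is -1 while no page is active, and
 * otherwise holds the index (0 or 1) of the page currently being DMAed;
 * card->Ready indexes the page that new requests are added to.
 */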

static void mm_start_io(struct cardinfo *card)
{
	/* we have the lock, we know there is
	 * no IO active, and we know that card->Active
	 * is set
	 */
	struct mm_dma_desc *desc;
	struct mm_page *page;
	int offset;

	/* make the last descriptor end the chain */
	page = &card->mm_pages[card->Active];
	pr_debug("start_io: %d %d->%d\n",
		 card->Active, page->headcnt, page->cnt - 1);
	desc = &page->desc[page->cnt-1];

	desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
	desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN);
	desc->sem_control_bits = desc->control_bits;


	if (debug & DEBUG_LED_ON_TRANSFER)
		set_led(card, LED_REMOVE, LED_ON);

	desc = &page->desc[page->headcnt];
	writel(0, card->csr_remap + DMA_PCI_ADDR);
	writel(0, card->csr_remap + DMA_PCI_ADDR + 4);

	writel(0, card->csr_remap + DMA_LOCAL_ADDR);
	writel(0, card->csr_remap + DMA_LOCAL_ADDR + 4);

	writel(0, card->csr_remap + DMA_TRANSFER_SIZE);
	writel(0, card->csr_remap + DMA_TRANSFER_SIZE + 4);

	writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR);
	writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4);

	offset = ((char *)desc) - ((char *)page->desc);
	writel(cpu_to_le32((page->page_dma+offset) & 0xffffffff),
	       card->csr_remap + DMA_DESCRIPTOR_ADDR);
	/* Force the value to u64 before shifting, otherwise >> 32 is
	 * undefined C and on some ports will do nothing! */
	writel(cpu_to_le32(((u64)page->page_dma)>>32),
	       card->csr_remap + DMA_DESCRIPTOR_ADDR + 4);

	/* Go, go, go */
	writel(cpu_to_le32(DMASCR_GO | DMASCR_CHAIN_EN | pci_cmds),
	       card->csr_remap + DMA_STATUS_CTRL);
}

static int add_bio(struct cardinfo *card);

static void activate(struct cardinfo *card)
{
	/* If no page is Active and the Ready page is
	 * not empty, then switch the Ready page
	 * to Active and start IO.
	 * Then add any bios that are available to Ready.
	 */

	do {
		while (add_bio(card))
			;

		if (card->Active == -1 &&
		    card->mm_pages[card->Ready].cnt > 0) {
			card->Active = card->Ready;
			card->Ready = 1-card->Ready;
			mm_start_io(card);
		}

	} while (card->Active == -1 && add_bio(card));
}

static inline void reset_page(struct mm_page *page)
{
	page->cnt = 0;
	page->headcnt = 0;
	page->bio = NULL;
	page->biotail = &page->bio;
}

/*
 * If there is room on the Ready page, add one segment
 * of the current bio (or pull the next bio off the list).
 * Return 1 if there was room, else 0.
 */
static int add_bio(struct cardinfo *card)
{
	struct mm_page *p;
	struct mm_dma_desc *desc;
	dma_addr_t dma_handle;
	int offset;
	struct bio *bio;
	struct bio_vec *vec;
	int idx;
	int rw;
	int len;

	bio = card->currentbio;
	if (!bio && card->bio) {
		card->currentbio = card->bio;
		card->current_idx = card->bio->bi_idx;
		card->current_sector = card->bio->bi_sector;
		card->bio = card->bio->bi_next;
		if (card->bio == NULL)
			card->biotail = &card->bio;
		card->currentbio->bi_next = NULL;
		return 1;
	}
	if (!bio)
		return 0;
	idx = card->current_idx;

	rw = bio_rw(bio);
	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
		return 0;

	vec = bio_iovec_idx(bio, idx);
	len = vec->bv_len;
	dma_handle = pci_map_page(card->dev,
				  vec->bv_page,
				  vec->bv_offset,
				  len,
				  (rw == READ) ?
				  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);

	p = &card->mm_pages[card->Ready];
	desc = &p->desc[p->cnt];
	p->cnt++;
	if (p->bio == NULL)
		p->idx = idx;
	if ((p->biotail) != &bio->bi_next) {
		*(p->biotail) = bio;
		p->biotail = &(bio->bi_next);
		bio->bi_next = NULL;
	}

	desc->data_dma_handle = dma_handle;

	desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
	desc->local_addr = cpu_to_le64(card->current_sector << 9);
	desc->transfer_size = cpu_to_le32(len);
	offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
	desc->zero1 = desc->zero2 = 0;
	offset = (((char *)(desc+1)) - ((char *)p->desc));
	desc->next_desc_addr = cpu_to_le64(p->page_dma+offset);
	desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN|
					 DMASCR_PARITY_INT_EN|
					 DMASCR_CHAIN_EN |
					 DMASCR_SEM_EN |
					 pci_cmds);
	if (rw == WRITE)
		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
	desc->sem_control_bits = desc->control_bits;

	card->current_sector += (len >> 9);
	idx++;
	card->current_idx = idx;
	if (idx >= bio->bi_vcnt)
		card->currentbio = NULL;

	return 1;
}
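
/*
 * Note on the descriptor fields filled in above: local_addr is the byte
 * offset on the card (current_sector << 9, i.e. 512-byte sectors), and
 * sem_addr points back at the descriptor's own sem_control_bits field,
 * so the card can report per-descriptor completion status by writing the
 * semaphore word into host memory, which process_page() then inspects.
 */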

static void process_page(unsigned long data)
{
	/* check if any of the requests in the page are DMA_COMPLETE,
	 * and deal with them appropriately.
	 * If we find a descriptor without DMA_COMPLETE in the semaphore, then
	 * dma must have hit an error on that descriptor, so use dma_status
	 * instead and assume that all following descriptors must be re-tried.
	 */
	struct mm_page *page;
	struct bio *return_bio = NULL;
	struct cardinfo *card = (struct cardinfo *)data;
	unsigned int dma_status = card->dma_status;

	spin_lock_bh(&card->lock);
	if (card->Active < 0)
		goto out_unlock;
	page = &card->mm_pages[card->Active];

	while (page->headcnt < page->cnt) {
		struct bio *bio = page->bio;
		struct mm_dma_desc *desc = &page->desc[page->headcnt];
		int control = le32_to_cpu(desc->sem_control_bits);
		int last = 0;
		int idx;

		if (!(control & DMASCR_DMA_COMPLETE)) {
			control = dma_status;
			last = 1;
		}
		page->headcnt++;
		idx = page->idx;
		page->idx++;
		if (page->idx >= bio->bi_vcnt) {
			page->bio = bio->bi_next;
			if (page->bio)
				page->idx = page->bio->bi_idx;
		}

		pci_unmap_page(card->dev, desc->data_dma_handle,
			       bio_iovec_idx(bio, idx)->bv_len,
			       (control & DMASCR_TRANSFER_READ) ?
			       PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		if (control & DMASCR_HARD_ERROR) {
			/* error */
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
			dev_printk(KERN_WARNING, &card->dev->dev,
				   "I/O error on sector %d/%d\n",
				   le32_to_cpu(desc->local_addr)>>9,
				   le32_to_cpu(desc->transfer_size));
			dump_dmastat(card, control);
		} else if ((bio->bi_rw & REQ_WRITE) &&
			   le32_to_cpu(desc->local_addr) >> 9 ==
			   card->init_size) {
			card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
			if (card->init_size >> 1 >= card->mm_size) {
				dev_printk(KERN_INFO, &card->dev->dev,
					   "memory now initialised\n");
				set_userbit(card, MEMORY_INITIALIZED, 1);
			}
		}
		if (bio != page->bio) {
			bio->bi_next = return_bio;
			return_bio = bio;
		}

		if (last)
			break;
	}

	if (debug & DEBUG_LED_ON_TRANSFER)
		set_led(card, LED_REMOVE, LED_OFF);

	if (card->check_batteries) {
		card->check_batteries = 0;
		check_batteries(card);
	}
	if (page->headcnt >= page->cnt) {
		reset_page(page);
		card->Active = -1;
		activate(card);
	} else {
		/* haven't finished with this one yet */
		pr_debug("do some more\n");
		mm_start_io(card);
	}
out_unlock:
	spin_unlock_bh(&card->lock);

	while (return_bio) {
		struct bio *bio = return_bio;

		return_bio = bio->bi_next;
		bio->bi_next = NULL;
		bio_endio(bio, 0);
	}
}

static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct cardinfo *card = cb->data;

	spin_lock_irq(&card->lock);
	activate(card);
	spin_unlock_irq(&card->lock);
	kfree(cb);
}

static int mm_check_plugged(struct cardinfo *card)
{
	return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
}
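
/*
 * blk_check_plugged() registers mm_unplug() on the current task's plug (if
 * any), so bios queued in mm_make_request() below are normally only pushed
 * to the card when the plug is flushed; REQ_SYNC bios, or bios submitted
 * with no plug in place, kick activate() immediately instead.
 */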

static void mm_make_request(struct request_queue *q, struct bio *bio)
{
	struct cardinfo *card = q->queuedata;
	pr_debug("mm_make_request %llu %u\n",
		 (unsigned long long)bio->bi_sector, bio->bi_size);

	spin_lock_irq(&card->lock);
	*card->biotail = bio;
	bio->bi_next = NULL;
	card->biotail = &bio->bi_next;
	if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
		activate(card);
	spin_unlock_irq(&card->lock);

	return;
}

static irqreturn_t mm_interrupt(int irq, void *__card)
{
	struct cardinfo *card = (struct cardinfo *) __card;
	unsigned int dma_status;
	unsigned short cfg_status;

	HW_TRACE(0x30);

	dma_status = le32_to_cpu(readl(card->csr_remap + DMA_STATUS_CTRL));

	if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) {
		/* interrupt wasn't for me ... */
		return IRQ_NONE;
	}

	/* clear COMPLETION interrupts */
	if (card->flags & UM_FLAG_NO_BYTE_STATUS)
		writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE),
		       card->csr_remap + DMA_STATUS_CTRL);
	else
		writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16,
		       card->csr_remap + DMA_STATUS_CTRL + 2);

	/* log errors and clear interrupt status */
	if (dma_status & DMASCR_ANY_ERR) {
		unsigned int data_log1, data_log2;
		unsigned int addr_log1, addr_log2;
		unsigned char stat, count, syndrome, check;

		stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS);

		data_log1 = le32_to_cpu(readl(card->csr_remap +
					      ERROR_DATA_LOG));
		data_log2 = le32_to_cpu(readl(card->csr_remap +
					      ERROR_DATA_LOG + 4));
		addr_log1 = le32_to_cpu(readl(card->csr_remap +
					      ERROR_ADDR_LOG));
		addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4);

		count = readb(card->csr_remap + ERROR_COUNT);
		syndrome = readb(card->csr_remap + ERROR_SYNDROME);
		check = readb(card->csr_remap + ERROR_CHECK);

		dump_dmastat(card, dma_status);

		if (stat & 0x01)
			dev_printk(KERN_ERR, &card->dev->dev,
				   "Memory access error detected (err count %d)\n",
				   count);
		if (stat & 0x02)
			dev_printk(KERN_ERR, &card->dev->dev,
				   "Multi-bit EDC error\n");

		dev_printk(KERN_ERR, &card->dev->dev,
			   "Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
			   addr_log2, addr_log1, data_log2, data_log1);
		dev_printk(KERN_ERR, &card->dev->dev,
			   "Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
			   check, syndrome);

		writeb(0, card->csr_remap + ERROR_COUNT);
	}

	if (dma_status & DMASCR_PARITY_ERR_REP) {
		dev_printk(KERN_ERR, &card->dev->dev,
			   "PARITY ERROR REPORTED\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	if (dma_status & DMASCR_PARITY_ERR_DET) {
		dev_printk(KERN_ERR, &card->dev->dev,
			   "PARITY ERROR DETECTED\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	if (dma_status & DMASCR_SYSTEM_ERR_SIG) {
		dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	if (dma_status & DMASCR_TARGET_ABT) {
		dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	if (dma_status & DMASCR_MASTER_ABT) {
		dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n");
		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
	}

	/* and process the DMA descriptors */
	card->dma_status = dma_status;
	tasklet_schedule(&card->tasklet);

	HW_TRACE(0x36);

	return IRQ_HANDLED;
}

/*
 * If both batteries are good, no LED
 * If either battery has been warned, solid LED
 * If both batteries are bad, flash the LED quickly
 * If either battery is bad, flash the LED semi quickly
 */
static void set_fault_to_battery_status(struct cardinfo *card)
{
	if (card->battery[0].good && card->battery[1].good)
		set_led(card, LED_FAULT, LED_OFF);
	else if (card->battery[0].warned || card->battery[1].warned)
		set_led(card, LED_FAULT, LED_ON);
	else if (!card->battery[0].good && !card->battery[1].good)
		set_led(card, LED_FAULT, LED_FLASH_7_0);
	else
		set_led(card, LED_FAULT, LED_FLASH_3_5);
}

static void init_battery_timer(void);

static int check_battery(struct cardinfo *card, int battery, int status)
{
	if (status != card->battery[battery].good) {
		card->battery[battery].good = !card->battery[battery].good;
		card->battery[battery].last_change = jiffies;

		if (card->battery[battery].good) {
			dev_printk(KERN_ERR, &card->dev->dev,
				   "Battery %d now good\n", battery + 1);
			card->battery[battery].warned = 0;
		} else
			dev_printk(KERN_ERR, &card->dev->dev,
				   "Battery %d now FAILED\n", battery + 1);

		return 1;
	} else if (!card->battery[battery].good &&
		   !card->battery[battery].warned &&
		   time_after_eq(jiffies, card->battery[battery].last_change +
				 (HZ * 60 * 60 * 5))) {
		dev_printk(KERN_ERR, &card->dev->dev,
			   "Battery %d still FAILED after 5 hours\n", battery + 1);
		card->battery[battery].warned = 1;

		return 1;
	}

	return 0;
}

static void check_batteries(struct cardinfo *card)
{
	/* NOTE: this must *never* be called while the card
	 * is doing (bus-to-card) DMA, or you will need the
	 * reset switch
	 */
	unsigned char status;
	int ret1, ret2;

	status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
	if (debug & DEBUG_BATTERY_POLLING)
		dev_printk(KERN_DEBUG, &card->dev->dev,
			   "checking battery status, 1 = %s, 2 = %s\n",
			   (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
			   (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");

	ret1 = check_battery(card, 0, !(status & BATTERY_1_FAILURE));
	ret2 = check_battery(card, 1, !(status & BATTERY_2_FAILURE));

	if (ret1 || ret2)
		set_fault_to_battery_status(card);
}

static void check_all_batteries(unsigned long ptr)
{
	int i;

	for (i = 0; i < num_cards; i++)
		if (!(cards[i].flags & UM_FLAG_NO_BATT)) {
			struct cardinfo *card = &cards[i];
			spin_lock_bh(&card->lock);
			if (card->Active >= 0)
				card->check_batteries = 1;
			else
				check_batteries(card);
			spin_unlock_bh(&card->lock);
		}

	init_battery_timer();
}

static void init_battery_timer(void)
{
	init_timer(&battery_timer);
	battery_timer.function = check_all_batteries;
	battery_timer.expires = jiffies + (HZ * 60);
	add_timer(&battery_timer);
}
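
/*
 * The battery timer is re-armed from check_all_batteries(), so battery
 * status is polled roughly once a minute (HZ * 60 jiffies); if a card is
 * in the middle of DMA, the check is deferred to process_page() via
 * card->check_batteries, per the warning in check_batteries() above.
 */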

static void del_battery_timer(void)
{
	del_timer(&battery_timer);
}

/*
 * Note no locks taken out here.  In a worst case scenario, we could drop
 * a chunk of system memory.  But that should never happen, since validation
 * happens at open or mount time, when locks are held.
 *
 * That's crap, since doing that while some partitions are opened
 * or mounted will give you really nasty results.
 */
static int mm_revalidate(struct gendisk *disk)
{
	struct cardinfo *card = disk->private_data;
	set_capacity(disk, card->mm_size << 1);
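	/* mm_size is in kbytes; set_capacity() takes 512-byte sectors,
	 * hence the << 1. */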
	return 0;
}

static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct cardinfo *card = bdev->bd_disk->private_data;
	int size = card->mm_size * (1024 / MM_HARDSECT);

	/*
	 * get geometry: we have to fake one...  trim the size to a
	 * multiple of 2048 (1M): pretend we have 32 sectors and 64 heads,
	 * and however many cylinders that gives.
	 */
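	/*
	 * For example (illustrative numbers only): a 512 MB card has
	 * mm_size = 524288 kbytes, so size = 524288 * 2 = 1048576 sectors
	 * and cylinders = 1048576 / (64 * 32) = 512.
	 */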
	geo->heads = 64;
	geo->sectors = 32;
	geo->cylinders = size / (geo->heads * geo->sectors);
	return 0;
}

static const struct block_device_operations mm_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= mm_getgeo,
	.revalidate_disk = mm_revalidate,
};

static int __devinit mm_pci_probe(struct pci_dev *dev,
				  const struct pci_device_id *id)
{
	int ret = -ENODEV;
	struct cardinfo *card = &cards[num_cards];
	unsigned char mem_present;
	unsigned char batt_status;
	unsigned int saved_bar, data;
	unsigned long csr_base;
	unsigned long csr_len;
	int magic_number;
	static int printed_version;

	if (!printed_version++)
		printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8);
	pci_set_master(dev);

	card->dev = dev;

	csr_base = pci_resource_start(dev, 0);
	csr_len = pci_resource_len(dev, 0);
	if (!csr_base || !csr_len)
		return -ENODEV;

	dev_printk(KERN_INFO, &dev->dev,
		   "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");

	if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) &&
	    pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
		dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
		return -ENOMEM;
	}

	ret = pci_request_regions(dev, DRIVER_NAME);
	if (ret) {
		dev_printk(KERN_ERR, &card->dev->dev,
			   "Unable to request memory region\n");
		goto failed_req_csr;
	}

	card->csr_remap = ioremap_nocache(csr_base, csr_len);
	if (!card->csr_remap) {
		dev_printk(KERN_ERR, &card->dev->dev,
			   "Unable to remap memory region\n");
		ret = -ENOMEM;

		goto failed_remap_csr;
	}

	dev_printk(KERN_INFO, &card->dev->dev,
		   "CSR 0x%08lx -> 0x%p (0x%lx)\n",
		   csr_base, card->csr_remap, csr_len);

	switch (card->dev->device) {
	case 0x5415:
		card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG;
		magic_number = 0x59;
		break;

	case 0x5425:
		card->flags |= UM_FLAG_NO_BYTE_STATUS;
		magic_number = 0x5C;
		break;

	case 0x6155:
		card->flags |= UM_FLAG_NO_BYTE_STATUS |
			UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT;
		magic_number = 0x99;
		break;

	default:
		magic_number = 0x100;
		break;
	}

	if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) {
		dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n");
		ret = -ENOMEM;
		goto failed_magic;
	}

	card->mm_pages[0].desc = pci_alloc_consistent(card->dev,
						      PAGE_SIZE * 2,
						      &card->mm_pages[0].page_dma);
	card->mm_pages[1].desc = pci_alloc_consistent(card->dev,
						      PAGE_SIZE * 2,
						      &card->mm_pages[1].page_dma);
	if (card->mm_pages[0].desc == NULL ||
	    card->mm_pages[1].desc == NULL) {
		dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
		goto failed_alloc;
	}
	reset_page(&card->mm_pages[0]);
	reset_page(&card->mm_pages[1]);
	card->Ready = 0;	/* page 0 is ready */
	card->Active = -1;	/* no page is active */
	card->bio = NULL;
	card->biotail = &card->bio;

	card->queue = blk_alloc_queue(GFP_KERNEL);
	if (!card->queue)
		goto failed_alloc;

	blk_queue_make_request(card->queue, mm_make_request);
	card->queue->queue_lock = &card->lock;
	card->queue->queuedata = card;

	tasklet_init(&card->tasklet, process_page, (unsigned long)card);

	card->check_batteries = 0;

	mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
	switch (mem_present) {
	case MEM_128_MB:
		card->mm_size = 1024 * 128;
		break;
	case MEM_256_MB:
		card->mm_size = 1024 * 256;
		break;
	case MEM_512_MB:
		card->mm_size = 1024 * 512;
		break;
	case MEM_1_GB:
		card->mm_size = 1024 * 1024;
		break;
	case MEM_2_GB:
		card->mm_size = 1024 * 2048;
		break;
	default:
		card->mm_size = 0;
		break;
	}

	/* Clear the LED's we control */
	set_led(card, LED_REMOVE, LED_OFF);
	set_led(card, LED_FAULT, LED_OFF);

	batt_status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);

	card->battery[0].good = !(batt_status & BATTERY_1_FAILURE);
	card->battery[1].good = !(batt_status & BATTERY_2_FAILURE);
	card->battery[0].last_change = card->battery[1].last_change = jiffies;

	if (card->flags & UM_FLAG_NO_BATT)
		dev_printk(KERN_INFO, &card->dev->dev,
			   "Size %d KB\n", card->mm_size);
	else {
		dev_printk(KERN_INFO, &card->dev->dev,
			   "Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
			   card->mm_size,
			   batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled",
			   card->battery[0].good ? "OK" : "FAILURE",
			   batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled",
			   card->battery[1].good ? "OK" : "FAILURE");

		set_fault_to_battery_status(card);
	}

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &saved_bar);
	data = 0xffffffff;
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, data);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &data);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, saved_bar);
	data &= 0xfffffff0;
	data = ~data;
	data += 1;
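	/* The sequence above is the standard PCI BAR-sizing probe: write
	 * all-ones to BAR1, read back the value, restore the original BAR,
	 * mask off the low flag bits, then invert and add one to get the
	 * size of the memory window decoded by the card. */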

	if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME,
			card)) {
		dev_printk(KERN_ERR, &card->dev->dev,
			   "Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto failed_req_irq;
	}

	dev_printk(KERN_INFO, &card->dev->dev,
		   "Window size %d bytes, IRQ %d\n", data, dev->irq);

	spin_lock_init(&card->lock);

	pci_set_drvdata(dev, card);

	if (pci_write_cmd != 0x0F)	/* If not Memory Write & Invalidate */
		pci_write_cmd = 0x07;	/* then Memory Write command */

	if (pci_write_cmd & 0x08) { /* use Memory Write and Invalidate */
		unsigned short cfg_command;
		pci_read_config_word(dev, PCI_COMMAND, &cfg_command);
		cfg_command |= 0x10;	/* Memory Write & Invalidate Enable */
		pci_write_config_word(dev, PCI_COMMAND, cfg_command);
	}
	pci_cmds = (pci_read_cmd << 28) | (pci_write_cmd << 24);
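	/* pci_cmds ends up ORed into each descriptor's control word (see
	 * add_bio() and mm_start_io()); the shifts suggest the read command
	 * occupies bits 31:28 and the write command bits 27:24 of
	 * DMA_STATUS_CTRL, though the exact layout is defined by the
	 * hardware documentation, not here. */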

	num_cards++;

	if (!get_userbit(card, MEMORY_INITIALIZED)) {
		dev_printk(KERN_INFO, &card->dev->dev,
			   "memory NOT initialized. Consider over-writing whole device.\n");
		card->init_size = 0;
	} else {
		dev_printk(KERN_INFO, &card->dev->dev,
			   "memory already initialized\n");
		card->init_size = card->mm_size;
	}

	/* Enable ECC */
	writeb(EDC_STORE_CORRECT, card->csr_remap + MEMCTRLCMD_ERRCTRL);

	return 0;

failed_req_irq:
failed_alloc:
	if (card->mm_pages[0].desc)
		pci_free_consistent(card->dev, PAGE_SIZE*2,
				    card->mm_pages[0].desc,
				    card->mm_pages[0].page_dma);
	if (card->mm_pages[1].desc)
		pci_free_consistent(card->dev, PAGE_SIZE*2,
				    card->mm_pages[1].desc,
				    card->mm_pages[1].page_dma);
failed_magic:
	iounmap(card->csr_remap);
failed_remap_csr:
	pci_release_regions(dev);
failed_req_csr:

	return ret;
}

static void mm_pci_remove(struct pci_dev *dev)
{
	struct cardinfo *card = pci_get_drvdata(dev);

	tasklet_kill(&card->tasklet);
	free_irq(dev->irq, card);
	iounmap(card->csr_remap);

	if (card->mm_pages[0].desc)
		pci_free_consistent(card->dev, PAGE_SIZE*2,
				    card->mm_pages[0].desc,
				    card->mm_pages[0].page_dma);
	if (card->mm_pages[1].desc)
		pci_free_consistent(card->dev, PAGE_SIZE*2,
				    card->mm_pages[1].desc,
				    card->mm_pages[1].page_dma);
	blk_cleanup_queue(card->queue);

	pci_release_regions(dev);
	pci_disable_device(dev);
}

static const struct pci_device_id mm_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5415CN)},
	{PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5425CN)},
	{PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_6155)},
	{
		.vendor	=	0x8086,
		.device	=	0xB555,
		.subvendor =	0x1332,
		.subdevice =	0x5460,
		.class =	0x050000,
		.class_mask =	0,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, mm_pci_ids);

static struct pci_driver mm_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mm_pci_ids,
	.probe		= mm_pci_probe,
	.remove		= mm_pci_remove,
};

static int __init mm_init(void)
{
	int retval, i;
	int err;

	retval = pci_register_driver(&mm_pci_driver);
	if (retval)
		return -ENOMEM;

	err = major_nr = register_blkdev(0, DRIVER_NAME);
	if (err < 0) {
		pci_unregister_driver(&mm_pci_driver);
		return -EIO;
	}

	for (i = 0; i < num_cards; i++) {
		mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
		if (!mm_gendisk[i])
			goto out;
	}

	for (i = 0; i < num_cards; i++) {
		struct gendisk *disk = mm_gendisk[i];
		sprintf(disk->disk_name, "umem%c", 'a'+i);
		spin_lock_init(&cards[i].lock);
		disk->major = major_nr;
		disk->first_minor = i << MM_SHIFT;
		disk->fops = &mm_fops;
		disk->private_data = &cards[i];
		disk->queue = cards[i].queue;
		set_capacity(disk, cards[i].mm_size << 1);
		add_disk(disk);
	}

	init_battery_timer();
	printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE);
	/* printk("mm_init: Done. 10-19-01 9:00\n"); */
	return 0;

out:
	pci_unregister_driver(&mm_pci_driver);
	unregister_blkdev(major_nr, DRIVER_NAME);
	while (i--)
		put_disk(mm_gendisk[i]);
	return -ENOMEM;
}

static void __exit mm_cleanup(void)
{
	int i;

	del_battery_timer();

	for (i = 0; i < num_cards ; i++) {
		del_gendisk(mm_gendisk[i]);
		put_disk(mm_gendisk[i]);
	}

	pci_unregister_driver(&mm_pci_driver);

	unregister_blkdev(major_nr, DRIVER_NAME);
}

module_init(mm_init);
module_exit(mm_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");