Root/
1 | /* |
2 | * EP93XX PATA controller driver. |
3 | * |
4 | * Copyright (c) 2012, Metasoft s.c. |
5 | * Rafal Prylowski <prylowski@metasoft.pl> |
6 | * |
7 | * Based on pata_scc.c, pata_icside.c and on earlier version of EP93XX |
8 | * PATA driver by Lennert Buytenhek and Alessandro Zummo. |
9 | * Read/Write timings, resource management and other improvements |
10 | * from driver by Joao Ramos and Bartlomiej Zolnierkiewicz. |
11 | * DMA engine support based on spi-ep93xx.c by Mika Westerberg. |
12 | * |
13 | * Original copyrights: |
14 | * |
15 | * Support for Cirrus Logic's EP93xx (EP9312, EP9315) CPUs |
16 | * PATA host controller driver. |
17 | * |
18 | * Copyright (c) 2009, Bartlomiej Zolnierkiewicz |
19 | * |
20 | * Heavily based on the ep93xx-ide.c driver: |
21 | * |
22 | * Copyright (c) 2009, Joao Ramos <joao.ramos@inov.pt> |
23 | * INESC Inovacao (INOV) |
24 | * |
25 | * EP93XX PATA controller driver. |
26 | * Copyright (C) 2007 Lennert Buytenhek <buytenh@wantstofly.org> |
27 | * |
28 | * An ATA driver for the Cirrus Logic EP93xx PATA controller. |
29 | * |
30 | * Based on an earlier version by Alessandro Zummo, which is: |
31 | * Copyright (C) 2006 Tower Technologies |
32 | */ |
33 | |
34 | #include <linux/err.h> |
35 | #include <linux/kernel.h> |
36 | #include <linux/module.h> |
37 | #include <linux/init.h> |
38 | #include <linux/blkdev.h> |
39 | #include <scsi/scsi_host.h> |
40 | #include <linux/ata.h> |
41 | #include <linux/libata.h> |
42 | #include <linux/platform_device.h> |
43 | #include <linux/delay.h> |
44 | #include <linux/dmaengine.h> |
45 | #include <linux/ktime.h> |
46 | |
47 | #include <linux/platform_data/dma-ep93xx.h> |
48 | #include <mach/platform.h> |
49 | |
50 | #define DRV_NAME "ep93xx-ide" |
51 | #define DRV_VERSION "1.0" |
52 | |
/* EP93xx IDE interface register offsets and bit definitions (EP93xx UG ch. 27). */
enum {
	/* IDE Control Register */
	IDECTRL = 0x00,
	IDECTRL_CS0N = (1 << 0),
	IDECTRL_CS1N = (1 << 1),
	IDECTRL_DIORN = (1 << 5),
	IDECTRL_DIOWN = (1 << 6),
	IDECTRL_INTRQ = (1 << 9),
	IDECTRL_IORDY = (1 << 10),
	/*
	 * the device IDE register to be accessed is selected through
	 * IDECTRL register's specific bitfields 'DA', 'CS1N' and 'CS0N':
	 *   b4   b3   b2    b1     b0
	 *   A2   A1   A0   CS1N   CS0N
	 * the values filled in this structure allows the value to be directly
	 * ORed to the IDECTRL register, hence giving directly the A[2:0] and
	 * CS1N/CS0N values for each IDE register.
	 * The values correspond to the transformation:
	 *   ((real IDE address) << 2) | CS1N value << 1 | CS0N value
	 */
	IDECTRL_ADDR_CMD = 0 + 2, /* CS1 */
	IDECTRL_ADDR_DATA = (ATA_REG_DATA << 2) + 2,
	IDECTRL_ADDR_ERROR = (ATA_REG_ERR << 2) + 2,
	IDECTRL_ADDR_FEATURE = (ATA_REG_FEATURE << 2) + 2,
	IDECTRL_ADDR_NSECT = (ATA_REG_NSECT << 2) + 2,
	IDECTRL_ADDR_LBAL = (ATA_REG_LBAL << 2) + 2,
	IDECTRL_ADDR_LBAM = (ATA_REG_LBAM << 2) + 2,
	IDECTRL_ADDR_LBAH = (ATA_REG_LBAH << 2) + 2,
	IDECTRL_ADDR_DEVICE = (ATA_REG_DEVICE << 2) + 2,
	IDECTRL_ADDR_STATUS = (ATA_REG_STATUS << 2) + 2,
	IDECTRL_ADDR_COMMAND = (ATA_REG_CMD << 2) + 2,
	/* ALTSTATUS and CTL share one device register (read vs. write view) */
	IDECTRL_ADDR_ALTSTATUS = (0x06 << 2) + 1, /* CS0 */
	IDECTRL_ADDR_CTL = (0x06 << 2) + 1, /* CS0 */

	/* IDE Configuration Register */
	IDECFG = 0x04,
	IDECFG_IDEEN = (1 << 0),
	IDECFG_PIO = (1 << 1),
	IDECFG_MDMA = (1 << 2),
	IDECFG_UDMA = (1 << 3),
	IDECFG_MODE_SHIFT = 4,
	IDECFG_MODE_MASK = (0xf << 4),
	IDECFG_WST_SHIFT = 8,
	IDECFG_WST_MASK = (0x3 << 8),

	/* MDMA Operation Register */
	IDEMDMAOP = 0x08,

	/* UDMA Operation Register */
	IDEUDMAOP = 0x0c,
	IDEUDMAOP_UEN = (1 << 0),
	IDEUDMAOP_RWOP = (1 << 1),

	/* PIO/MDMA/UDMA Data Registers */
	IDEDATAOUT = 0x10,
	IDEDATAIN = 0x14,
	IDEMDMADATAOUT = 0x18,
	IDEMDMADATAIN = 0x1c,
	IDEUDMADATAOUT = 0x20,
	IDEUDMADATAIN = 0x24,

	/* UDMA Status Register */
	IDEUDMASTS = 0x28,
	IDEUDMASTS_DMAIDE = (1 << 16),
	IDEUDMASTS_INTIDE = (1 << 17),
	IDEUDMASTS_SBUSY = (1 << 18),
	IDEUDMASTS_NDO = (1 << 24),
	IDEUDMASTS_NDI = (1 << 25),
	IDEUDMASTS_N4X = (1 << 26),

	/* UDMA Debug Status Register */
	IDEUDMADEBUG = 0x2c,
};
126 | |
/* Per-controller driver state, reachable via ata_host->private_data. */
struct ep93xx_pata_data {
	const struct platform_device *pdev;	/* owning platform device */
	void __iomem *ide_base;			/* mapped IDE register block */
	struct ata_timing t;			/* active PIO timings, in ns */
	bool iordy;				/* honour IORDY flow control? */

	unsigned long udma_in_phys;		/* phys addr of IDEUDMADATAIN (DMA src) */
	unsigned long udma_out_phys;		/* phys addr of IDEUDMADATAOUT (DMA dst) */

	struct dma_chan *dma_rx_channel;	/* device-to-memory UDMA channel */
	struct ep93xx_dma_data dma_rx_data;
	struct dma_chan *dma_tx_channel;	/* memory-to-device UDMA channel */
	struct ep93xx_dma_data dma_tx_data;
};
141 | |
/*
 * Put the IDE interface into an idle, disabled state: deassert both chip
 * selects and both strobes, then zero every configuration/data register.
 */
static void ep93xx_pata_clear_regs(void __iomem *base)
{
	writel(IDECTRL_CS0N | IDECTRL_CS1N | IDECTRL_DIORN |
		IDECTRL_DIOWN, base + IDECTRL);

	writel(0, base + IDECFG);
	writel(0, base + IDEMDMAOP);
	writel(0, base + IDEUDMAOP);
	writel(0, base + IDEDATAOUT);
	writel(0, base + IDEDATAIN);
	writel(0, base + IDEMDMADATAOUT);
	writel(0, base + IDEMDMADATAIN);
	writel(0, base + IDEUDMADATAOUT);
	writel(0, base + IDEUDMADATAIN);
	writel(0, base + IDEUDMADEBUG);
}
158 | |
159 | static bool ep93xx_pata_check_iordy(void __iomem *base) |
160 | { |
161 | return !!(readl(base + IDECTRL) & IDECTRL_IORDY); |
162 | } |
163 | |
164 | /* |
165 | * According to EP93xx User's Guide, WST field of IDECFG specifies number |
166 | * of HCLK cycles to hold the data bus after a PIO write operation. |
167 | * It should be programmed to guarantee following delays: |
168 | * |
169 | * PIO Mode [ns] |
170 | * 0 30 |
171 | * 1 20 |
172 | * 2 15 |
173 | * 3 10 |
174 | * 4 5 |
175 | * |
176 | * Maximum possible value for HCLK is 100MHz. |
177 | */ |
178 | static int ep93xx_pata_get_wst(int pio_mode) |
179 | { |
180 | int val; |
181 | |
182 | if (pio_mode == 0) |
183 | val = 3; |
184 | else if (pio_mode < 3) |
185 | val = 2; |
186 | else |
187 | val = 1; |
188 | |
189 | return val << IDECFG_WST_SHIFT; |
190 | } |
191 | |
192 | static void ep93xx_pata_enable_pio(void __iomem *base, int pio_mode) |
193 | { |
194 | writel(IDECFG_IDEEN | IDECFG_PIO | |
195 | ep93xx_pata_get_wst(pio_mode) | |
196 | (pio_mode << IDECFG_MODE_SHIFT), base + IDECFG); |
197 | } |
198 | |
199 | /* |
200 | * Based on delay loop found in mach-pxa/mp900.c. |
201 | * |
202 | * Single iteration should take 5 cpu cycles. This is 25ns assuming the |
203 | * fastest ep93xx cpu speed (200MHz) and is better optimized for PIO4 timings |
204 | * than eg. 20ns. |
205 | */ |
/* Busy-wait @count loop iterations (~25 ns each at 200 MHz, see above). */
static void ep93xx_pata_delay(unsigned long count)
{
	/* "mov r0, r0" is a nop padding the loop to 5 cycles per iteration */
	__asm__ volatile (
		"0:\n"
		"mov r0, r0\n"
		"subs %0, %1, #1\n"
		"bge 0b\n"
		: "=r" (count)
		: "0" (count)
	);
}
217 | |
/*
 * Poll IORDY until it is asserted or the maximum pulse width elapses.
 * Returns the number of delay-loop iterations spent waiting, so the
 * caller can account the extra time against the active period t2.
 */
static unsigned long ep93xx_pata_wait_for_iordy(void __iomem *base,
						unsigned long t2)
{
	/*
	 * According to ATA specification, IORDY pin can be first sampled
	 * tA = 35ns after activation of DIOR-/DIOW-. Maximum IORDY pulse
	 * width is tB = 1250ns.
	 *
	 * We are already t2 delay loop iterations after activation of
	 * DIOR-/DIOW-, so we set timeout to (1250 + 35) / 25 - t2 additional
	 * delay loop iterations.
	 */
	unsigned long start = (1250 + 35) / 25 - t2;
	unsigned long counter = start;

	while (!ep93xx_pata_check_iordy(base) && counter--)
		ep93xx_pata_delay(1);
	/*
	 * NOTE(review): on timeout the post-decrement wraps counter to
	 * ULONG_MAX, making this return start + 1; and for t2 > 51 'start'
	 * itself underflows. Both look benign for valid PIO timings, but
	 * worth confirming against the EP93xx UG if timings are extended.
	 */
	return start - counter;
}
237 | |
238 | /* common part at start of ep93xx_pata_read/write() */ |
static void ep93xx_pata_rw_begin(void __iomem *base, unsigned long addr,
				 unsigned long t1)
{
	/* drive address/chip-selects with both strobes deasserted, wait t1 (setup) */
	writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
	ep93xx_pata_delay(t1);
}
245 | |
246 | /* common part at end of ep93xx_pata_read/write() */ |
static void ep93xx_pata_rw_end(void __iomem *base, unsigned long addr,
			       bool iordy, unsigned long t0, unsigned long t2,
			       unsigned long t2i)
{
	/* hold the strobe asserted for the active period t2 */
	ep93xx_pata_delay(t2);
	/* lengthen t2 if needed */
	if (iordy)
		t2 += ep93xx_pata_wait_for_iordy(base, t2);
	/* deassert both strobes, keeping address/chip-select lines stable */
	writel(IDECTRL_DIOWN | IDECTRL_DIORN | addr, base + IDECTRL);
	/* honour both the recovery time t2i and the total cycle time t0 */
	if (t0 > t2 && t0 - t2 > t2i)
		ep93xx_pata_delay(t0 - t2);
	else
		ep93xx_pata_delay(t2i);
}
261 | |
/*
 * Perform one bit-banged read cycle at @addr. @reg selects 8-bit
 * register timings (cyc8b/act8b/rec8b) versus 16-bit data timings.
 */
static u16 ep93xx_pata_read(struct ep93xx_pata_data *drv_data,
			    unsigned long addr,
			    bool reg)
{
	void __iomem *base = drv_data->ide_base;
	const struct ata_timing *t = &drv_data->t;
	unsigned long t0 = reg ? t->cyc8b : t->cycle;
	unsigned long t2 = reg ? t->act8b : t->active;
	unsigned long t2i = reg ? t->rec8b : t->recover;

	ep93xx_pata_rw_begin(base, addr, t->setup);
	/* assert DIOR- (clearing IDECTRL_DIORN drives the read strobe low) */
	writel(IDECTRL_DIOWN | addr, base + IDECTRL);
	/*
	 * The IDEDATAIN register is loaded from the DD pins at the positive
	 * edge of the DIORN signal. (EP93xx UG p27-14)
	 */
	ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
	return readl(base + IDEDATAIN);
}
281 | |
282 | /* IDE register read */ |
/* IDE register read (uses the slower 8-bit register timings) */
static u16 ep93xx_pata_read_reg(struct ep93xx_pata_data *drv_data,
				unsigned long addr)
{
	return ep93xx_pata_read(drv_data, addr, true);
}
288 | |
289 | /* PIO data read */ |
/* PIO data read (uses the 16-bit data-register timings) */
static u16 ep93xx_pata_read_data(struct ep93xx_pata_data *drv_data,
				 unsigned long addr)
{
	return ep93xx_pata_read(drv_data, addr, false);
}
295 | |
/*
 * Perform one bit-banged write cycle of @value at @addr. @reg selects
 * 8-bit register timings versus 16-bit data timings, as in the read path.
 */
static void ep93xx_pata_write(struct ep93xx_pata_data *drv_data,
			      u16 value, unsigned long addr,
			      bool reg)
{
	void __iomem *base = drv_data->ide_base;
	const struct ata_timing *t = &drv_data->t;
	unsigned long t0 = reg ? t->cyc8b : t->cycle;
	unsigned long t2 = reg ? t->act8b : t->active;
	unsigned long t2i = reg ? t->rec8b : t->recover;

	ep93xx_pata_rw_begin(base, addr, t->setup);
	/*
	 * Value from IDEDATAOUT register is driven onto the DD pins when
	 * DIOWN is low. (EP93xx UG p27-13)
	 */
	writel(value, base + IDEDATAOUT);
	/* assert DIOW- (clearing IDECTRL_DIOWN drives the write strobe low) */
	writel(IDECTRL_DIORN | addr, base + IDECTRL);
	ep93xx_pata_rw_end(base, addr, drv_data->iordy, t0, t2, t2i);
}
315 | |
316 | /* IDE register write */ |
/* IDE register write (uses the slower 8-bit register timings) */
static void ep93xx_pata_write_reg(struct ep93xx_pata_data *drv_data,
				  u16 value, unsigned long addr)
{
	ep93xx_pata_write(drv_data, value, addr, true);
}
322 | |
323 | /* PIO data write */ |
/* PIO data write (uses the 16-bit data-register timings) */
static void ep93xx_pata_write_data(struct ep93xx_pata_data *drv_data,
				   u16 value, unsigned long addr)
{
	ep93xx_pata_write(drv_data, value, addr, false);
}
329 | |
/*
 * libata ->set_piomode hook: compute software delay-loop timings for
 * @adev (merged with its pair on the shared cable) and program the
 * controller's hardware PIO mode.
 */
static void ep93xx_pata_set_piomode(struct ata_port *ap,
				    struct ata_device *adev)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	struct ata_device *pair = ata_dev_pair(adev);
	/*
	 * Calculate timings for the delay loop, assuming ep93xx cpu speed
	 * is 200MHz (maximum possible for ep93xx). If actual cpu speed is
	 * slower, we will wait a bit longer in each delay.
	 * Additional division of cpu speed by 5, because single iteration
	 * of our delay loop takes 5 cpu cycles (25ns).
	 */
	unsigned long T = 1000000 / (200 / 5);

	ata_timing_compute(adev, adev->pio_mode, &drv_data->t, T, 0);
	if (pair && pair->pio_mode) {
		struct ata_timing t;
		/* setup and 8-bit register timings must satisfy both devices */
		ata_timing_compute(pair, pair->pio_mode, &t, T, 0);
		ata_timing_merge(&t, &drv_data->t, &drv_data->t,
			ATA_TIMING_SETUP | ATA_TIMING_8BIT);
	}
	drv_data->iordy = ata_pio_need_iordy(adev);

	ep93xx_pata_enable_pio(drv_data->ide_base,
			       adev->pio_mode - XFER_PIO_0);
}
356 | |
357 | /* Note: original code is ata_sff_check_status */ |
/* Read the ATA Status register (clears a pending device interrupt). */
static u8 ep93xx_pata_check_status(struct ata_port *ap)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_STATUS);
}
364 | |
/* Read the Alternate Status register (does not acknowledge interrupts). */
static u8 ep93xx_pata_check_altstatus(struct ata_port *ap)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	return ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_ALTSTATUS);
}
371 | |
372 | /* Note: original code is ata_sff_tf_load */ |
/*
 * Write a taskfile to the device registers via the bit-banged accessors.
 * Register order (ctl, HOB bytes, then low bytes, then device) mirrors
 * ata_sff_tf_load and must be preserved.
 */
static void ep93xx_pata_tf_load(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* update device control only when it actually changed */
	if (tf->ctl != ap->last_ctl) {
		ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* for LBA48, the high-order bytes go out first */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		ep93xx_pata_write_reg(drv_data, tf->hob_feature,
			IDECTRL_ADDR_FEATURE);
		ep93xx_pata_write_reg(drv_data, tf->hob_nsect,
			IDECTRL_ADDR_NSECT);
		ep93xx_pata_write_reg(drv_data, tf->hob_lbal,
			IDECTRL_ADDR_LBAL);
		ep93xx_pata_write_reg(drv_data, tf->hob_lbam,
			IDECTRL_ADDR_LBAM);
		ep93xx_pata_write_reg(drv_data, tf->hob_lbah,
			IDECTRL_ADDR_LBAH);
	}

	if (is_addr) {
		ep93xx_pata_write_reg(drv_data, tf->feature,
			IDECTRL_ADDR_FEATURE);
		ep93xx_pata_write_reg(drv_data, tf->nsect, IDECTRL_ADDR_NSECT);
		ep93xx_pata_write_reg(drv_data, tf->lbal, IDECTRL_ADDR_LBAL);
		ep93xx_pata_write_reg(drv_data, tf->lbam, IDECTRL_ADDR_LBAM);
		ep93xx_pata_write_reg(drv_data, tf->lbah, IDECTRL_ADDR_LBAH);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		ep93xx_pata_write_reg(drv_data, tf->device,
			IDECTRL_ADDR_DEVICE);

	ata_wait_idle(ap);
}
413 | |
414 | /* Note: original code is ata_sff_tf_read */ |
/*
 * Read the current taskfile back from the device. For LBA48, the HOB
 * bit is toggled in the control register to expose the shadow (high
 * order) registers, then restored.
 */
static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	tf->command = ep93xx_pata_check_status(ap);
	tf->feature = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_FEATURE);
	tf->nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT);
	tf->lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL);
	tf->lbam = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAM);
	tf->lbah = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAH);
	tf->device = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DEVICE);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* select the HOB shadow registers */
		ep93xx_pata_write_reg(drv_data, tf->ctl | ATA_HOB,
			IDECTRL_ADDR_CTL);
		tf->hob_feature = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_FEATURE);
		tf->hob_nsect = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_NSECT);
		tf->hob_lbal = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_LBAL);
		tf->hob_lbam = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_LBAM);
		tf->hob_lbah = ep93xx_pata_read_reg(drv_data,
			IDECTRL_ADDR_LBAH);
		/* restore the original device control value */
		ep93xx_pata_write_reg(drv_data, tf->ctl, IDECTRL_ADDR_CTL);
		ap->last_ctl = tf->ctl;
	}
}
444 | |
445 | /* Note: original code is ata_sff_exec_command */ |
/* Issue the command byte to the device, then pause per the SFF protocol. */
static void ep93xx_pata_exec_command(struct ata_port *ap,
				     const struct ata_taskfile *tf)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	ep93xx_pata_write_reg(drv_data, tf->command,
			  IDECTRL_ADDR_COMMAND);
	ata_sff_pause(ap);
}
455 | |
456 | /* Note: original code is ata_sff_dev_select */ |
/* Select master (@device == 0) or slave via the Device register's DEV bit. */
static void ep93xx_pata_dev_select(struct ata_port *ap, unsigned int device)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	u8 tmp = ATA_DEVICE_OBS;

	if (device != 0)
		tmp |= ATA_DEV1;

	ep93xx_pata_write_reg(drv_data, tmp, IDECTRL_ADDR_DEVICE);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
468 | |
469 | /* Note: original code is ata_sff_set_devctl */ |
/* Write @ctl to the Device Control register. */
static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	ep93xx_pata_write_reg(drv_data, ctl, IDECTRL_ADDR_CTL);
}
476 | |
477 | /* Note: original code is ata_sff_data_xfer */ |
478 | static unsigned int ep93xx_pata_data_xfer(struct ata_device *adev, |
479 | unsigned char *buf, |
480 | unsigned int buflen, int rw) |
481 | { |
482 | struct ata_port *ap = adev->link->ap; |
483 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
484 | u16 *data = (u16 *)buf; |
485 | unsigned int words = buflen >> 1; |
486 | |
487 | /* Transfer multiple of 2 bytes */ |
488 | while (words--) |
489 | if (rw == READ) |
490 | *data++ = cpu_to_le16( |
491 | ep93xx_pata_read_data( |
492 | drv_data, IDECTRL_ADDR_DATA)); |
493 | else |
494 | ep93xx_pata_write_data(drv_data, le16_to_cpu(*data++), |
495 | IDECTRL_ADDR_DATA); |
496 | |
497 | /* Transfer trailing 1 byte, if any. */ |
498 | if (unlikely(buflen & 0x01)) { |
499 | unsigned char pad[2] = { }; |
500 | |
501 | buf += buflen - 1; |
502 | |
503 | if (rw == READ) { |
504 | *pad = cpu_to_le16( |
505 | ep93xx_pata_read_data( |
506 | drv_data, IDECTRL_ADDR_DATA)); |
507 | *buf = pad[0]; |
508 | } else { |
509 | pad[0] = *buf; |
510 | ep93xx_pata_write_data(drv_data, le16_to_cpu(*pad), |
511 | IDECTRL_ADDR_DATA); |
512 | } |
513 | words++; |
514 | } |
515 | |
516 | return words << 1; |
517 | } |
518 | |
519 | /* Note: original code is ata_devchk */ |
520 | static bool ep93xx_pata_device_is_present(struct ata_port *ap, |
521 | unsigned int device) |
522 | { |
523 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
524 | u8 nsect, lbal; |
525 | |
526 | ap->ops->sff_dev_select(ap, device); |
527 | |
528 | ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT); |
529 | ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL); |
530 | |
531 | ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_NSECT); |
532 | ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_LBAL); |
533 | |
534 | ep93xx_pata_write_reg(drv_data, 0x55, IDECTRL_ADDR_NSECT); |
535 | ep93xx_pata_write_reg(drv_data, 0xaa, IDECTRL_ADDR_LBAL); |
536 | |
537 | nsect = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_NSECT); |
538 | lbal = ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_LBAL); |
539 | |
540 | if ((nsect == 0x55) && (lbal == 0xaa)) |
541 | return true; |
542 | |
543 | return false; |
544 | } |
545 | |
546 | /* Note: original code is ata_sff_wait_after_reset */ |
/*
 * Wait for the link's devices to become ready after a bus reset;
 * returns 0, -ENODEV if only the slave is missing, or another negative
 * errno on hard failure. Mirrors ata_sff_wait_after_reset().
 */
static int ep93xx_pata_wait_after_reset(struct ata_link *link,
					unsigned int devmask,
					unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	ata_msleep(ap, ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/*
	 * -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/*
	 * if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/*
		 * Wait for register access. Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it. We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ep93xx_pata_read_reg(drv_data,
				IDECTRL_ADDR_NSECT);
			lbal = ep93xx_pata_read_reg(drv_data,
				IDECTRL_ADDR_LBAL);
			/* 0x01/0x01 is the post-reset signature */
			if (nsect == 1 && lbal == 1)
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			/* a missing slave is reported, not fatal */
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}
	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}
610 | |
611 | /* Note: original code is ata_bus_softreset */ |
/*
 * Pulse SRST in the Device Control register, then wait for the devices
 * in @devmask to come back. Mirrors ata_bus_softreset().
 */
static int ep93xx_pata_bus_softreset(struct ata_port *ap, unsigned int devmask,
				     unsigned long deadline)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
	udelay(20);	/* FIXME: flush */
	ep93xx_pata_write_reg(drv_data, ap->ctl | ATA_SRST, IDECTRL_ADDR_CTL);
	udelay(20);	/* FIXME: flush */
	ep93xx_pata_write_reg(drv_data, ap->ctl, IDECTRL_ADDR_CTL);
	ap->last_ctl = ap->ctl;

	return ep93xx_pata_wait_after_reset(&ap->link, devmask, deadline);
}
626 | |
627 | static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data) |
628 | { |
629 | if (drv_data->dma_rx_channel) { |
630 | dma_release_channel(drv_data->dma_rx_channel); |
631 | drv_data->dma_rx_channel = NULL; |
632 | } |
633 | if (drv_data->dma_tx_channel) { |
634 | dma_release_channel(drv_data->dma_tx_channel); |
635 | drv_data->dma_tx_channel = NULL; |
636 | } |
637 | } |
638 | |
/*
 * dma_request_channel() filter: accept only M2M channels (IDE UDMA is
 * not an M2P peripheral port) and stash the channel parameters.
 */
static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param)
{
	if (ep93xx_dma_chan_is_m2p(chan))
		return false;

	/* filter_param is the matching struct ep93xx_dma_data */
	chan->private = filter_param;
	return true;
}
647 | |
648 | static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data) |
649 | { |
650 | const struct platform_device *pdev = drv_data->pdev; |
651 | dma_cap_mask_t mask; |
652 | struct dma_slave_config conf; |
653 | |
654 | dma_cap_zero(mask); |
655 | dma_cap_set(DMA_SLAVE, mask); |
656 | |
657 | /* |
658 | * Request two channels for IDE. Another possibility would be |
659 | * to request only one channel, and reprogram it's direction at |
660 | * start of new transfer. |
661 | */ |
662 | drv_data->dma_rx_data.port = EP93XX_DMA_IDE; |
663 | drv_data->dma_rx_data.direction = DMA_FROM_DEVICE; |
664 | drv_data->dma_rx_data.name = "ep93xx-pata-rx"; |
665 | drv_data->dma_rx_channel = dma_request_channel(mask, |
666 | ep93xx_pata_dma_filter, &drv_data->dma_rx_data); |
667 | if (!drv_data->dma_rx_channel) |
668 | return; |
669 | |
670 | drv_data->dma_tx_data.port = EP93XX_DMA_IDE; |
671 | drv_data->dma_tx_data.direction = DMA_TO_DEVICE; |
672 | drv_data->dma_tx_data.name = "ep93xx-pata-tx"; |
673 | drv_data->dma_tx_channel = dma_request_channel(mask, |
674 | ep93xx_pata_dma_filter, &drv_data->dma_tx_data); |
675 | if (!drv_data->dma_tx_channel) { |
676 | dma_release_channel(drv_data->dma_rx_channel); |
677 | return; |
678 | } |
679 | |
680 | /* Configure receive channel direction and source address */ |
681 | memset(&conf, 0, sizeof(conf)); |
682 | conf.direction = DMA_FROM_DEVICE; |
683 | conf.src_addr = drv_data->udma_in_phys; |
684 | conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
685 | if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) { |
686 | dev_err(&pdev->dev, "failed to configure rx dma channel\n"); |
687 | ep93xx_pata_release_dma(drv_data); |
688 | return; |
689 | } |
690 | |
691 | /* Configure transmit channel direction and destination address */ |
692 | memset(&conf, 0, sizeof(conf)); |
693 | conf.direction = DMA_TO_DEVICE; |
694 | conf.dst_addr = drv_data->udma_out_phys; |
695 | conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
696 | if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) { |
697 | dev_err(&pdev->dev, "failed to configure tx dma channel\n"); |
698 | ep93xx_pata_release_dma(drv_data); |
699 | } |
700 | } |
701 | |
702 | static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc) |
703 | { |
704 | struct dma_async_tx_descriptor *txd; |
705 | struct ep93xx_pata_data *drv_data = qc->ap->host->private_data; |
706 | void __iomem *base = drv_data->ide_base; |
707 | struct ata_device *adev = qc->dev; |
708 | u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0; |
709 | struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE |
710 | ? drv_data->dma_tx_channel : drv_data->dma_rx_channel; |
711 | |
712 | txd = channel->device->device_prep_slave_sg(channel, qc->sg, |
713 | qc->n_elem, qc->dma_dir, DMA_CTRL_ACK, NULL); |
714 | if (!txd) { |
715 | dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n"); |
716 | return; |
717 | } |
718 | txd->callback = NULL; |
719 | txd->callback_param = NULL; |
720 | |
721 | if (dmaengine_submit(txd) < 0) { |
722 | dev_err(qc->ap->dev, "failed to submit dma transfer\n"); |
723 | return; |
724 | } |
725 | dma_async_issue_pending(channel); |
726 | |
727 | /* |
728 | * When enabling UDMA operation, IDEUDMAOP register needs to be |
729 | * programmed in three step sequence: |
730 | * 1) set or clear the RWOP bit, |
731 | * 2) perform dummy read of the register, |
732 | * 3) set the UEN bit. |
733 | */ |
734 | writel(v, base + IDEUDMAOP); |
735 | readl(base + IDEUDMAOP); |
736 | writel(v | IDEUDMAOP_UEN, base + IDEUDMAOP); |
737 | |
738 | writel(IDECFG_IDEEN | IDECFG_UDMA | |
739 | ((adev->xfer_mode - XFER_UDMA_0) << IDECFG_MODE_SHIFT), |
740 | base + IDECFG); |
741 | } |
742 | |
/*
 * libata ->bmdma_stop hook: kill any in-flight DMA, quiesce the UDMA
 * engine and restore the device's PIO mode.
 */
static void ep93xx_pata_dma_stop(struct ata_queued_cmd *qc)
{
	struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
	void __iomem *base = drv_data->ide_base;

	/* terminate all dma transfers, if not yet finished */
	dmaengine_terminate_all(drv_data->dma_rx_channel);
	dmaengine_terminate_all(drv_data->dma_tx_channel);

	/*
	 * To properly stop IDE-DMA, IDEUDMAOP register must to be cleared
	 * and IDECTRL register must be set to default value.
	 */
	writel(0, base + IDEUDMAOP);
	writel(readl(base + IDECTRL) | IDECTRL_DIOWN | IDECTRL_DIORN |
		IDECTRL_CS0N | IDECTRL_CS1N, base + IDECTRL);

	/* fall back to the PIO mode negotiated for the device */
	ep93xx_pata_enable_pio(drv_data->ide_base,
		qc->dev->pio_mode - XFER_PIO_0);

	ata_sff_dma_pause(qc->ap);
}
765 | |
/*
 * libata ->bmdma_setup hook: there is no BMDMA command register to
 * program, so just issue the taskfile command.
 */
static void ep93xx_pata_dma_setup(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}
770 | |
/*
 * libata ->bmdma_status hook: synthesize BMDMA-style status bits
 * (ATA_DMA_ERR/INTR/ACTIVE) from the UDMA status register and the
 * INTRQ pin.
 */
static u8 ep93xx_pata_dma_status(struct ata_port *ap)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	u32 val = readl(drv_data->ide_base + IDEUDMASTS);

	/*
	 * UDMA Status Register bits:
	 *
	 * DMAIDE - DMA request signal from UDMA state machine,
	 * INTIDE - INT line generated by UDMA because of errors in the
	 *          state machine,
	 * SBUSY - UDMA state machine busy, not in idle state,
	 * NDO   - error for data-out not completed,
	 * NDI   - error for data-in not completed,
	 * N4X   - error for data transferred not multiplies of four
	 *         32-bit words.
	 * (EP93xx UG p27-17)
	 */
	if (val & IDEUDMASTS_NDO || val & IDEUDMASTS_NDI ||
	    val & IDEUDMASTS_N4X || val & IDEUDMASTS_INTIDE)
		return ATA_DMA_ERR;

	/* read INTRQ (INT[3]) pin input state */
	if (readl(drv_data->ide_base + IDECTRL) & IDECTRL_INTRQ)
		return ATA_DMA_INTR;

	if (val & IDEUDMASTS_SBUSY || val & IDEUDMASTS_DMAIDE)
		return ATA_DMA_ACTIVE;

	return 0;
}
802 | |
803 | /* Note: original code is ata_sff_softreset */ |
/*
 * libata ->softreset hook: probe for devices, pulse SRST and classify
 * what responded. Mirrors ata_sff_softreset().
 */
static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = al->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	/* determine if device 0/1 are present */
	if (ep93xx_pata_device_is_present(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ep93xx_pata_device_is_present(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->sff_dev_select(al->ap, 0);

	/* issue bus reset */
	rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(al))) {
		ata_link_err(al, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&al->device[0], devmask & (1 << 0),
		&err);
	/* err == 0x81 means device 0 failed diagnostics; skip the slave */
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(&al->device[1],
			devmask & (1 << 1), &err);

	return 0;
}
839 | |
840 | /* Note: original code is ata_sff_drain_fifo */ |
/*
 * Error-recovery helper: while the device still asserts DRQ, discard
 * its pending data (up to 64 KiB). Mirrors ata_sff_drain_fifo().
 */
static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
{
	int count;
	struct ata_port *ap;
	struct ep93xx_pata_data *drv_data;

	/* We only need to flush incoming data when a command was running */
	if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
		return;

	ap = qc->ap;
	drv_data = ap->host->private_data;
	/*
	 * Drain up to 64K of data before we give up this recovery method.
	 * NOTE(review): this drains the data register via the slower 8-bit
	 * register timings (read_reg, not read_data) - presumably fine for
	 * an error path, but confirm it was intentional.
	 */
	for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
		     && count < 65536; count += 2)
		ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA);

	/* Can become DEBUG later */
	if (count)
		ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count);

}
863 | |
/* libata ->port_start hook: seed conservative PIO0 timings. */
static int ep93xx_pata_port_start(struct ata_port *ap)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	/*
	 * Set timings to safe values at startup (= number of ns from ATA
	 * specification), we'll switch to properly calculated values later.
	 */
	drv_data->t = *ata_timing_find_mode(XFER_PIO_0);
	return 0;
}
875 | |
/* SCSI host template: stock libata defaults plus ep93xx DMA limits. */
static struct scsi_host_template ep93xx_pata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	/* ep93xx dma implementation limit */
	.sg_tablesize		= 32,
	/* ep93xx dma can't transfer 65536 bytes at once */
	.dma_boundary		= 0x7fff,
};
883 | |
/*
 * Port operations: inherit BMDMA defaults, override every SFF register
 * accessor with the bit-banged IDECTRL variants and map the BMDMA hooks
 * onto the controller's UDMA engine.
 */
static struct ata_port_operations ep93xx_pata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	/* no BMDMA PRD table to build */
	.qc_prep		= ata_noop_qc_prep,

	.softreset		= ep93xx_pata_softreset,
	.hardreset		= ATA_OP_NULL,

	.sff_dev_select		= ep93xx_pata_dev_select,
	.sff_set_devctl		= ep93xx_pata_set_devctl,
	.sff_check_status	= ep93xx_pata_check_status,
	.sff_check_altstatus	= ep93xx_pata_check_altstatus,
	.sff_tf_load		= ep93xx_pata_tf_load,
	.sff_tf_read		= ep93xx_pata_tf_read,
	.sff_exec_command	= ep93xx_pata_exec_command,
	.sff_data_xfer		= ep93xx_pata_data_xfer,
	.sff_drain_fifo		= ep93xx_pata_drain_fifo,
	.sff_irq_clear		= ATA_OP_NULL,

	.set_piomode		= ep93xx_pata_set_piomode,

	.bmdma_setup		= ep93xx_pata_dma_setup,
	.bmdma_start		= ep93xx_pata_dma_start,
	.bmdma_stop		= ep93xx_pata_dma_stop,
	.bmdma_status		= ep93xx_pata_dma_status,

	.cable_detect		= ata_cable_unknown,
	.port_start		= ep93xx_pata_port_start,
};
913 | |
914 | static int ep93xx_pata_probe(struct platform_device *pdev) |
915 | { |
916 | struct ep93xx_pata_data *drv_data; |
917 | struct ata_host *host; |
918 | struct ata_port *ap; |
919 | unsigned int irq; |
920 | struct resource *mem_res; |
921 | void __iomem *ide_base; |
922 | int err; |
923 | |
924 | err = ep93xx_ide_acquire_gpio(pdev); |
925 | if (err) |
926 | return err; |
927 | |
928 | /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */ |
929 | irq = platform_get_irq(pdev, 0); |
930 | if (irq < 0) { |
931 | err = -ENXIO; |
932 | goto err_rel_gpio; |
933 | } |
934 | |
935 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
936 | if (!mem_res) { |
937 | err = -ENXIO; |
938 | goto err_rel_gpio; |
939 | } |
940 | |
941 | ide_base = devm_ioremap_resource(&pdev->dev, mem_res); |
942 | if (IS_ERR(ide_base)) { |
943 | err = PTR_ERR(ide_base); |
944 | goto err_rel_gpio; |
945 | } |
946 | |
947 | drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL); |
948 | if (!drv_data) { |
949 | err = -ENXIO; |
950 | goto err_rel_gpio; |
951 | } |
952 | |
953 | platform_set_drvdata(pdev, drv_data); |
954 | drv_data->pdev = pdev; |
955 | drv_data->ide_base = ide_base; |
956 | drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN; |
957 | drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT; |
958 | ep93xx_pata_dma_init(drv_data); |
959 | |
960 | /* allocate host */ |
961 | host = ata_host_alloc(&pdev->dev, 1); |
962 | if (!host) { |
963 | err = -ENXIO; |
964 | goto err_rel_dma; |
965 | } |
966 | |
967 | ep93xx_pata_clear_regs(ide_base); |
968 | |
969 | host->private_data = drv_data; |
970 | |
971 | ap = host->ports[0]; |
972 | ap->dev = &pdev->dev; |
973 | ap->ops = &ep93xx_pata_port_ops; |
974 | ap->flags |= ATA_FLAG_SLAVE_POSS; |
975 | ap->pio_mask = ATA_PIO4; |
976 | |
977 | /* |
978 | * Maximum UDMA modes: |
979 | * EP931x rev.E0 - UDMA2 |
980 | * EP931x rev.E1 - UDMA3 |
981 | * EP931x rev.E2 - UDMA4 |
982 | * |
983 | * MWDMA support was removed from EP931x rev.E2, |
984 | * so this driver supports only UDMA modes. |
985 | */ |
986 | if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) { |
987 | int chip_rev = ep93xx_chip_revision(); |
988 | |
989 | if (chip_rev == EP93XX_CHIP_REV_E1) |
990 | ap->udma_mask = ATA_UDMA3; |
991 | else if (chip_rev == EP93XX_CHIP_REV_E2) |
992 | ap->udma_mask = ATA_UDMA4; |
993 | else |
994 | ap->udma_mask = ATA_UDMA2; |
995 | } |
996 | |
997 | /* defaults, pio 0 */ |
998 | ep93xx_pata_enable_pio(ide_base, 0); |
999 | |
1000 | dev_info(&pdev->dev, "version " DRV_VERSION "\n"); |
1001 | |
1002 | /* activate host */ |
1003 | err = ata_host_activate(host, irq, ata_bmdma_interrupt, 0, |
1004 | &ep93xx_pata_sht); |
1005 | if (err == 0) |
1006 | return 0; |
1007 | |
1008 | err_rel_dma: |
1009 | ep93xx_pata_release_dma(drv_data); |
1010 | err_rel_gpio: |
1011 | ep93xx_ide_release_gpio(pdev); |
1012 | return err; |
1013 | } |
1014 | |
/*
 * Remove: tear down in the reverse order of probe — detach the libata
 * host first (stops command processing), then release DMA channels,
 * quiesce the controller registers and give back the GPIO lines.
 * The devm-managed ioremap/kzalloc are released automatically.
 */
static int ep93xx_pata_remove(struct platform_device *pdev)
{
	/*
	 * NOTE(review): drvdata is read here as the ata_host, while probe
	 * initially stored the ep93xx_pata_data — presumably ata_host_alloc()
	 * replaced the drvdata with the host pointer; confirm in libata core.
	 */
	struct ata_host *host = platform_get_drvdata(pdev);
	struct ep93xx_pata_data *drv_data = host->private_data;

	ata_host_detach(host);
	ep93xx_pata_release_dma(drv_data);
	ep93xx_pata_clear_regs(drv_data->ide_base);
	ep93xx_ide_release_gpio(pdev);
	return 0;
}
1026 | |
/* Platform driver glue; module_platform_driver() generates init/exit. */
static struct platform_driver ep93xx_pata_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ep93xx_pata_probe,
	.remove = ep93xx_pata_remove,
};

module_platform_driver(ep93xx_pata_platform_driver);

MODULE_AUTHOR("Alessandro Zummo, Lennert Buytenhek, Joao Ramos, "
		"Bartlomiej Zolnierkiewicz, Rafal Prylowski");
MODULE_DESCRIPTION("low-level driver for cirrus ep93xx IDE controller");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:pata_ep93xx");
1044 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9