/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 * TODO:
 *  - Now that the driver was significantly simplified, I need to rework
 *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
 *    can avoid taking most of them for such long periods of time (and
 *    schedule instead). The main issues at this point are caused by the
 *    netdev layer though:
 *
 *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
 *    held by net/core/dev.c, thus they can't schedule. That means they can't
 *    call napi_disable() either, thus forcing gem_poll() to keep a spinlock
 *    where it could have been dropped. change_mtu especially would also love
 *    to be able to msleep instead of using horrid locked delays when
 *    resetting the HW, but that read_lock() makes it impossible, unless I
 *    defer its action to the reset task, which means it'll be asynchronous
 *    (won't take effect until the system schedules a bit).
 *
 *    Also, it would probably be possible to remove most of the long-life
 *    locking in the open/resume code path (gem_reinit_chip) by being more
 *    careful about when we can start taking interrupts or get xmit() called...
 */
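
/* Note on lock ordering (summary added for clarity; derived from the
 * callers below such as gem_tx_timeout() and gem_link_timer()): whenever
 * both spinlocks are needed, gp->lock is taken first, then gp->tx_lock:
 *
 *	spin_lock_irq(&gp->lock);
 *	spin_lock(&gp->tx_lock);
 *	... access chip / TX state ...
 *	spin_unlock(&gp->tx_lock);
 *	spin_unlock_irq(&gp->lock);
 */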

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/gfp.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include "sungem_phy.h"
#include "sungem.h"

/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"0.98"
#define DRV_RELDATE	"8/24/03"
#define DRV_AUTHOR	"David S. Miller (davem@redhat.com)"

static char version[] __devinitdata =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"
#define PFX GEM_MODULE_NAME ": "

static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);

static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __phy_read(gp, mii_id, reg);
}

static inline u16 phy_read(struct gem *gp, int reg)
{
	return __phy_read(gp, gp->mii_phy_addr, reg);
}

static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void phy_write(struct gem *gp, int reg, u16 val)
{
	__phy_write(gp, gp->mii_phy_addr, reg, val);
}
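
/* Usage sketch for the accessors above (illustrative only; this mirrors
 * what gem_init_phy() does further down): soft-reset the PHY through the
 * standard MII control register and check that it still responds.
 * __phy_read() returns 0xffff when the MIF frame never completes, so
 * that value doubles as a "no PHY there" indication:
 *
 *	phy_write(gp, MII_BMCR, BMCR_RESET);
 *	msleep(20);
 *	if (phy_read(gp, MII_BMCR) == 0xffff)
 *		... PHY did not come back, treat as not responding ...
 */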

static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}
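
/* GREG_IMASK is a mask register: a 1 bit *disables* the corresponding
 * GREG_STAT interrupt source. That is why "enable" above writes only
 * GREG_STAT_TXDONE (masking just TX-done), while masking everything,
 * as gem_reset() does below, is simply:
 *
 *	writel(0xffffffff, gp->regs + GREG_IMASK);
 */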

static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}
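
/* gem_get_cell()/gem_put_cell() form a simple refcount around the
 * PMAC cell clock: the first get turns the clock on, the last put may
 * turn it off again. Illustrative pairing, as gem_link_timer() below
 * uses it (sketch only):
 *
 *	gem_get_cell(gp);	// chip clock guaranteed on from here
 *	... access chip registers ...
 *	gem_put_cell(gp);	// clock may be gated again
 */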

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
		       dev->name);
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			printk(KERN_INFO "%s: PCS AutoNEG complete, "
			       "RemoteFault\n", dev->name);
		else
			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
			       dev->name);
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		printk(KERN_INFO "%s: PCS link is now up.\n",
		       dev->name);
		netif_carrier_on(gp->dev);
	} else {
		printk(KERN_INFO "%s: PCS link is now down.\n",
		       dev->name);
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		gp->net_stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		gp->net_stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		gp->net_stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		gp->net_stats.tx_aborted_errors += 0x10000;
		gp->net_stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	udelay(5000);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		printk(KERN_ERR "%s: RX reset command will not execute, resetting "
		       "whole chip.\n", dev->name);
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
			       "whole chip.\n", dev->name);
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
		       dev->name, smac);
		gp->net_stats.rx_over_errors++;
		gp->net_stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		gp->net_stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		gp->net_stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		gp->net_stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		printk(KERN_ERR "%s: PCI error [%04x] ",
		       dev->name, pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			printk("<No ACK64# during ABS64 cycle> ");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			printk("<Delayed transaction timeout> ");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			printk("<other>");
		printk("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		printk(KERN_ERR "%s: PCI error\n", dev->name);
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (i.e. if we reset the card, which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		gp->net_stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		gp->net_stats.rx_errors++;

		goto do_reset;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			goto do_reset;
	}

	return 0;

do_reset:
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	return 1;
}

static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
			gp->dev->name, gem_status);

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		gp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		gp->net_stats.tx_packets++;
		dev_kfree_skb_irq(skb);
	}
	gp->tx_old = entry;

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}

static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}
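
/* Worked example for gem_post_rxds() (illustrative numbers only): with
 * gp->rx_new == 0 and limit == 10, descriptors are handed back to the
 * chip in aligned groups of four: slots 0-3, then 4-7, after which
 * RXDMA_KICK is written with 8. Slots 8 and 9 stay unposted until
 * enough completions arrive to fill out their group, so the kick value
 * written to the chip is always a multiple of 4.
 */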

static int gem_rx(struct gem *gp, int work_to_do)
{
	int entry, drops, work_done = 0;
	u32 done;
	__sum16 csum;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address. We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			gp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				gp->net_stats.rx_length_errors++;
			if (status & RXDCTRL_BAD)
				gp->net_stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			gp->net_stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			new_skb->dev = gp->dev;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->csum = csum_unfold(csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->protocol = eth_type_trans(skb, gp->dev);

		netif_receive_skb(skb);

		gp->net_stats.rx_packets++;
		gp->net_stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       gp->dev->name);

	return work_done;
}
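
/* Note on the RX checksum handling above: the chip deposits a raw
 * ones-complement sum of the packet in the descriptor status word
 * (RXDCTRL_TCPCSUM). gem_rx() folds it with
 *
 *	csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
 *	skb->csum = csum_unfold(csum);
 *
 * and reports CHECKSUM_COMPLETE, letting the stack verify TCP/UDP
 * checksums without touching the payload again.
 */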

static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	unsigned long flags;
	int work_done;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

	work_done = 0;
	do {
		/* Handle anomalies */
		if (gp->status & GREG_STAT_ABNORMAL) {
			if (gem_abnormal_irq(dev, gp, gp->status))
				break;
		}

		/* Run TX completion thread */
		spin_lock(&gp->tx_lock);
		gem_tx(dev, gp, gp->status);
		spin_unlock(&gp->tx_lock);

		spin_unlock_irqrestore(&gp->lock, flags);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		spin_lock_irqsave(&gp->lock, flags);

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	__napi_complete(napi);
	gem_enable_ints(gp);

	spin_unlock_irqrestore(&gp->lock, flags);

	return work_done;
}

static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);
	unsigned long flags;

	/* Swallow interrupts when shutting the chip down, though
	 * that shouldn't happen, we should have done free_irq() at
	 * this point...
	 */
	if (!gp->running)
		return IRQ_HANDLED;

	spin_lock_irqsave(&gp->lock, flags);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (gem_status == 0) {
			napi_enable(&gp->napi);
			spin_unlock_irqrestore(&gp->lock, flags);
			return IRQ_NONE;
		}
		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	/* gem_interrupt() is safe to be called reentrantly,
	 * so there is no need to disable_irq here.
	 */
	gem_interrupt(dev->irq, dev);
}
#endif

static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!gp->running) {
		printk(KERN_ERR "%s: hrm.. hw not running !\n", dev->name);
		return;
	}
	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + TXDMA_CFG),
	       readl(gp->regs + MAC_TXSTAT),
	       readl(gp->regs + MAC_TXCFG));
	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(gp->regs + RXDMA_CFG),
	       readl(gp->regs + MAC_RXSTAT),
	       readl(gp->regs + MAC_RXCFG));

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);

	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);

	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}
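
/* Worked example for gem_intme() (illustrative; assumes the usual
 * power-of-two TX_RING_SIZE from sungem.h, e.g. 128): the mask
 * (TX_RING_SIZE >> 1) - 1 is then 63, so only entries 0 and 64
 * return 1. A packet whose first descriptor lands on one of those
 * slots gets TXDCTRL_INTME set, i.e. roughly two TX completion
 * interrupts per trip around the ring.
 */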

static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;
	unsigned long flags;

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_transport_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}
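
	/* Worked example (illustrative): for TCP over IPv4 without VLAN,
	 * skb_transport_offset() is 14 (Ethernet) + 20 (IP) = 34 and
	 * skb->csum_offset is 16 (the checksum field within the TCP
	 * header), so the descriptor asks the chip to start summing at
	 * byte 34 and stuff the result at byte 34 + 16 = 50.
	 */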

	if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
		/* Tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	/* We raced with gem_do_stop() */
	if (!gp->running) {
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = this_frag->size;
			mapping = pci_map_page(gp->pdev,
					       this_frag->page,
					       this_frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);
	spin_unlock_irqrestore(&gp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Reset PCS unit. */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
		       gp->dev->name);
}

static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	/* Make sure PCS is disabled while changing advertisement
	 * configuration.
	 */
	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	/* Advertise all capabilities except asymmetric
	 * pause.
	 */
	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	/* Enable and restart auto-negotiation, disable wrapback/loopback,
	 * and re-enable PCS.
	 */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

	/* Make sure serialink loopback is off. The meaning
	 * of this bit is logically inverted based upon whether
	 * you are in Serialink or SERDES mode.
	 */
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}

#define STOP_TRIES 32

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		printk(KERN_ERR "%s: SW reset did not complete.\n",
		       gp->dev->name);

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
 * actually stopped before about 4ms though...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}


/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ep->speed;
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually set up the PHY, we
	 * just store the settings
	 */
	if (gp->asleep) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under gp->lock and gp->tx_lock.
 */
static int gem_set_link_modes(struct gem *gp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	if (netif_msg_link(gp))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
			gp->dev->name, speed, (full_duplex ? "full" : "half"));

	if (!gp->running)
		return 0;

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode. Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (netif_msg_link(gp)) {
		if (pause) {
			printk(KERN_INFO "%s: Pause is enabled "
			       "(rxfifo: %d off: %d on: %d)\n",
			       gp->dev->name,
			       gp->rx_fifo_sz,
			       gp->rx_pause_off,
			       gp->rx_pause_on);
		} else {
			printk(KERN_INFO "%s: Pause is disabled\n",
			       gp->dev->name);
		}
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	return 0;
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
			       " forced mode\n", gp->dev->name);
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
			gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have the "magic_aneg" bit set, which means they internally do the
		 * whole forced-mode thing themselves. On these, we just restart aneg
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		if (netif_msg_link(gp))
			printk(KERN_INFO "%s: switching to forced 100bt\n",
			       gp->dev->name);
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
			DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
				DUPLEX_HALF);
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: switching to forced 10bt\n",
				       gp->dev->name);
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}
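
/* Link state progression implemented above and in gem_link_timer()
 * below (summary added for clarity; the code remains authoritative):
 *
 *	link_aneg      --timeout, no magic_aneg--> link_force_try (100/HD)
 *	link_force_try --still no link--> forced 10/HD, stays link_force_try
 *	link_force_try --link + want_autoneg--> link_force_ret (retry aneg once)
 *	link_force_ret --aneg fails again--> link_force_ok (keep forced mode)
 */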

static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	int restart_aneg = 0;

	if (gp->asleep)
		return;

	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	gem_get_cell(gp);

	/* If the reset task is still pending, we just
	 * reschedule the link timer
	 */
	if (gp->reset_task_pending)
		goto restart;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
				       " autoneg once...\n", gp->dev->name);
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(gp->dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			if (netif_msg_link(gp))
				printk(KERN_INFO "%s: Link down\n",
				       gp->dev->name);
			netif_carrier_off(gp->dev);
			gp->reset_task_pending = 1;
			schedule_work(&gp->reset_task);
			restart_aneg = 1;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		goto out_unlock;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
out_unlock:
	gem_put_cell(gp);
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}

/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb->dev = dev;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		wmb();
		txd->buffer = 0;
	}
	wmb();
}

/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* These delays suck, but the HW seems to love them, so I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by Apple have problems coming back
			 * to us; we do an additional reset here
			 */
1744 | phy_write(gp, MII_BMCR, BMCR_RESET); |
1745 | msleep(20); |
1746 | if (phy_read(gp, MII_BMCR) != 0xffff) |
1747 | break; |
1748 | if (i == 2) |
1749 | printk(KERN_WARNING "%s: GMAC PHY not responding !\n", |
1750 | gp->dev->name); |
1751 | } |
1752 | } |
1753 | |
1754 | if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && |
1755 | gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { |
1756 | u32 val; |
1757 | |
1758 | /* Init datapath mode register. */ |
1759 | if (gp->phy_type == phy_mii_mdio0 || |
1760 | gp->phy_type == phy_mii_mdio1) { |
1761 | val = PCS_DMODE_MGM; |
1762 | } else if (gp->phy_type == phy_serialink) { |
1763 | val = PCS_DMODE_SM | PCS_DMODE_GMOE; |
1764 | } else { |
1765 | val = PCS_DMODE_ESM; |
1766 | } |
1767 | |
1768 | writel(val, gp->regs + PCS_DMODE); |
1769 | } |
1770 | |
1771 | if (gp->phy_type == phy_mii_mdio0 || |
1772 | gp->phy_type == phy_mii_mdio1) { |
1773 | // XXX check for errors |
1774 | mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); |
1775 | |
1776 | /* Init PHY */ |
1777 | if (gp->phy_mii.def && gp->phy_mii.def->ops->init) |
1778 | gp->phy_mii.def->ops->init(&gp->phy_mii); |
1779 | } else { |
1780 | gem_pcs_reset(gp); |
1781 | gem_pcs_reinit_adv(gp); |
1782 | } |
1783 | |
1784 | /* Default aneg parameters */ |
1785 | gp->timer_ticks = 0; |
1786 | gp->lstate = link_down; |
1787 | netif_carrier_off(gp->dev); |
1788 | |
1789 | /* Can I advertise gigabit here? I'd need BCM PHY docs... */
1790 | spin_lock_irq(&gp->lock); |
1791 | gem_begin_auto_negotiation(gp, NULL); |
1792 | spin_unlock_irq(&gp->lock); |
1793 | } |
1794 | |
1795 | /* Must be invoked under gp->lock and gp->tx_lock. */ |
1796 | static void gem_init_dma(struct gem *gp) |
1797 | { |
1798 | u64 desc_dma = (u64) gp->gblock_dvma; |
1799 | u32 val; |
1800 | |
1801 | val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); |
1802 | writel(val, gp->regs + TXDMA_CFG); |
1803 | |
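     | /* The TX and RX descriptor rings live back to back in one init
     |  * block: point the TX ring base at its start, then step past the
     |  * TX descriptors so the RX base below lands on the RX ring.
     |  */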
1804 | writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); |
1805 | writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); |
1806 | desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); |
1807 | |
1808 | writel(0, gp->regs + TXDMA_KICK); |
1809 | |
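     | /* RX DMA config: RX_OFFSET in the first-byte-offset field keeps the
     |  * IP header aligned, (14 / 2) seemingly sets the checksum start
     |  * offset in 16-bit units so the HW checksum skips the 14-byte
     |  * Ethernet header, and the FIFO threshold is 128 bytes.
     |  */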
1810 | val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | |
1811 | ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128); |
1812 | writel(val, gp->regs + RXDMA_CFG); |
1813 | |
1814 | writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); |
1815 | writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); |
1816 | |
1817 | writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); |
1818 | |
1819 | val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); |
1820 | val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); |
1821 | writel(val, gp->regs + RXDMA_PTHRESH); |
1822 | |
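     | 
     | /* RX interrupt blanking: interrupt after 5 received packets (IPKTS)
     |  * or once the ITIME delay expires. The delay counts ticks derived
     |  * from the PCI clock, so a 66MHz bus (M66EN) gets a larger count
     |  * for roughly the same wall-clock time.
     |  */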
1823 | if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) |
1824 | writel(((5 & RXDMA_BLANK_IPKTS) | |
1825 | ((8 << 12) & RXDMA_BLANK_ITIME)), |
1826 | gp->regs + RXDMA_BLANK); |
1827 | else |
1828 | writel(((5 & RXDMA_BLANK_IPKTS) | |
1829 | ((4 << 12) & RXDMA_BLANK_ITIME)), |
1830 | gp->regs + RXDMA_BLANK); |
1831 | } |
1832 | |
1833 | /* Must be invoked under gp->lock and gp->tx_lock. */ |
1834 | static u32 gem_setup_multicast(struct gem *gp) |
1835 | { |
1836 | u32 rxcfg = 0; |
1837 | int i; |
1838 | |
1839 | if ((gp->dev->flags & IFF_ALLMULTI) || |
1840 | (netdev_mc_count(gp->dev) > 256)) { |
1841 | for (i=0; i<16; i++) |
1842 | writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); |
1843 | rxcfg |= MAC_RXCFG_HFE; |
1844 | } else if (gp->dev->flags & IFF_PROMISC) { |
1845 | rxcfg |= MAC_RXCFG_PROM; |
1846 | } else { |
1847 | u16 hash_table[16]; |
1848 | u32 crc; |
1849 | struct dev_mc_list *dmi; |
1850 | int i; |
1851 | |
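     | /* Build the 256-bin multicast hash filter: the top 8 bits of the
     |  * little-endian CRC of each address select one bit in the 256-bit
     |  * table, spread over sixteen 16-bit HASH registers (bin 0 is bit
     |  * 15 of MAC_HASH0).
     |  */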
1852 | memset(hash_table, 0, sizeof(hash_table)); |
1853 | netdev_for_each_mc_addr(dmi, gp->dev) { |
1854 | char *addrs = dmi->dmi_addr; |
1855 | |
1856 | if (!(*addrs & 1)) |
1857 | continue; |
1858 | |
1859 | crc = ether_crc_le(6, addrs); |
1860 | crc >>= 24; |
1861 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); |
1862 | } |
1863 | for (i=0; i<16; i++) |
1864 | writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); |
1865 | rxcfg |= MAC_RXCFG_HFE; |
1866 | } |
1867 | |
1868 | return rxcfg; |
1869 | } |
1870 | |
1871 | /* Must be invoked under gp->lock and gp->tx_lock. */ |
1872 | static void gem_init_mac(struct gem *gp) |
1873 | { |
1874 | unsigned char *e = &gp->dev->dev_addr[0]; |
1875 | |
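     | /* Classic 802.3 MAC parameters: the standard 96-bit inter-packet gap
     |  * split as 8+4 byte times (IPG1/IPG2), a 64 byte-time slot time
     |  * (STIME), 64-byte minimum frame (MINFSZ), 7-byte preamble (PASIZE),
     |  * 4 byte-time jam (JAMSIZE), a 16-attempt limit (ATTLIM) and 0x8808,
     |  * the MAC control EtherType used by PAUSE frames (MCTYPE). SNDPAUSE
     |  * is apparently the pause time we advertise in transmitted PAUSE
     |  * frames.
     |  */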
1876 | writel(0x1bf0, gp->regs + MAC_SNDPAUSE); |
1877 | |
1878 | writel(0x00, gp->regs + MAC_IPG0); |
1879 | writel(0x08, gp->regs + MAC_IPG1); |
1880 | writel(0x04, gp->regs + MAC_IPG2); |
1881 | writel(0x40, gp->regs + MAC_STIME); |
1882 | writel(0x40, gp->regs + MAC_MINFSZ); |
1883 | |
1884 | /* Ethernet payload + header + FCS + optional VLAN tag. */ |
1885 | writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); |
1886 | |
1887 | writel(0x07, gp->regs + MAC_PASIZE); |
1888 | writel(0x04, gp->regs + MAC_JAMSIZE); |
1889 | writel(0x10, gp->regs + MAC_ATTLIM); |
1890 | writel(0x8808, gp->regs + MAC_MCTYPE); |
1891 | |
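     | /* Seed the collision backoff RNG from the low 10 bits of the
     |  * station address.
     |  */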
1892 | writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); |
1893 | |
1894 | writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); |
1895 | writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); |
1896 | writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); |
1897 | |
1898 | writel(0, gp->regs + MAC_ADDR3); |
1899 | writel(0, gp->regs + MAC_ADDR4); |
1900 | writel(0, gp->regs + MAC_ADDR5); |
1901 | |
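     | /* Alternate address = 01:80:c2:00:00:01, the reserved 802.3x flow
     |  * control multicast address, in the same reversed 16-bit word
     |  * order as the station address above.
     |  */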
1902 | writel(0x0001, gp->regs + MAC_ADDR6); |
1903 | writel(0xc200, gp->regs + MAC_ADDR7); |
1904 | writel(0x0180, gp->regs + MAC_ADDR8); |
1905 | |
1906 | writel(0, gp->regs + MAC_AFILT0); |
1907 | writel(0, gp->regs + MAC_AFILT1); |
1908 | writel(0, gp->regs + MAC_AFILT2); |
1909 | writel(0, gp->regs + MAC_AF21MSK); |
1910 | writel(0, gp->regs + MAC_AF0MSK); |
1911 | |
1912 | gp->mac_rx_cfg = gem_setup_multicast(gp); |
1913 | #ifdef STRIP_FCS |
1914 | gp->mac_rx_cfg |= MAC_RXCFG_SFCS; |
1915 | #endif |
1916 | writel(0, gp->regs + MAC_NCOLL); |
1917 | writel(0, gp->regs + MAC_FASUCC); |
1918 | writel(0, gp->regs + MAC_ECOLL); |
1919 | writel(0, gp->regs + MAC_LCOLL); |
1920 | writel(0, gp->regs + MAC_DTIMER); |
1921 | writel(0, gp->regs + MAC_PATMPS); |
1922 | writel(0, gp->regs + MAC_RFCTR); |
1923 | writel(0, gp->regs + MAC_LERR); |
1924 | writel(0, gp->regs + MAC_AERR); |
1925 | writel(0, gp->regs + MAC_FCSERR); |
1926 | writel(0, gp->regs + MAC_RXCVERR); |
1927 | |
1928 | /* Clear RX/TX/MAC/XIF config, we will set these up and enable |
1929 | * them once a link is established. |
1930 | */ |
1931 | writel(0, gp->regs + MAC_TXCFG); |
1932 | writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); |
1933 | writel(0, gp->regs + MAC_MCCFG); |
1934 | writel(0, gp->regs + MAC_XIFCFG); |
1935 | |
1936 | /* Setup MAC interrupts. We want to get all of the interesting |
1937 | * counter expiration events, but we do not want to hear about |
1938 | * normal rx/tx as the DMA engine tells us that. |
1939 | */ |
1940 | writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); |
1941 | writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); |
1942 | |
1943 | /* Don't enable even the PAUSE interrupts for now, we |
1944 | * make no use of those events other than to record them. |
1945 | */ |
1946 | writel(0xffffffff, gp->regs + MAC_MCMASK); |
1947 | |
1948 | /* Don't enable GEM's WOL in normal operations |
1949 | */ |
1950 | if (gp->has_wol) |
1951 | writel(0, gp->regs + WOL_WAKECSR); |
1952 | } |
1953 | |
1954 | /* Must be invoked under gp->lock and gp->tx_lock. */ |
1955 | static void gem_init_pause_thresholds(struct gem *gp) |
1956 | { |
1957 | u32 cfg; |
1958 | |
1959 | /* Calculate pause thresholds. Setting the OFF threshold to the |
1960 | * full RX fifo size effectively disables PAUSE generation which |
1961 | * is what we do for 10/100 only GEMs which have FIFOs too small |
1962 | * to make real gains from PAUSE. |
1963 | */ |
1964 | if (gp->rx_fifo_sz <= (2 * 1024)) { |
1965 | gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; |
1966 | } else { |
1967 | int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; |
1968 | int off = (gp->rx_fifo_sz - (max_frame * 2)); |
1969 | int on = off - max_frame; |
1970 | |
1971 | gp->rx_pause_off = off; |
1972 | gp->rx_pause_on = on; |
1973 | } |
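     | 
     | /* In other words: request XOFF once less than two max-size frames
     |  * of FIFO space remain, and XON again after a further frame's worth
     |  * has drained. Both values are scaled to 64-byte units when written
     |  * in gem_init_dma().
     |  */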
1974 | |
1975 | |
1976 | /* Configure the chip "burst" DMA mode & enable some
1977 |  * HW bug fixes on the Apple version.
1978 |  */
1979 | cfg = 0; |
1980 | if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) |
1981 | cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; |
1982 | #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) |
1983 | cfg |= GREG_CFG_IBURST; |
1984 | #endif |
1985 | cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); |
1986 | cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); |
1987 | writel(cfg, gp->regs + GREG_CFG); |
1988 | |
1989 | /* If Infinite Burst didn't stick, then use different |
1990 | * thresholds (and Apple bug fixes don't exist) |
1991 | */ |
1992 | if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { |
1993 | cfg = ((2 << 1) & GREG_CFG_TXDMALIM); |
1994 | cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); |
1995 | writel(cfg, gp->regs + GREG_CFG); |
1996 | } |
1997 | } |
1998 | |
1999 | static int gem_check_invariants(struct gem *gp) |
2000 | { |
2001 | struct pci_dev *pdev = gp->pdev; |
2002 | u32 mif_cfg; |
2003 | |
2004 | /* On Apple's sungem, we can't rely on registers as the chip
2005 |  * has been powered down by the firmware. The PHY is looked
2006 |  * up later on.
2007 |  */
2008 | if (pdev->vendor == PCI_VENDOR_ID_APPLE) { |
2009 | gp->phy_type = phy_mii_mdio0; |
2010 | gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; |
2011 | gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; |
2012 | gp->swrst_base = 0; |
2013 | |
2014 | mif_cfg = readl(gp->regs + MIF_CFG); |
2015 | mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); |
2016 | mif_cfg |= MIF_CFG_MDI0; |
2017 | writel(mif_cfg, gp->regs + MIF_CFG); |
2018 | writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); |
2019 | writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); |
2020 | |
2021 | /* We hard-code the PHY address so we can properly bring it out
2022 |  * of reset later on. We can't really probe it at this point,
2023 |  * though that isn't an issue.
2024 |  */
2025 | if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) |
2026 | gp->mii_phy_addr = 1; |
2027 | else |
2028 | gp->mii_phy_addr = 0; |
2029 | |
2030 | return 0; |
2031 | } |
2032 | |
2033 | mif_cfg = readl(gp->regs + MIF_CFG); |
2034 | |
2035 | if (pdev->vendor == PCI_VENDOR_ID_SUN && |
2036 | pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { |
2037 | /* One of the MII PHYs _must_ be present |
2038 | * as this chip has no gigabit PHY. |
2039 | */ |
2040 | if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { |
2041 | printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n", |
2042 | mif_cfg); |
2043 | return -1; |
2044 | } |
2045 | } |
2046 | |
2047 | /* Determine initial PHY interface type guess. MDIO1 is the |
2048 | * external PHY and thus takes precedence over MDIO0. |
2049 | */ |
2050 | |
2051 | if (mif_cfg & MIF_CFG_MDI1) { |
2052 | gp->phy_type = phy_mii_mdio1; |
2053 | mif_cfg |= MIF_CFG_PSELECT; |
2054 | writel(mif_cfg, gp->regs + MIF_CFG); |
2055 | } else if (mif_cfg & MIF_CFG_MDI0) { |
2056 | gp->phy_type = phy_mii_mdio0; |
2057 | mif_cfg &= ~MIF_CFG_PSELECT; |
2058 | writel(mif_cfg, gp->regs + MIF_CFG); |
2059 | } else { |
2060 | #ifdef CONFIG_SPARC |
2061 | const char *p; |
2062 | |
2063 | p = of_get_property(gp->of_node, "shared-pins", NULL); |
2064 | if (p && !strcmp(p, "serdes")) |
2065 | gp->phy_type = phy_serdes; |
2066 | else |
2067 | #endif |
2068 | gp->phy_type = phy_serialink; |
2069 | } |
2070 | if (gp->phy_type == phy_mii_mdio1 || |
2071 | gp->phy_type == phy_mii_mdio0) { |
2072 | int i; |
2073 | |
2074 | for (i = 0; i < 32; i++) { |
2075 | gp->mii_phy_addr = i; |
2076 | if (phy_read(gp, MII_BMCR) != 0xffff) |
2077 | break; |
2078 | } |
2079 | if (i == 32) { |
2080 | if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { |
2081 | printk(KERN_ERR PFX "RIO MII phy will not respond.\n"); |
2082 | return -1; |
2083 | } |
2084 | gp->phy_type = phy_serdes; |
2085 | } |
2086 | } |
2087 | |
2088 | /* Fetch the FIFO configurations now too. */ |
2089 | gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; |
2090 | gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; |
2091 | |
2092 | if (pdev->vendor == PCI_VENDOR_ID_SUN) { |
2093 | if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { |
2094 | if (gp->tx_fifo_sz != (9 * 1024) || |
2095 | gp->rx_fifo_sz != (20 * 1024)) { |
2096 | printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n", |
2097 | gp->tx_fifo_sz, gp->rx_fifo_sz); |
2098 | return -1; |
2099 | } |
2100 | gp->swrst_base = 0; |
2101 | } else { |
2102 | if (gp->tx_fifo_sz != (2 * 1024) || |
2103 | gp->rx_fifo_sz != (2 * 1024)) { |
2104 | printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", |
2105 | gp->tx_fifo_sz, gp->rx_fifo_sz); |
2106 | return -1; |
2107 | } |
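     | /* RIO GEMs seem to want the cache line size, in 32-bit words
     |  * (64 bytes = 16 words), folded into the software reset register.
     |  */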
2108 | gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; |
2109 | } |
2110 | } |
2111 | |
2112 | return 0; |
2113 | } |
2114 | |
2115 | /* Must be invoked under gp->lock and gp->tx_lock. */ |
2116 | static void gem_reinit_chip(struct gem *gp) |
2117 | { |
2118 | /* Reset the chip */ |
2119 | gem_reset(gp); |
2120 | |
2121 | /* Make sure ints are disabled */ |
2122 | gem_disable_ints(gp); |
2123 | |
2124 | /* Allocate & setup ring buffers */ |
2125 | gem_init_rings(gp); |
2126 | |
2127 | /* Configure pause thresholds */ |
2128 | gem_init_pause_thresholds(gp); |
2129 | |
2130 | /* Init DMA & MAC engines */ |
2131 | gem_init_dma(gp); |
2132 | gem_init_mac(gp); |
2133 | } |
2134 | |
2135 | |
2136 | /* Must be invoked with no lock held. */ |
2137 | static void gem_stop_phy(struct gem *gp, int wol) |
2138 | { |
2139 | u32 mifcfg; |
2140 | unsigned long flags; |
2141 | |
2142 | /* Let the chip settle down a bit; it seems to help
2143 |  * with sleep mode on some models.
2144 |  */
2145 | msleep(10); |
2146 | |
2147 | /* Make sure we aren't polling for PHY status changes; we
2148 |  * don't currently use that feature anyway.
2149 |  */
2150 | mifcfg = readl(gp->regs + MIF_CFG); |
2151 | mifcfg &= ~MIF_CFG_POLL; |
2152 | writel(mifcfg, gp->regs + MIF_CFG); |
2153 | |
2154 | if (wol && gp->has_wol) { |
2155 | unsigned char *e = &gp->dev->dev_addr[0]; |
2156 | u32 csr; |
2157 | |
2158 | /* Setup wake-on-lan for MAGIC packet */ |
2159 | writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, |
2160 | gp->regs + MAC_RXCFG); |
2161 | writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); |
2162 | writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); |
2163 | writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); |
2164 | |
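     | /* The WOL match registers take the station address in the same
     |  * reversed word order as MAC_ADDR0-2, and MCOUNT sets how many
     |  * matches are required. When the XIF isn't in GMII mode we are on
     |  * a 10/100 link, so presumably the wake logic must watch the MII
     |  * pins instead.
     |  */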
2165 | writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); |
2166 | csr = WOL_WAKECSR_ENABLE; |
2167 | if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) |
2168 | csr |= WOL_WAKECSR_MII; |
2169 | writel(csr, gp->regs + WOL_WAKECSR); |
2170 | } else { |
2171 | writel(0, gp->regs + MAC_RXCFG); |
2172 | (void)readl(gp->regs + MAC_RXCFG); |
2173 | /* Machine sleep will die in strange ways if we don't
2174 |  * wait a bit here; it looks like the chip takes some
2175 |  * time to really shut down.
2176 |  */
2177 | msleep(10); |
2178 | } |
2179 | |
2180 | writel(0, gp->regs + MAC_TXCFG); |
2181 | writel(0, gp->regs + MAC_XIFCFG); |
2182 | writel(0, gp->regs + TXDMA_CFG); |
2183 | writel(0, gp->regs + RXDMA_CFG); |
2184 | |
2185 | if (!wol) { |
2186 | spin_lock_irqsave(&gp->lock, flags); |
2187 | spin_lock(&gp->tx_lock); |
2188 | gem_reset(gp); |
2189 | writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); |
2190 | writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); |
2191 | spin_unlock(&gp->tx_lock); |
2192 | spin_unlock_irqrestore(&gp->lock, flags); |
2193 | |
2194 | /* No need to take the lock here */ |
2195 | |
2196 | if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) |
2197 | gp->phy_mii.def->ops->suspend(&gp->phy_mii); |
2198 | |
2199 | /* According to Apple, we must set the MDIO pins to this benign
2200 |  * state or we may 1) eat more current, 2) damage some PHYs.
2201 |  */
2202 | writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); |
2203 | writel(0, gp->regs + MIF_BBCLK); |
2204 | writel(0, gp->regs + MIF_BBDATA); |
2205 | writel(0, gp->regs + MIF_BBOENAB); |
2206 | writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); |
2207 | (void) readl(gp->regs + MAC_XIFCFG); |
2208 | } |
2209 | } |
2210 | |
2211 | |
2212 | static int gem_do_start(struct net_device *dev) |
2213 | { |
2214 | struct gem *gp = netdev_priv(dev); |
2215 | unsigned long flags; |
2216 | |
2217 | spin_lock_irqsave(&gp->lock, flags); |
2218 | spin_lock(&gp->tx_lock); |
2219 | |
2220 | /* Enable the cell */ |
2221 | gem_get_cell(gp); |
2222 | |
2223 | /* Init & setup chip hardware */ |
2224 | gem_reinit_chip(gp); |
2225 | |
2226 | gp->running = 1; |
2227 | |
2228 | napi_enable(&gp->napi); |
2229 | |
2230 | if (gp->lstate == link_up) { |
2231 | netif_carrier_on(gp->dev); |
2232 | gem_set_link_modes(gp); |
2233 | } |
2234 | |
2235 | netif_wake_queue(gp->dev); |
2236 | |
2237 | spin_unlock(&gp->tx_lock); |
2238 | spin_unlock_irqrestore(&gp->lock, flags); |
2239 | |
2240 | if (request_irq(gp->pdev->irq, gem_interrupt, |
2241 | IRQF_SHARED, dev->name, (void *)dev)) { |
2242 | printk(KERN_ERR "%s: failed to request irq!\n", gp->dev->name);
2243 | |
2244 | spin_lock_irqsave(&gp->lock, flags); |
2245 | spin_lock(&gp->tx_lock); |
2246 | |
2247 | napi_disable(&gp->napi); |
2248 | |
2249 | gp->running = 0; |
2250 | gem_reset(gp); |
2251 | gem_clean_rings(gp); |
2252 | gem_put_cell(gp); |
2253 | |
2254 | spin_unlock(&gp->tx_lock); |
2255 | spin_unlock_irqrestore(&gp->lock, flags); |
2256 | |
2257 | return -EAGAIN; |
2258 | } |
2259 | |
2260 | return 0; |
2261 | } |
2262 | |
2263 | static void gem_do_stop(struct net_device *dev, int wol) |
2264 | { |
2265 | struct gem *gp = netdev_priv(dev); |
2266 | unsigned long flags; |
2267 | |
2268 | spin_lock_irqsave(&gp->lock, flags); |
2269 | spin_lock(&gp->tx_lock); |
2270 | |
2271 | gp->running = 0; |
2272 | |
2273 | /* Stop netif queue */ |
2274 | netif_stop_queue(dev); |
2275 | |
2276 | /* Make sure ints are disabled */ |
2277 | gem_disable_ints(gp); |
2278 | |
2279 | /* We can drop the lock now */ |
2280 | spin_unlock(&gp->tx_lock); |
2281 | spin_unlock_irqrestore(&gp->lock, flags); |
2282 | |
2283 | /* Stop DMA; skip the full reset if we are going to sleep with WOL */
2284 | gem_stop_dma(gp); |
2285 | msleep(10); |
2286 | if (!wol) |
2287 | gem_reset(gp); |
2288 | msleep(10); |
2289 | |
2290 | /* Get rid of rings */ |
2291 | gem_clean_rings(gp); |
2292 | |
2293 | /* No irq needed anymore */ |
2294 | free_irq(gp->pdev->irq, (void *) dev); |
2295 | |
2296 | /* The cell isn't needed anymore either, if WOL is off */
2297 | if (!wol) { |
2298 | spin_lock_irqsave(&gp->lock, flags); |
2299 | gem_put_cell(gp); |
2300 | spin_unlock_irqrestore(&gp->lock, flags); |
2301 | } |
2302 | } |
2303 | |
2304 | static void gem_reset_task(struct work_struct *work) |
2305 | { |
2306 | struct gem *gp = container_of(work, struct gem, reset_task); |
2307 | |
2308 | mutex_lock(&gp->pm_mutex); |
2309 | |
2310 | if (gp->opened) |
2311 | napi_disable(&gp->napi); |
2312 | |
2313 | spin_lock_irq(&gp->lock); |
2314 | spin_lock(&gp->tx_lock); |
2315 | |
2316 | if (gp->running) { |
2317 | netif_stop_queue(gp->dev); |
2318 | |
2319 | /* Reset the chip & rings */ |
2320 | gem_reinit_chip(gp); |
2321 | if (gp->lstate == link_up) |
2322 | gem_set_link_modes(gp); |
2323 | netif_wake_queue(gp->dev); |
2324 | } |
2325 | |
2326 | gp->reset_task_pending = 0; |
2327 | |
2328 | spin_unlock(&gp->tx_lock); |
2329 | spin_unlock_irq(&gp->lock); |
2330 | |
2331 | if (gp->opened) |
2332 | napi_enable(&gp->napi); |
2333 | |
2334 | mutex_unlock(&gp->pm_mutex); |
2335 | } |
2336 | |
2337 | |
2338 | static int gem_open(struct net_device *dev) |
2339 | { |
2340 | struct gem *gp = netdev_priv(dev); |
2341 | int rc = 0; |
2342 | |
2343 | mutex_lock(&gp->pm_mutex); |
2344 | |
2345 | /* We need the cell enabled */ |
2346 | if (!gp->asleep) |
2347 | rc = gem_do_start(dev); |
2348 | gp->opened = (rc == 0); |
2349 | |
2350 | mutex_unlock(&gp->pm_mutex); |
2351 | |
2352 | return rc; |
2353 | } |
2354 | |
2355 | static int gem_close(struct net_device *dev) |
2356 | { |
2357 | struct gem *gp = netdev_priv(dev); |
2358 | |
2359 | mutex_lock(&gp->pm_mutex); |
2360 | |
2361 | napi_disable(&gp->napi); |
2362 | |
2363 | gp->opened = 0; |
2364 | if (!gp->asleep) |
2365 | gem_do_stop(dev, 0); |
2366 | |
2367 | mutex_unlock(&gp->pm_mutex); |
2368 | |
2369 | return 0; |
2370 | } |
2371 | |
2372 | #ifdef CONFIG_PM |
2373 | static int gem_suspend(struct pci_dev *pdev, pm_message_t state) |
2374 | { |
2375 | struct net_device *dev = pci_get_drvdata(pdev); |
2376 | struct gem *gp = netdev_priv(dev); |
2377 | unsigned long flags; |
2378 | |
2379 | mutex_lock(&gp->pm_mutex); |
2380 | |
2381 | printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", |
2382 | dev->name, |
2383 | (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled"); |
2384 | |
2385 | /* Keep the cell enabled during the entire operation */ |
2386 | spin_lock_irqsave(&gp->lock, flags); |
2387 | spin_lock(&gp->tx_lock); |
2388 | gem_get_cell(gp); |
2389 | spin_unlock(&gp->tx_lock); |
2390 | spin_unlock_irqrestore(&gp->lock, flags); |
2391 | |
2392 | /* If the driver is opened, we stop the MAC */ |
2393 | if (gp->opened) { |
2394 | napi_disable(&gp->napi); |
2395 | |
2396 | /* Stop traffic, mark us closed */ |
2397 | netif_device_detach(dev); |
2398 | |
2399 | /* Switch off MAC, remember WOL setting */ |
2400 | gp->asleep_wol = gp->wake_on_lan; |
2401 | gem_do_stop(dev, gp->asleep_wol); |
2402 | } else |
2403 | gp->asleep_wol = 0; |
2404 | |
2405 | /* Mark us asleep */ |
2406 | gp->asleep = 1; |
2407 | wmb(); |
2408 | |
2409 | /* Stop the link timer */ |
2410 | del_timer_sync(&gp->link_timer); |
2411 | |
2412 | /* Now we release the mutex so we don't block the reset task,
2413 |  * which can take it too. We are marked asleep, so there will
2414 |  * be no conflict here.
2415 |  */
2416 | mutex_unlock(&gp->pm_mutex); |
2417 | |
2418 | /* Wait for a pending reset task to complete */ |
2419 | while (gp->reset_task_pending) |
2420 | yield(); |
2421 | flush_scheduled_work(); |
2422 | |
2423 | /* Shut the PHY down eventually and setup WOL */ |
2424 | gem_stop_phy(gp, gp->asleep_wol); |
2425 | |
2426 | /* Make sure bus master is disabled */ |
2427 | pci_disable_device(gp->pdev); |
2428 | |
2429 | /* Release the cell, no need to take a lock at this point since |
2430 | * nothing else can happen now |
2431 | */ |
2432 | gem_put_cell(gp); |
2433 | |
2434 | return 0; |
2435 | } |
2436 | |
2437 | static int gem_resume(struct pci_dev *pdev) |
2438 | { |
2439 | struct net_device *dev = pci_get_drvdata(pdev); |
2440 | struct gem *gp = netdev_priv(dev); |
2441 | unsigned long flags; |
2442 | |
2443 | printk(KERN_INFO "%s: resuming\n", dev->name); |
2444 | |
2445 | mutex_lock(&gp->pm_mutex); |
2446 | |
2447 | /* Keep the cell enabled during the entire operation; no need to
2448 |  * take a lock here though, since nothing else can happen while
2449 |  * we are marked asleep.
2450 |  */
2451 | gem_get_cell(gp); |
2452 | |
2453 | /* Make sure PCI access and bus master are enabled */ |
2454 | if (pci_enable_device(gp->pdev)) { |
2455 | printk(KERN_ERR "%s: Can't re-enable chip!\n",
2456 | dev->name); |
2457 | /* Put the cell back and forget it for now; it will be considered
2458 |  * still asleep, and a new sleep cycle may bring it back.
2459 |  */
2460 | gem_put_cell(gp); |
2461 | mutex_unlock(&gp->pm_mutex); |
2462 | return 0; |
2463 | } |
2464 | pci_set_master(gp->pdev); |
2465 | |
2466 | /* Reset everything */ |
2467 | gem_reset(gp); |
2468 | |
2469 | /* Mark us woken up */ |
2470 | gp->asleep = 0; |
2471 | wmb(); |
2472 | |
2473 | /* Bring the PHY back. Again, the lock is useless at this point
2474 |  * since nothing can happen until we restart the whole thing.
2475 |  */
2476 | gem_init_phy(gp); |
2477 | |
2478 | /* If we were opened, bring everything back */ |
2479 | if (gp->opened) { |
2480 | /* Restart MAC */ |
2481 | gem_do_start(dev); |
2482 | |
2483 | /* Re-attach net device */ |
2484 | netif_device_attach(dev); |
2485 | } |
2486 | |
2487 | spin_lock_irqsave(&gp->lock, flags); |
2488 | spin_lock(&gp->tx_lock); |
2489 | |
2490 | /* If we had WOL enabled, the cell clock was never turned off during
2491 |  * sleep, so we end up being unbalanced. Fix that here.
2492 |  */
2493 | if (gp->asleep_wol) |
2494 | gem_put_cell(gp); |
2495 | |
2496 | /* This function doesn't need to hold the cell, it will be held if the |
2497 | * driver is open by gem_do_start(). |
2498 | */ |
2499 | gem_put_cell(gp); |
2500 | |
2501 | spin_unlock(&gp->tx_lock); |
2502 | spin_unlock_irqrestore(&gp->lock, flags); |
2503 | |
2504 | mutex_unlock(&gp->pm_mutex); |
2505 | |
2506 | return 0; |
2507 | } |
2508 | #endif /* CONFIG_PM */ |
2509 | |
2510 | static struct net_device_stats *gem_get_stats(struct net_device *dev) |
2511 | { |
2512 | struct gem *gp = netdev_priv(dev); |
2513 | struct net_device_stats *stats = &gp->net_stats; |
2514 | |
2515 | spin_lock_irq(&gp->lock); |
2516 | spin_lock(&gp->tx_lock); |
2517 | |
2518 | /* I have seen this being called while PM was in progress,
2519 |  * so we shield against it here.
2520 |  */
2521 | if (gp->running) { |
2522 | stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); |
2523 | writel(0, gp->regs + MAC_FCSERR); |
2524 | |
2525 | stats->rx_frame_errors += readl(gp->regs + MAC_AERR); |
2526 | writel(0, gp->regs + MAC_AERR); |
2527 | |
2528 | stats->rx_length_errors += readl(gp->regs + MAC_LERR); |
2529 | writel(0, gp->regs + MAC_LERR); |
2530 | |
2531 | stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); |
2532 | stats->collisions += |
2533 | (readl(gp->regs + MAC_ECOLL) + |
2534 | readl(gp->regs + MAC_LCOLL)); |
2535 | writel(0, gp->regs + MAC_ECOLL); |
2536 | writel(0, gp->regs + MAC_LCOLL); |
2537 | } |
2538 | |
2539 | spin_unlock(&gp->tx_lock); |
2540 | spin_unlock_irq(&gp->lock); |
2541 | |
2542 | return &gp->net_stats; |
2543 | } |
2544 | |
2545 | static int gem_set_mac_address(struct net_device *dev, void *addr) |
2546 | { |
2547 | struct sockaddr *macaddr = (struct sockaddr *) addr; |
2548 | struct gem *gp = netdev_priv(dev); |
2549 | unsigned char *e = &dev->dev_addr[0]; |
2550 | |
2551 | if (!is_valid_ether_addr(macaddr->sa_data)) |
2552 | return -EADDRNOTAVAIL; |
2553 | |
2554 | if (!netif_running(dev) || !netif_device_present(dev)) { |
2555 | /* We'll just catch it later when the
2556 |  * device is brought up or resumed.
2557 |  */
2558 | memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); |
2559 | return 0; |
2560 | } |
2561 | |
2562 | mutex_lock(&gp->pm_mutex); |
2563 | memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); |
2564 | if (gp->running) { |
2565 | writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); |
2566 | writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); |
2567 | writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); |
2568 | } |
2569 | mutex_unlock(&gp->pm_mutex); |
2570 | |
2571 | return 0; |
2572 | } |
2573 | |
2574 | static void gem_set_multicast(struct net_device *dev) |
2575 | { |
2576 | struct gem *gp = netdev_priv(dev); |
2577 | u32 rxcfg, rxcfg_new; |
2578 | int limit = 10000; |
2579 | |
2580 | |
2581 | spin_lock_irq(&gp->lock); |
2582 | spin_lock(&gp->tx_lock); |
2583 | |
2584 | if (!gp->running) |
2585 | goto bail; |
2586 | |
2587 | netif_stop_queue(dev); |
2588 | |
2589 | rxcfg = readl(gp->regs + MAC_RXCFG); |
2590 | rxcfg_new = gem_setup_multicast(gp); |
2591 | #ifdef STRIP_FCS |
2592 | rxcfg_new |= MAC_RXCFG_SFCS; |
2593 | #endif |
2594 | gp->mac_rx_cfg = rxcfg_new; |
2595 | |
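     | /* The RX MAC must be fully disabled (ENAB reads back clear) before
     |  * the filter can be rewritten; poll for up to 100ms (10000 * 10us)
     |  * for it to stop.
     |  */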
2596 | writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); |
2597 | while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { |
2598 | if (!limit--) |
2599 | break; |
2600 | udelay(10); |
2601 | } |
2602 | |
2603 | rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); |
2604 | rxcfg |= rxcfg_new; |
2605 | |
2606 | writel(rxcfg, gp->regs + MAC_RXCFG); |
2607 | |
2608 | netif_wake_queue(dev); |
2609 | |
2610 | bail: |
2611 | spin_unlock(&gp->tx_lock); |
2612 | spin_unlock_irq(&gp->lock); |
2613 | } |
2614 | |
2615 | /* Jumbo-grams don't seem to work :-( */ |
2616 | #define GEM_MIN_MTU 68 |
2617 | #if 1 |
2618 | #define GEM_MAX_MTU 1500 |
2619 | #else |
2620 | #define GEM_MAX_MTU 9000 |
2621 | #endif |
2622 | |
2623 | static int gem_change_mtu(struct net_device *dev, int new_mtu) |
2624 | { |
2625 | struct gem *gp = netdev_priv(dev); |
2626 | |
2627 | if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) |
2628 | return -EINVAL; |
2629 | |
2630 | if (!netif_running(dev) || !netif_device_present(dev)) { |
2631 | /* We'll just catch it later when the
2632 |  * device is brought up or resumed.
2633 |  */
2634 | dev->mtu = new_mtu; |
2635 | return 0; |
2636 | } |
2637 | |
2638 | mutex_lock(&gp->pm_mutex); |
2639 | spin_lock_irq(&gp->lock); |
2640 | spin_lock(&gp->tx_lock); |
2641 | dev->mtu = new_mtu; |
2642 | if (gp->running) { |
2643 | gem_reinit_chip(gp); |
2644 | if (gp->lstate == link_up) |
2645 | gem_set_link_modes(gp); |
2646 | } |
2647 | spin_unlock(&gp->tx_lock); |
2648 | spin_unlock_irq(&gp->lock); |
2649 | mutex_unlock(&gp->pm_mutex); |
2650 | |
2651 | return 0; |
2652 | } |
2653 | |
2654 | static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
2655 | { |
2656 | struct gem *gp = netdev_priv(dev); |
2657 | |
2658 | strcpy(info->driver, DRV_NAME); |
2659 | strcpy(info->version, DRV_VERSION); |
2660 | strcpy(info->bus_info, pci_name(gp->pdev)); |
2661 | } |
2662 | |
2663 | static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
2664 | { |
2665 | struct gem *gp = netdev_priv(dev); |
2666 | |
2667 | if (gp->phy_type == phy_mii_mdio0 || |
2668 | gp->phy_type == phy_mii_mdio1) { |
2669 | if (gp->phy_mii.def) |
2670 | cmd->supported = gp->phy_mii.def->features; |
2671 | else |
2672 | cmd->supported = (SUPPORTED_10baseT_Half | |
2673 | SUPPORTED_10baseT_Full); |
2674 | |
2675 | /* XXX hardcoded stuff for now */ |
2676 | cmd->port = PORT_MII; |
2677 | cmd->transceiver = XCVR_EXTERNAL; |
2678 | cmd->phy_address = 0; /* XXX fixed PHYAD */ |
2679 | |
2680 | /* Return current PHY settings */ |
2681 | spin_lock_irq(&gp->lock); |
2682 | cmd->autoneg = gp->want_autoneg; |
2683 | cmd->speed = gp->phy_mii.speed; |
2684 | cmd->duplex = gp->phy_mii.duplex; |
2685 | cmd->advertising = gp->phy_mii.advertising; |
2686 | |
2687 | /* If we started with a forced mode, we don't have a default |
2688 | * advertise set, we need to return something sensible so |
2689 | * userland can re-enable autoneg properly. |
2690 | */ |
2691 | if (cmd->advertising == 0) |
2692 | cmd->advertising = cmd->supported; |
2693 | spin_unlock_irq(&gp->lock); |
2694 | } else { // XXX PCS ? |
2695 | cmd->supported = |
2696 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | |
2697 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | |
2698 | SUPPORTED_Autoneg); |
2699 | cmd->advertising = cmd->supported; |
2700 | cmd->speed = 0; |
2701 | cmd->duplex = cmd->port = cmd->phy_address = |
2702 | cmd->transceiver = cmd->autoneg = 0; |
2703 | |
2704 | /* SERDES usually means a fibre connector, with mostly fixed settings */
2705 | if (gp->phy_type == phy_serdes) { |
2706 | cmd->port = PORT_FIBRE; |
2707 | cmd->supported = (SUPPORTED_1000baseT_Half | |
2708 | SUPPORTED_1000baseT_Full | |
2709 | SUPPORTED_FIBRE | SUPPORTED_Autoneg | |
2710 | SUPPORTED_Pause | SUPPORTED_Asym_Pause); |
2711 | cmd->advertising = cmd->supported; |
2712 | cmd->transceiver = XCVR_INTERNAL; |
2713 | if (gp->lstate == link_up) |
2714 | cmd->speed = SPEED_1000; |
2715 | cmd->duplex = DUPLEX_FULL; |
2716 | cmd->autoneg = 1; |
2717 | } |
2718 | } |
2719 | cmd->maxtxpkt = cmd->maxrxpkt = 0; |
2720 | |
2721 | return 0; |
2722 | } |
2723 | |
2724 | static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
2725 | { |
2726 | struct gem *gp = netdev_priv(dev); |
2727 | |
2728 | /* Verify the settings we care about. */ |
2729 | if (cmd->autoneg != AUTONEG_ENABLE && |
2730 | cmd->autoneg != AUTONEG_DISABLE) |
2731 | return -EINVAL; |
2732 | |
2733 | if (cmd->autoneg == AUTONEG_ENABLE && |
2734 | cmd->advertising == 0) |
2735 | return -EINVAL; |
2736 | |
2737 | if (cmd->autoneg == AUTONEG_DISABLE && |
2738 | ((cmd->speed != SPEED_1000 && |
2739 | cmd->speed != SPEED_100 && |
2740 | cmd->speed != SPEED_10) || |
2741 | (cmd->duplex != DUPLEX_HALF && |
2742 | cmd->duplex != DUPLEX_FULL))) |
2743 | return -EINVAL; |
2744 | |
2745 | /* Apply settings and restart link process. */ |
2746 | spin_lock_irq(&gp->lock); |
2747 | gem_get_cell(gp); |
2748 | gem_begin_auto_negotiation(gp, cmd); |
2749 | gem_put_cell(gp); |
2750 | spin_unlock_irq(&gp->lock); |
2751 | |
2752 | return 0; |
2753 | } |
2754 | |
2755 | static int gem_nway_reset(struct net_device *dev) |
2756 | { |
2757 | struct gem *gp = netdev_priv(dev); |
2758 | |
2759 | if (!gp->want_autoneg) |
2760 | return -EINVAL; |
2761 | |
2762 | /* Restart link process. */ |
2763 | spin_lock_irq(&gp->lock); |
2764 | gem_get_cell(gp); |
2765 | gem_begin_auto_negotiation(gp, NULL); |
2766 | gem_put_cell(gp); |
2767 | spin_unlock_irq(&gp->lock); |
2768 | |
2769 | return 0; |
2770 | } |
2771 | |
2772 | static u32 gem_get_msglevel(struct net_device *dev) |
2773 | { |
2774 | struct gem *gp = netdev_priv(dev); |
2775 | return gp->msg_enable; |
2776 | } |
2777 | |
2778 | static void gem_set_msglevel(struct net_device *dev, u32 value) |
2779 | { |
2780 | struct gem *gp = netdev_priv(dev); |
2781 | gp->msg_enable = value; |
2782 | } |
2783 | |
2784 | |
2785 | /* Add more when I understand how to program the chip */ |
2786 | /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ |
2787 | |
2788 | #define WOL_SUPPORTED_MASK (WAKE_MAGIC) |
2789 | |
2790 | static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2791 | { |
2792 | struct gem *gp = netdev_priv(dev); |
2793 | |
2794 | /* Add more when I understand how to program the chip */ |
2795 | if (gp->has_wol) { |
2796 | wol->supported = WOL_SUPPORTED_MASK; |
2797 | wol->wolopts = gp->wake_on_lan; |
2798 | } else { |
2799 | wol->supported = 0; |
2800 | wol->wolopts = 0; |
2801 | } |
2802 | } |
2803 | |
2804 | static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2805 | { |
2806 | struct gem *gp = netdev_priv(dev); |
2807 | |
2808 | if (!gp->has_wol) |
2809 | return -EOPNOTSUPP; |
2810 | gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; |
2811 | return 0; |
2812 | } |
2813 | |
2814 | static const struct ethtool_ops gem_ethtool_ops = { |
2815 | .get_drvinfo = gem_get_drvinfo, |
2816 | .get_link = ethtool_op_get_link, |
2817 | .get_settings = gem_get_settings, |
2818 | .set_settings = gem_set_settings, |
2819 | .nway_reset = gem_nway_reset, |
2820 | .get_msglevel = gem_get_msglevel, |
2821 | .set_msglevel = gem_set_msglevel, |
2822 | .get_wol = gem_get_wol, |
2823 | .set_wol = gem_set_wol, |
2824 | }; |
2825 | |
2826 | static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
2827 | { |
2828 | struct gem *gp = netdev_priv(dev); |
2829 | struct mii_ioctl_data *data = if_mii(ifr); |
2830 | int rc = -EOPNOTSUPP; |
2831 | unsigned long flags; |
2832 | |
2833 | /* Hold the PM mutex while doing ioctl's or we may collide |
2834 | * with power management. |
2835 | */ |
2836 | mutex_lock(&gp->pm_mutex); |
2837 | |
2838 | spin_lock_irqsave(&gp->lock, flags); |
2839 | gem_get_cell(gp); |
2840 | spin_unlock_irqrestore(&gp->lock, flags); |
2841 | |
2842 | switch (cmd) { |
2843 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ |
2844 | data->phy_id = gp->mii_phy_addr; |
2845 | /* Fallthrough... */ |
2846 | |
2847 | case SIOCGMIIREG: /* Read MII PHY register. */ |
2848 | if (!gp->running) |
2849 | rc = -EAGAIN; |
2850 | else { |
2851 | data->val_out = __phy_read(gp, data->phy_id & 0x1f, |
2852 | data->reg_num & 0x1f); |
2853 | rc = 0; |
2854 | } |
2855 | break; |
2856 | |
2857 | case SIOCSMIIREG: /* Write MII PHY register. */ |
2858 | if (!gp->running) |
2859 | rc = -EAGAIN; |
2860 | else { |
2861 | __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, |
2862 | data->val_in); |
2863 | rc = 0; |
2864 | } |
2865 | break; |
2866 | }
2867 | |
2868 | spin_lock_irqsave(&gp->lock, flags); |
2869 | gem_put_cell(gp); |
2870 | spin_unlock_irqrestore(&gp->lock, flags); |
2871 | |
2872 | mutex_unlock(&gp->pm_mutex); |
2873 | |
2874 | return rc; |
2875 | } |
2876 | |
2877 | #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) |
2878 | /* Fetch MAC address from vital product data of PCI ROM. */ |
2879 | static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) |
2880 | { |
2881 | int this_offset; |
2882 | |
2883 | for (this_offset = 0x20; this_offset < len; this_offset++) { |
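     | /* Scan for a VPD "NA" (network address) keyword entry: a VPD-R
     |  * resource byte (0x90), what looks like a 9-byte payload length,
     |  * the keyword "NA" (0x4e 0x41) and a data length of 6, followed
     |  * by the six MAC address bytes themselves.
     |  */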
2884 | void __iomem *p = rom_base + this_offset; |
2885 | int i; |
2886 | |
2887 | if (readb(p + 0) != 0x90 || |
2888 | readb(p + 1) != 0x00 || |
2889 | readb(p + 2) != 0x09 || |
2890 | readb(p + 3) != 0x4e || |
2891 | readb(p + 4) != 0x41 || |
2892 | readb(p + 5) != 0x06) |
2893 | continue; |
2894 | |
2895 | this_offset += 6; |
2896 | p += 6; |
2897 | |
2898 | for (i = 0; i < 6; i++) |
2899 | dev_addr[i] = readb(p + i); |
2900 | return 1; |
2901 | } |
2902 | return 0; |
2903 | } |
2904 | |
2905 | static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) |
2906 | { |
2907 | size_t size; |
2908 | void __iomem *p = pci_map_rom(pdev, &size); |
2909 | |
2910 | if (p) { |
2911 | int found; |
2912 | |
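     | /* 0x55aa is the PCI expansion ROM signature; only bother with the
     |  * VPD scan if the ROM looks valid.
     |  */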
2913 | found = readb(p) == 0x55 && |
2914 | readb(p + 1) == 0xaa && |
2915 | find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); |
2916 | pci_unmap_rom(pdev, p); |
2917 | if (found) |
2918 | return; |
2919 | } |
2920 | |
2921 | /* Sun MAC prefix then 3 random bytes. */ |
2922 | dev_addr[0] = 0x08; |
2923 | dev_addr[1] = 0x00; |
2924 | dev_addr[2] = 0x20; |
2925 | get_random_bytes(dev_addr + 3, 3); |
2926 | return; |
2927 | } |
2928 | #endif /* not Sparc and not PPC */ |
2929 | |
2930 | static int __devinit gem_get_device_address(struct gem *gp) |
2931 | { |
2932 | #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) |
2933 | struct net_device *dev = gp->dev; |
2934 | const unsigned char *addr; |
2935 | |
2936 | addr = of_get_property(gp->of_node, "local-mac-address", NULL); |
2937 | if (addr == NULL) { |
2938 | #ifdef CONFIG_SPARC |
2939 | addr = idprom->id_ethaddr; |
2940 | #else |
2941 | printk("\n"); |
2942 | printk(KERN_ERR "%s: can't get mac-address\n", dev->name); |
2943 | return -1; |
2944 | #endif |
2945 | } |
2946 | memcpy(dev->dev_addr, addr, 6); |
2947 | #else |
2948 | get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr); |
2949 | #endif |
2950 | return 0; |
2951 | } |
2952 | |
2953 | static void gem_remove_one(struct pci_dev *pdev) |
2954 | { |
2955 | struct net_device *dev = pci_get_drvdata(pdev); |
2956 | |
2957 | if (dev) { |
2958 | struct gem *gp = netdev_priv(dev); |
2959 | |
2960 | unregister_netdev(dev); |
2961 | |
2962 | /* Stop the link timer */ |
2963 | del_timer_sync(&gp->link_timer); |
2964 | |
2965 | /* We shouldn't need any locking here */ |
2966 | gem_get_cell(gp); |
2967 | |
2968 | /* Wait for a pending reset task to complete */ |
2969 | while (gp->reset_task_pending) |
2970 | yield(); |
2971 | flush_scheduled_work(); |
2972 | |
2973 | /* Shut the PHY down */ |
2974 | gem_stop_phy(gp, 0); |
2975 | |
2976 | gem_put_cell(gp); |
2977 | |
2978 | /* Make sure bus master is disabled */ |
2979 | pci_disable_device(gp->pdev); |
2980 | |
2981 | /* Free resources */ |
2982 | pci_free_consistent(pdev, |
2983 | sizeof(struct gem_init_block), |
2984 | gp->init_block, |
2985 | gp->gblock_dvma); |
2986 | iounmap(gp->regs); |
2987 | pci_release_regions(pdev); |
2988 | free_netdev(dev); |
2989 | |
2990 | pci_set_drvdata(pdev, NULL); |
2991 | } |
2992 | } |
2993 | |
2994 | static const struct net_device_ops gem_netdev_ops = { |
2995 | .ndo_open = gem_open, |
2996 | .ndo_stop = gem_close, |
2997 | .ndo_start_xmit = gem_start_xmit, |
2998 | .ndo_get_stats = gem_get_stats, |
2999 | .ndo_set_multicast_list = gem_set_multicast, |
3000 | .ndo_do_ioctl = gem_ioctl, |
3001 | .ndo_tx_timeout = gem_tx_timeout, |
3002 | .ndo_change_mtu = gem_change_mtu, |
3003 | .ndo_validate_addr = eth_validate_addr, |
3004 | .ndo_set_mac_address = gem_set_mac_address, |
3005 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3006 | .ndo_poll_controller = gem_poll_controller, |
3007 | #endif |
3008 | }; |
3009 | |
3010 | static int __devinit gem_init_one(struct pci_dev *pdev, |
3011 | const struct pci_device_id *ent) |
3012 | { |
3013 | static int gem_version_printed = 0; |
3014 | unsigned long gemreg_base, gemreg_len; |
3015 | struct net_device *dev; |
3016 | struct gem *gp; |
3017 | int err, pci_using_dac; |
3018 | |
3019 | if (gem_version_printed++ == 0) |
3020 | printk(KERN_INFO "%s", version); |
3021 | |
3022 | /* Apple gmac note: during probe, the chip is powered up by
3023 |  * the arch code to allow the code below to work (and to let
3024 |  * the chip be probed via the config space). It won't stay
3025 |  * powered up until the interface is brought up however, so
3026 |  * we can't rely on register configuration done at this point.
3027 |  */
3028 | err = pci_enable_device(pdev); |
3029 | if (err) { |
3030 | printk(KERN_ERR PFX "Cannot enable MMIO operation, " |
3031 | "aborting.\n"); |
3032 | return err; |
3033 | } |
3034 | pci_set_master(pdev); |
3035 | |
3036 | /* Configure DMA attributes. */ |
3037 | |
3038 | /* All of the GEM documentation states that 64-bit DMA addressing |
3039 | * is fully supported and should work just fine. However the |
3040 | * front end for RIO based GEMs is different and only supports |
3041 | * 32-bit addressing. |
3042 | * |
3043 | * For now we assume the various PPC GEMs are 32-bit only as well. |
3044 | */ |
3045 | if (pdev->vendor == PCI_VENDOR_ID_SUN && |
3046 | pdev->device == PCI_DEVICE_ID_SUN_GEM && |
3047 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
3048 | pci_using_dac = 1; |
3049 | } else { |
3050 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
3051 | if (err) { |
3052 | printk(KERN_ERR PFX "No usable DMA configuration, " |
3053 | "aborting.\n"); |
3054 | goto err_disable_device; |
3055 | } |
3056 | pci_using_dac = 0; |
3057 | } |
3058 | |
3059 | gemreg_base = pci_resource_start(pdev, 0); |
3060 | gemreg_len = pci_resource_len(pdev, 0); |
3061 | |
3062 | if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { |
3063 | printk(KERN_ERR PFX "Cannot find proper PCI device " |
3064 | "base address, aborting.\n"); |
3065 | err = -ENODEV; |
3066 | goto err_disable_device; |
3067 | } |
3068 | |
3069 | dev = alloc_etherdev(sizeof(*gp)); |
3070 | if (!dev) { |
3071 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); |
3072 | err = -ENOMEM; |
3073 | goto err_disable_device; |
3074 | } |
3075 | SET_NETDEV_DEV(dev, &pdev->dev); |
3076 | |
3077 | gp = netdev_priv(dev); |
3078 | |
3079 | err = pci_request_regions(pdev, DRV_NAME); |
3080 | if (err) { |
3081 | printk(KERN_ERR PFX "Cannot obtain PCI resources, " |
3082 | "aborting.\n"); |
3083 | goto err_out_free_netdev; |
3084 | } |
3085 | |
3086 | gp->pdev = pdev; |
3087 | dev->base_addr = (long) pdev; |
3088 | gp->dev = dev; |
3089 | |
3090 | gp->msg_enable = DEFAULT_MSG; |
3091 | |
3092 | spin_lock_init(&gp->lock); |
3093 | spin_lock_init(&gp->tx_lock); |
3094 | mutex_init(&gp->pm_mutex); |
3095 | |
3096 | init_timer(&gp->link_timer); |
3097 | gp->link_timer.function = gem_link_timer; |
3098 | gp->link_timer.data = (unsigned long) gp; |
3099 | |
3100 | INIT_WORK(&gp->reset_task, gem_reset_task); |
3101 | |
3102 | gp->lstate = link_down; |
3103 | gp->timer_ticks = 0; |
3104 | netif_carrier_off(dev); |
3105 | |
3106 | gp->regs = ioremap(gemreg_base, gemreg_len); |
3107 | if (!gp->regs) { |
3108 | printk(KERN_ERR PFX "Cannot map device registers, " |
3109 | "aborting.\n"); |
3110 | err = -EIO; |
3111 | goto err_out_free_res; |
3112 | } |
3113 | |
3114 | /* On Apple, we want a reference to the Open Firmware device-tree |
3115 | * node. We use it for clock control. |
3116 | */ |
3117 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) |
3118 | gp->of_node = pci_device_to_OF_node(pdev); |
3119 | #endif |
3120 | |
3121 | /* Only the Apple version supports WOL, AFAIK */
3122 | if (pdev->vendor == PCI_VENDOR_ID_APPLE) |
3123 | gp->has_wol = 1; |
3124 | |
3125 | /* Make sure cell is enabled */ |
3126 | gem_get_cell(gp); |
3127 | |
3128 | /* Make sure everything is stopped and in init state */ |
3129 | gem_reset(gp); |
3130 | |
3131 | /* Fill up the mii_phy structure (even if we won't use it) */ |
3132 | gp->phy_mii.dev = dev; |
3133 | gp->phy_mii.mdio_read = _phy_read; |
3134 | gp->phy_mii.mdio_write = _phy_write; |
3135 | #ifdef CONFIG_PPC_PMAC |
3136 | gp->phy_mii.platform_data = gp->of_node; |
3137 | #endif |
3138 | /* By default, we start with autoneg */ |
3139 | gp->want_autoneg = 1; |
3140 | |
3141 | /* Check fifo sizes, PHY type, etc... */ |
3142 | if (gem_check_invariants(gp)) { |
3143 | err = -ENODEV; |
3144 | goto err_out_iounmap; |
3145 | } |
3146 | |
3147 | /* It is guaranteed that the returned buffer will be at least |
3148 | * PAGE_SIZE aligned. |
3149 | */ |
3150 | gp->init_block = (struct gem_init_block *) |
3151 | pci_alloc_consistent(pdev, sizeof(struct gem_init_block), |
3152 | &gp->gblock_dvma); |
3153 | if (!gp->init_block) { |
3154 | printk(KERN_ERR PFX "Cannot allocate init block, " |
3155 | "aborting.\n"); |
3156 | err = -ENOMEM; |
3157 | goto err_out_iounmap; |
3158 | } |
3159 | |
3160 | if (gem_get_device_address(gp)) |
3161 | goto err_out_free_consistent; |
3162 | |
3163 | dev->netdev_ops = &gem_netdev_ops; |
3164 | netif_napi_add(dev, &gp->napi, gem_poll, 64); |
3165 | dev->ethtool_ops = &gem_ethtool_ops; |
3166 | dev->watchdog_timeo = 5 * HZ; |
3167 | dev->irq = pdev->irq; |
3168 | dev->dma = 0; |
3169 | |
3170 | /* Set that now, in case PM kicks in now */ |
3171 | pci_set_drvdata(pdev, dev); |
3172 | |
3173 | /* Detect & init the PHY and start autoneg; we release the cell
3174 |  * now too, it will be managed by whoever needs it.
3175 |  */
3176 | gem_init_phy(gp); |
3177 | |
3178 | spin_lock_irq(&gp->lock); |
3179 | gem_put_cell(gp); |
3180 | spin_unlock_irq(&gp->lock); |
3181 | |
3182 | /* Register with kernel */ |
3183 | if (register_netdev(dev)) { |
3184 | printk(KERN_ERR PFX "Cannot register net device, " |
3185 | "aborting.\n"); |
3186 | err = -ENOMEM; |
3187 | goto err_out_free_consistent; |
3188 | } |
3189 | |
3190 | printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", |
3191 | dev->name, dev->dev_addr); |
3192 | |
3193 | if (gp->phy_type == phy_mii_mdio0 || |
3194 | gp->phy_type == phy_mii_mdio1) |
3195 | printk(KERN_INFO "%s: Found %s PHY\n", dev->name, |
3196 | gp->phy_mii.def ? gp->phy_mii.def->name : "no"); |
3197 | |
3198 | /* GEM can do it all... */ |
3199 | dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; |
3200 | if (pci_using_dac) |
3201 | dev->features |= NETIF_F_HIGHDMA; |
3202 | |
3203 | return 0; |
3204 | |
3205 | err_out_free_consistent: |
3206 | gem_remove_one(pdev); |
3207 | err_out_iounmap: |
3208 | gem_put_cell(gp); |
3209 | iounmap(gp->regs); |
3210 | |
3211 | err_out_free_res: |
3212 | pci_release_regions(pdev); |
3213 | |
3214 | err_out_free_netdev: |
3215 | free_netdev(dev); |
3216 | err_disable_device: |
3217 | pci_disable_device(pdev); |
3218 | return err; |
3219 | |
3220 | } |
3221 | |
3222 | |
3223 | static struct pci_driver gem_driver = { |
3224 | .name = GEM_MODULE_NAME, |
3225 | .id_table = gem_pci_tbl, |
3226 | .probe = gem_init_one, |
3227 | .remove = gem_remove_one, |
3228 | #ifdef CONFIG_PM |
3229 | .suspend = gem_suspend, |
3230 | .resume = gem_resume, |
3231 | #endif /* CONFIG_PM */ |
3232 | }; |
3233 | |
3234 | static int __init gem_init(void) |
3235 | { |
3236 | return pci_register_driver(&gem_driver); |
3237 | } |
3238 | |
3239 | static void __exit gem_cleanup(void) |
3240 | { |
3241 | pci_unregister_driver(&gem_driver); |
3242 | } |
3243 | |
3244 | module_init(gem_init); |
3245 | module_exit(gem_cleanup); |
3246 |