/*
 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
 * Ethernet adapters. Based on earlier sk98lin, e100 and
 * FreeBSD if_sk drivers.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/mii.h>
#include <linux/slab.h>
#include <asm/irq.h>

#include "skge.h"

#define DRV_NAME		"skge"
#define DRV_VERSION		"1.13"

#define DEFAULT_TX_RING_SIZE	128
#define DEFAULT_RX_RING_SIZE	512
#define MAX_TX_RING_SIZE	1024
#define TX_LOW_WATER		(MAX_SKB_FRAGS + 1)
#define MAX_RX_RING_SIZE	4096
#define RX_COPY_THRESHOLD	128
#define RX_BUF_SIZE		1536
#define PHY_RETRIES		1000
#define ETH_JUMBO_MTU		9000
#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define BLINK_MS		250
#define LINK_HZ			HZ

#define SKGE_EEPROM_MAGIC	0x9933aabb


MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN);

static int debug = -1;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* DGE-530T */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) },	/* Belkin */
	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);

static int skge_up(struct net_device *dev);
static int skge_down(struct net_device *dev);
static void skge_phy_reset(struct skge_port *skge);
static void skge_tx_clean(struct net_device *dev);
static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
static void genesis_get_stats(struct skge_port *skge, u64 *data);
static void yukon_get_stats(struct skge_port *skge, u64 *data);
static void yukon_init(struct skge_hw *hw, int port);
static void genesis_mac_init(struct skge_hw *hw, int port);
static void genesis_link_up(struct skge_port *skge);
static void skge_set_multicast(struct net_device *dev);

/* Avoid conditionals by using array */
static const int txqaddr[] = { Q_XA1, Q_XA2 };
static const int rxqaddr[] = { Q_R1, Q_R2 };
static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
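
/* The six tables above are indexed by port number (0 or 1), so per-port
 * register and interrupt selection needs no conditionals; e.g.
 * rxqaddr[skge->port] picks Q_R1 or Q_R2.
 */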

static int skge_get_regs_len(struct net_device *dev)
{
	return 0x4000;
}

/*
 * Returns copy of whole control register region
 * Note: skip RAM address register because accessing it will
 * cause bus hangs!
 */
static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct skge_port *skge = netdev_priv(dev);
	const void __iomem *io = skge->hw->regs;

	regs->version = 1;
	memset(p, 0, regs->len);
	memcpy_fromio(p, io, B3_RAM_ADDR);

	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
		      regs->len - B3_RI_WTO_R1);
}

/* Wake-on-LAN is only supported on Yukon chips with rev 1 or above */
static u32 wol_supported(const struct skge_hw *hw)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return 0;

	if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
		return 0;

	return WAKE_MAGIC | WAKE_PHY;
}

static void skge_wol_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl;

	skge_write16(hw, B0_CTST, CS_RST_CLR);
	skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);

	/* Turn on Vaux */
	skge_write8(hw, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);

	/* Workaround for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		u32 reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9;
		reg &= ~GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	skge_write32(hw, SK_REG(port, GPHY_CTRL),
		     GPC_DIS_SLEEP |
		     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
		     GPC_ANEG_1 | GPC_RST_SET);

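	/* Writing GPC_RST_SET above and GPC_RST_CLR below pulses the PHY
	 * reset line while the hardware-config strapping bits are held
	 * constant.
	 */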
	skge_write32(hw, SK_REG(port, GPHY_CTRL),
		     GPC_DIS_SLEEP |
		     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
		     GPC_ANEG_1 | GPC_RST_CLR);

	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	/* Force to 10/100; skge_reset will re-enable on resume */
	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
		     (PHY_AN_100FULL | PHY_AN_100HALF |
		      PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
	/* no 1000 HD/FD */
	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
	gm_phy_write(hw, port, PHY_MARV_CTRL,
		     PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
		     PHY_CT_RE_CFG | PHY_CT_DUP_MD);


	/* Set GMAC to no flow control and auto update for speed/duplex */
	gma_write16(hw, port, GM_GP_CTRL,
		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);

	/* Set WOL address */
	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
		    skge->netdev->dev_addr, ETH_ALEN);

	/* Turn on appropriate WOL control bits */
	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
	ctrl = 0;
	if (skge->wol & WAKE_PHY)
		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;

	if (skge->wol & WAKE_MAGIC)
		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;

	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);

	/* block receiver */
	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
}

static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct skge_port *skge = netdev_priv(dev);

	wol->supported = wol_supported(skge->hw);
	wol->wolopts = skge->wol;
}

static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if ((wol->wolopts & ~wol_supported(hw)) ||
	    !device_can_wakeup(&hw->pdev->dev))
		return -EOPNOTSUPP;

	skge->wol = wol->wolopts;

	device_set_wakeup_enable(&hw->pdev->dev, skge->wol);

	return 0;
}

/* Determine supported/advertised modes based on hardware.
 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
 */
static u32 skge_supported_modes(const struct skge_hw *hw)
{
	u32 supported;

	if (hw->copper) {
		supported = (SUPPORTED_10baseT_Half |
			     SUPPORTED_10baseT_Full |
			     SUPPORTED_100baseT_Half |
			     SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Half |
			     SUPPORTED_1000baseT_Full |
			     SUPPORTED_Autoneg |
			     SUPPORTED_TP);

		if (hw->chip_id == CHIP_ID_GENESIS)
			supported &= ~(SUPPORTED_10baseT_Half |
				       SUPPORTED_10baseT_Full |
				       SUPPORTED_100baseT_Half |
				       SUPPORTED_100baseT_Full);

		else if (hw->chip_id == CHIP_ID_YUKON)
			supported &= ~SUPPORTED_1000baseT_Half;
	} else
		supported = (SUPPORTED_1000baseT_Full |
			     SUPPORTED_1000baseT_Half |
			     SUPPORTED_FIBRE |
			     SUPPORTED_Autoneg);

	return supported;
}

static int skge_get_settings(struct net_device *dev,
			     struct ethtool_cmd *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = skge_supported_modes(hw);

	if (hw->copper) {
		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy_addr;
	} else
		ecmd->port = PORT_FIBRE;

	ecmd->advertising = skge->advertising;
	ecmd->autoneg = skge->autoneg;
	ecmd->speed = skge->speed;
	ecmd->duplex = skge->duplex;
	return 0;
}

static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	const struct skge_hw *hw = skge->hw;
	u32 supported = skge_supported_modes(hw);
	int err = 0;

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising = supported;
		skge->duplex = -1;
		skge->speed = -1;
	} else {
		u32 setting;

		switch (ecmd->speed) {
		case SPEED_1000:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_1000baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_1000baseT_Half;
			else
				return -EINVAL;
			break;
		case SPEED_100:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_100baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_100baseT_Half;
			else
				return -EINVAL;
			break;

		case SPEED_10:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_10baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_10baseT_Half;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if ((setting & supported) == 0)
			return -EINVAL;

		skge->speed = ecmd->speed;
		skge->duplex = ecmd->duplex;
	}

	skge->autoneg = ecmd->autoneg;
	skge->advertising = ecmd->advertising;

	if (netif_running(dev)) {
		skge_down(dev);
		err = skge_up(dev);
		if (err) {
			dev_close(dev);
			return err;
		}
	}

	return 0;
}

static void skge_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct skge_port *skge = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(skge->hw->pdev));
}

static const struct skge_stat {
	char name[ETH_GSTRING_LEN];
	u16 xmac_offset;
	u16 gma_offset;
} skge_stats[] = {
	{ "tx_bytes",		XM_TXO_OK_HI,	GM_TXO_OK_HI },
	{ "rx_bytes",		XM_RXO_OK_HI,	GM_RXO_OK_HI },

	{ "tx_broadcast",	XM_TXF_BC_OK,	GM_TXF_BC_OK },
	{ "rx_broadcast",	XM_RXF_BC_OK,	GM_RXF_BC_OK },
	{ "tx_multicast",	XM_TXF_MC_OK,	GM_TXF_MC_OK },
	{ "rx_multicast",	XM_RXF_MC_OK,	GM_RXF_MC_OK },
	{ "tx_unicast",		XM_TXF_UC_OK,	GM_TXF_UC_OK },
	{ "rx_unicast",		XM_RXF_UC_OK,	GM_RXF_UC_OK },
	{ "tx_mac_pause",	XM_TXF_MPAUSE,	GM_TXF_MPAUSE },
	{ "rx_mac_pause",	XM_RXF_MPAUSE,	GM_RXF_MPAUSE },

	{ "collisions",		XM_TXF_SNG_COL,	GM_TXF_SNG_COL },
	{ "multi_collisions",	XM_TXF_MUL_COL,	GM_TXF_MUL_COL },
	{ "aborted",		XM_TXF_ABO_COL,	GM_TXF_ABO_COL },
	{ "late_collision",	XM_TXF_LAT_COL,	GM_TXF_LAT_COL },
	{ "fifo_underrun",	XM_TXE_FIFO_UR,	GM_TXE_FIFO_UR },
	{ "fifo_overflow",	XM_RXE_FIFO_OV,	GM_RXE_FIFO_OV },

	{ "rx_toolong",		XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
	{ "rx_jabber",		XM_RXF_JAB_PKT,	GM_RXF_JAB_PKT },
	{ "rx_runt",		XM_RXE_RUNT,	GM_RXE_FRAG },
	{ "rx_too_long",	XM_RXF_LNG_ERR,	GM_RXF_LNG_ERR },
	{ "rx_fcs_error",	XM_RXF_FCS_ERR,	GM_RXF_FCS_ERR },
};

static int skge_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(skge_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void skge_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->hw->chip_id == CHIP_ID_GENESIS)
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);
}

/* Use hardware MIB variables for critical path statistics and for
 * transmit feedback that is not reported at interrupt time.
 * Other errors are accounted for in the interrupt handler.
 */
static struct net_device_stats *skge_get_stats(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	u64 data[ARRAY_SIZE(skge_stats)];

	if (skge->hw->chip_id == CHIP_ID_GENESIS)
		genesis_get_stats(skge, data);
	else
		yukon_get_stats(skge, data);

	dev->stats.tx_bytes = data[0];
	dev->stats.rx_bytes = data[1];
	dev->stats.tx_packets = data[2] + data[4] + data[6];
	dev->stats.rx_packets = data[3] + data[5] + data[7];
	dev->stats.multicast = data[3] + data[5];
	dev->stats.collisions = data[10];
	dev->stats.tx_aborted_errors = data[12];

	return &dev->stats;
}
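
/* The data[] indices used above follow the skge_stats[] table: 0/1 are
 * the byte counters, 2/4/6 the tx broadcast/multicast/unicast frame
 * counts, 3/5/7 their rx counterparts, 10 collisions and 12 aborted
 * (excessive-collision) frames.
 */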

static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       skge_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static void skge_get_ring_param(struct net_device *dev,
				struct ethtool_ringparam *p)
{
	struct skge_port *skge = netdev_priv(dev);

	p->rx_max_pending = MAX_RX_RING_SIZE;
	p->tx_max_pending = MAX_TX_RING_SIZE;
	p->rx_mini_max_pending = 0;
	p->rx_jumbo_max_pending = 0;

	p->rx_pending = skge->rx_ring.count;
	p->tx_pending = skge->tx_ring.count;
	p->rx_mini_pending = 0;
	p->rx_jumbo_pending = 0;
}

static int skge_set_ring_param(struct net_device *dev,
			       struct ethtool_ringparam *p)
{
	struct skge_port *skge = netdev_priv(dev);
	int err = 0;

	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
	    p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
		return -EINVAL;

	skge->rx_ring.count = p->rx_pending;
	skge->tx_ring.count = p->tx_pending;

	if (netif_running(dev)) {
		skge_down(dev);
		err = skge_up(dev);
		if (err)
			dev_close(dev);
	}

	return err;
}

static u32 skge_get_msglevel(struct net_device *netdev)
{
	struct skge_port *skge = netdev_priv(netdev);
	return skge->msg_enable;
}

static void skge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct skge_port *skge = netdev_priv(netdev);
	skge->msg_enable = value;
}

static int skge_nway_reset(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
		return -EINVAL;

	skge_phy_reset(skge);
	return 0;
}

static int skge_set_sg(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;
	return ethtool_op_set_sg(dev, data);
}

static int skge_set_tx_csum(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;

	return ethtool_op_set_tx_csum(dev, data);
}

static u32 skge_get_rx_csum(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	return skge->rx_csum;
}

/* Only Yukon supports checksum offload. */
static int skge_set_rx_csum(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);

	if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
		return -EOPNOTSUPP;

	skge->rx_csum = data;
	return 0;
}

static void skge_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);

	ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
			  (skge->flow_control == FLOW_MODE_SYM_OR_REM));
	ecmd->tx_pause = (ecmd->rx_pause ||
			  (skge->flow_control == FLOW_MODE_LOC_SEND));

	ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
}

static int skge_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct ethtool_pauseparam old;
	int err = 0;

	skge_get_pauseparam(dev, &old);

	if (ecmd->autoneg != old.autoneg)
		skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
	else {
		if (ecmd->rx_pause && ecmd->tx_pause)
			skge->flow_control = FLOW_MODE_SYMMETRIC;
		else if (ecmd->rx_pause && !ecmd->tx_pause)
			skge->flow_control = FLOW_MODE_SYM_OR_REM;
		else if (!ecmd->rx_pause && ecmd->tx_pause)
			skge->flow_control = FLOW_MODE_LOC_SEND;
		else
			skge->flow_control = FLOW_MODE_NONE;
	}

	if (netif_running(dev)) {
		skge_down(dev);
		err = skge_up(dev);
		if (err) {
			dev_close(dev);
			return err;
		}
	}

	return 0;
}

/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
	return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
}

/* Chip HZ to microseconds */
static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
{
	return (ticks * 1000) / hwkhz(hw);
}

/* Microseconds to chip HZ */
static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
{
	return hwkhz(hw) * usec / 1000;
}
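
/* Worked example: on Yukon the timer clock is 78125 kHz, so
 * skge_usecs2clk(hw, 25) = 78125 * 25 / 1000 = 1953 ticks, and
 * skge_clk2usec(hw, 1953) = 1953 * 1000 / 78125 = 24 us (the round
 * trip loses a microsecond to integer division).
 */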

static int skge_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	ecmd->rx_coalesce_usecs = 0;
	ecmd->tx_coalesce_usecs = 0;

	if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) {
		u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI));
		u32 msk = skge_read32(hw, B2_IRQM_MSK);

		if (msk & rxirqmask[port])
			ecmd->rx_coalesce_usecs = delay;
		if (msk & txirqmask[port])
			ecmd->tx_coalesce_usecs = delay;
	}

	return 0;
}

/* Note: interrupt timer is per board, but can be turned on/off per port */
static int skge_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 msk = skge_read32(hw, B2_IRQM_MSK);
	u32 delay = 25;

	if (ecmd->rx_coalesce_usecs == 0)
		msk &= ~rxirqmask[port];
	else if (ecmd->rx_coalesce_usecs < 25 ||
		 ecmd->rx_coalesce_usecs > 33333)
		return -EINVAL;
	else {
		msk |= rxirqmask[port];
		delay = ecmd->rx_coalesce_usecs;
	}

	if (ecmd->tx_coalesce_usecs == 0)
		msk &= ~txirqmask[port];
	else if (ecmd->tx_coalesce_usecs < 25 ||
		 ecmd->tx_coalesce_usecs > 33333)
		return -EINVAL;
	else {
		msk |= txirqmask[port];
		delay = min(delay, ecmd->tx_coalesce_usecs);
	}

	skge_write32(hw, B2_IRQM_MSK, msk);
	if (msk == 0)
		skge_write32(hw, B2_IRQM_CTRL, TIM_STOP);
	else {
		skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay));
		skge_write32(hw, B2_IRQM_CTRL, TIM_START);
	}
	return 0;
}

enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST };
static void skge_led(struct skge_port *skge, enum led_mode mode)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		switch (mode) {
		case LED_MODE_OFF:
			if (hw->phy_type == SK_PHY_BCOM)
				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
			else {
				skge_write32(hw, SK_REG(port, TX_LED_VAL), 0);
				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF);
			}
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
			break;

		case LED_MODE_ON:
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
			skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);

			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
			skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);

			break;

		case LED_MODE_TST:
			skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
			skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
			skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);

			if (hw->phy_type == SK_PHY_BCOM)
				xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
			else {
				skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON);
				skge_write32(hw, SK_REG(port, TX_LED_VAL), 100);
				skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
			}

		}
	} else {
		switch (mode) {
		case LED_MODE_OFF:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_OFF) |
				     PHY_M_LED_MO_10(MO_LED_OFF) |
				     PHY_M_LED_MO_100(MO_LED_OFF) |
				     PHY_M_LED_MO_1000(MO_LED_OFF) |
				     PHY_M_LED_MO_RX(MO_LED_OFF));
			break;
		case LED_MODE_ON:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL,
				     PHY_M_LED_PULS_DUR(PULS_170MS) |
				     PHY_M_LED_BLINK_RT(BLINK_84MS) |
				     PHY_M_LEDC_TX_CTRL |
				     PHY_M_LEDC_DP_CTRL);

			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_RX(MO_LED_OFF) |
				     (skge->speed == SPEED_100 ?
				      PHY_M_LED_MO_100(MO_LED_ON) : 0));
			break;
		case LED_MODE_TST:
			gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
			gm_phy_write(hw, port, PHY_MARV_LED_OVER,
				     PHY_M_LED_MO_DUP(MO_LED_ON) |
				     PHY_M_LED_MO_10(MO_LED_ON) |
				     PHY_M_LED_MO_100(MO_LED_ON) |
				     PHY_M_LED_MO_1000(MO_LED_ON) |
				     PHY_M_LED_MO_RX(MO_LED_ON));
		}
	}
	spin_unlock_bh(&hw->phy_lock);
}

/* Blink the LEDs to help locate the board */
static int skge_phys_id(struct net_device *dev, u32 data)
{
	struct skge_port *skge = netdev_priv(dev);
	unsigned long ms;
	enum led_mode mode = LED_MODE_TST;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000;
	else
		ms = data * 1000;

	while (ms > 0) {
		skge_led(skge, mode);
		mode ^= LED_MODE_TST;

		if (msleep_interruptible(BLINK_MS))
			break;
		ms -= BLINK_MS;
	}

	/* back to regular LED state */
	skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);

	return 0;
}

static int skge_get_eeprom_len(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	u32 reg2;

	pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
	return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
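
/* Example of the decode above: the VPD ROM size field sits at bit 14
 * of PCI_DEV_REG2 (per the shift), so a field value of 1 gives
 * 1 << (1 + 8) = 512 bytes of VPD EEPROM. The exact field width is
 * defined by PCI_VPD_ROM_SZ in skge.h.
 */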

static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
{
	u32 val;

	pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);

	do {
		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
	} while (!(offset & PCI_VPD_ADDR_F));

	pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
	return val;
}

static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
{
	pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
	pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
			      offset | PCI_VPD_ADDR_F);

	do {
		pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
	} while (offset & PCI_VPD_ADDR_F);
}
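
/* VPD access handshake used by the two helpers above: a read starts
 * with PCI_VPD_ADDR_F clear and the flag goes to 1 when the data
 * dword is ready; a write sets the flag and the device clears it on
 * completion. Both helpers spin on the flag without a timeout.
 */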

static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct pci_dev *pdev = skge->hw->pdev;
	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
	int length = eeprom->len;
	u16 offset = eeprom->offset;

	if (!cap)
		return -EINVAL;

	eeprom->magic = SKGE_EEPROM_MAGIC;

	while (length > 0) {
		u32 val = skge_vpd_read(pdev, cap, offset);
		int n = min_t(int, length, sizeof(val));

		memcpy(data, &val, n);
		length -= n;
		data += n;
		offset += n;
	}
	return 0;
}

static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct skge_port *skge = netdev_priv(dev);
	struct pci_dev *pdev = skge->hw->pdev;
	int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
	int length = eeprom->len;
	u16 offset = eeprom->offset;

	if (!cap)
		return -EINVAL;

	if (eeprom->magic != SKGE_EEPROM_MAGIC)
		return -EINVAL;

	while (length > 0) {
		u32 val;
		int n = min_t(int, length, sizeof(val));

		if (n < sizeof(val))
			val = skge_vpd_read(pdev, cap, offset);
		memcpy(&val, data, n);

		skge_vpd_write(pdev, cap, offset, val);

		length -= n;
		data += n;
		offset += n;
	}
	return 0;
}

static const struct ethtool_ops skge_ethtool_ops = {
	.get_settings	= skge_get_settings,
	.set_settings	= skge_set_settings,
	.get_drvinfo	= skge_get_drvinfo,
	.get_regs_len	= skge_get_regs_len,
	.get_regs	= skge_get_regs,
	.get_wol	= skge_get_wol,
	.set_wol	= skge_set_wol,
	.get_msglevel	= skge_get_msglevel,
	.set_msglevel	= skge_set_msglevel,
	.nway_reset	= skge_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_eeprom_len	= skge_get_eeprom_len,
	.get_eeprom	= skge_get_eeprom,
	.set_eeprom	= skge_set_eeprom,
	.get_ringparam	= skge_get_ring_param,
	.set_ringparam	= skge_set_ring_param,
	.get_pauseparam	= skge_get_pauseparam,
	.set_pauseparam	= skge_set_pauseparam,
	.get_coalesce	= skge_get_coalesce,
	.set_coalesce	= skge_set_coalesce,
	.set_sg		= skge_set_sg,
	.set_tx_csum	= skge_set_tx_csum,
	.get_rx_csum	= skge_get_rx_csum,
	.set_rx_csum	= skge_set_rx_csum,
	.get_strings	= skge_get_strings,
	.phys_id	= skge_phys_id,
	.get_sset_count	= skge_get_sset_count,
	.get_ethtool_stats = skge_get_ethtool_stats,
};

/*
 * Allocate ring elements and chain them together
 * One-to-one association of board descriptors with ring elements
 */
static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
{
	struct skge_tx_desc *d;
	struct skge_element *e;
	int i;

	ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
	if (!ring->start)
		return -ENOMEM;

	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
		e->desc = d;
		if (i == ring->count - 1) {
			e->next = ring->start;
			d->next_offset = base;
		} else {
			e->next = e + 1;
			d->next_offset = base + (i+1) * sizeof(*d);
		}
	}
	ring->to_use = ring->to_clean = ring->start;

	return 0;
}
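
/* The software circle (e->next) is mirrored in hardware: each
 * descriptor's next_offset holds the bus address of its successor,
 * and the last descriptor points back to 'base', so the BMU can
 * follow the ring without CPU help.
 */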

/* Allocate and setup a new buffer for receiving */
static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
			  struct sk_buff *skb, unsigned int bufsize)
{
	struct skge_rx_desc *rd = e->desc;
	u64 map;

	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
			     PCI_DMA_FROMDEVICE);

	rd->dma_lo = map;
	rd->dma_hi = map >> 32;
	e->skb = skb;
	rd->csum1_start = ETH_HLEN;
	rd->csum2_start = ETH_HLEN;
	rd->csum1 = 0;
	rd->csum2 = 0;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, bufsize);
}

/* Resume receiving with the existing skb.
 * Note: the DMA address is not changed by the chip,
 * and the MTU is not changed while the receiver is active.
 */
static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
{
	struct skge_rx_desc *rd = e->desc;

	rd->csum2 = 0;
	rd->csum2_start = ETH_HLEN;

	wmb();

	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
}
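
/* In both helpers above, the wmb() guarantees that every other
 * descriptor field is visible to the device before the control word
 * passes ownership (BMU_OWN) of the descriptor to the DMA engine.
 */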


/* Free all buffers in receive ring, assumes receiver stopped */
static void skge_rx_clean(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	e = ring->start;
	do {
		struct skge_rx_desc *rd = e->desc;
		rd->control = 0;
		if (e->skb) {
			pci_unmap_single(hw->pdev,
					 pci_unmap_addr(e, mapaddr),
					 pci_unmap_len(e, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	} while ((e = e->next) != ring->start);
}


/* Allocate buffers for receive ring
 * For receive: to_clean is next received frame.
 */
static int skge_rx_fill(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;

	e = ring->start;
	do {
		struct sk_buff *skb;

		skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
					 GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		skb_reserve(skb, NET_IP_ALIGN);
		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
	} while ((e = e->next) != ring->start);

	ring->to_clean = ring->start;
	return 0;
}
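
/* The skb_reserve(skb, NET_IP_ALIGN) above keeps the IP header
 * word-aligned once the 14-byte Ethernet header has been received
 * into the buffer.
 */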

static const char *skge_pause(enum pause_status status)
{
	switch (status) {
	case FLOW_STAT_NONE:
		return "none";
	case FLOW_STAT_REM_SEND:
		return "rx only";
	case FLOW_STAT_LOC_SEND:
		return "tx only";
	case FLOW_STAT_SYMMETRIC:	/* Both stations may send PAUSE */
		return "both";
	default:
		return "indeterminate";
	}
}


static void skge_link_up(struct skge_port *skge)
{
	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);

	netif_carrier_on(skge->netdev);
	netif_wake_queue(skge->netdev);

	netif_info(skge, link, skge->netdev,
		   "Link is up at %d Mbps, %s duplex, flow control %s\n",
		   skge->speed,
		   skge->duplex == DUPLEX_FULL ? "full" : "half",
		   skge_pause(skge->flow_status));
}

static void skge_link_down(struct skge_port *skge)
{
	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	netif_carrier_off(skge->netdev);
	netif_stop_queue(skge->netdev);

	netif_info(skge, link, skge->netdev, "Link is down\n");
}


static void xm_link_down(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);

	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);

	if (netif_carrier_ok(dev))
		skge_link_down(skge);
}

static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	*val = xm_read16(hw, port, XM_PHY_DATA);

	if (hw->phy_type == SK_PHY_XMAC)
		goto ready;

	for (i = 0; i < PHY_RETRIES; i++) {
		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
			goto ready;
		udelay(1);
	}

	return -ETIMEDOUT;
 ready:
	*val = xm_read16(hw, port, XM_PHY_DATA);

	return 0;
}

static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
	u16 v = 0;
	if (__xm_phy_read(hw, port, reg, &v))
		pr_warning("%s: phy read timed out\n", hw->dev[port]->name);
	return v;
}

static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
{
	int i;

	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			goto ready;
		udelay(1);
	}
	return -EIO;

 ready:
	xm_write16(hw, port, XM_PHY_DATA, val);
	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
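
/* XMAC PHY (MDIO) access pattern used above: write the register
 * number to XM_PHY_ADDR, poll XM_MMU_CMD for XM_MMU_PHY_RDY (reads)
 * or !XM_MMU_PHY_BUSY (writes), then transfer the data through
 * XM_PHY_DATA. The internal XMAC PHY answers reads immediately,
 * which is why __xm_phy_read() skips the poll for it.
 */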

static void genesis_init(struct skge_hw *hw)
{
	/* set blink source counter */
	skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100);
	skge_write8(hw, B2_BSC_CTRL, BSC_START);

	/* configure mac arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure mac arbiter timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53);
	skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* configure packet arbiter timeout */
	skge_write16(hw, B3_PA_CTRL, PA_RST_CLR);
	skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
	skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
}

static void genesis_reset(struct skge_hw *hw, int port)
{
	const u8 zero[8] = { 0 };
	u32 reg;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);

	/* reset the statistics module */
	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
	xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
	xm_write32(hw, port, XM_MODE, 0);	/* clear Mode Reg */
	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */

	/* disable Broadcom PHY IRQ */
	if (hw->phy_type == SK_PHY_BCOM)
		xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);

	xm_outhash(hw, port, XM_HSM, zero);

	/* Flush TX and RX fifo */
	reg = xm_read32(hw, port, XM_MODE);
	xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF);
	xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
}


/* Convert mode to MII values */
static const u16 phy_pause_map[] = {
	[FLOW_MODE_NONE] =	0,
	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC] =	PHY_AN_PAUSE_CAP,
	[FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
};

/* special defines for FIBER (88E1011S only) */
static const u16 fiber_pause_map[] = {
	[FLOW_MODE_NONE] =	PHY_X_P_NO_PAUSE,
	[FLOW_MODE_LOC_SEND] =	PHY_X_P_ASYM_MD,
	[FLOW_MODE_SYMMETRIC] =	PHY_X_P_SYM_MD,
	[FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD,
};
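
/* Both tables are indexed by the driver's flow-control mode:
 * phy_pause_map supplies the copper autoneg advertisement bits,
 * fiber_pause_map the equivalent 1000BASE-X pause encoding.
 */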


/* Check status of Broadcom phy link */
static void bcom_check_link(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u16 status;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_BCOM_STAT);
	status = xm_phy_read(hw, port, PHY_BCOM_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);
		return;
	}

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 lpa, aux;

		if (!(status & PHY_ST_AN_OVER))
			return;

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			netdev_notice(dev, "remote fault\n");
			return;
		}

		aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);

		/* Check Duplex mismatch */
		switch (aux & PHY_B_AS_AN_RES_MSK) {
		case PHY_B_RES_1000FD:
			skge->duplex = DUPLEX_FULL;
			break;
		case PHY_B_RES_1000HD:
			skge->duplex = DUPLEX_HALF;
			break;
		default:
			netdev_notice(dev, "duplex mismatch\n");
			return;
		}

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (aux & PHY_B_AS_PAUSE_MSK) {
		case PHY_B_AS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			break;
		case PHY_B_AS_PRR:
			skge->flow_status = FLOW_STAT_REM_SEND;
			break;
		case PHY_B_AS_PRT:
			skge->flow_status = FLOW_STAT_LOC_SEND;
			break;
		default:
			skge->flow_status = FLOW_STAT_NONE;
		}
		skge->speed = SPEED_1000;
	}

	if (!netif_carrier_ok(dev))
		genesis_link_up(skge);
}

/* The Broadcom 5400 only supports gigabit! SysKonnect did not put an
 * additional PHY on for 100 or 10 Mbit operation
 */
static void bcom_phy_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	u16 id1, r, ext, ctl;

	/* magic workaround patterns for Broadcom */
	static const struct {
		u16 reg;
		u16 val;
	} A1hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
		{ 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
		{ 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
		{ 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	}, C0hack[] = {
		{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
		{ 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
	};

	/* read ID from external PHY (all have the same address) */
	id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);

	/* Optimize MDIO transfer by suppressing preamble. */
	r = xm_read16(hw, port, XM_MMU_CMD);
	r |= XM_MMU_NO_PRE;
	xm_write16(hw, port, XM_MMU_CMD, r);

	switch (id1) {
	case PHY_BCOM_ID1_C0:
		/*
		 * Workaround BCOM Errata for the C0 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(C0hack); i++)
			xm_phy_write(hw, port,
				     C0hack[i].reg, C0hack[i].val);

		break;
	case PHY_BCOM_ID1_A1:
		/*
		 * Workaround BCOM Errata for the A1 type.
		 * Write magic patterns to reserved registers.
		 */
		for (i = 0; i < ARRAY_SIZE(A1hack); i++)
			xm_phy_write(hw, port,
				     A1hack[i].reg, A1hack[i].val);
		break;
	}

	/*
	 * Workaround BCOM Errata (#10523) for all BCom PHYs.
	 * Disable Power Management after reset.
	 */
	r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
	r |= PHY_B_AC_DIS_PM;
	xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);

	/* Dummy read */
	xm_read16(hw, port, XM_ISRC);

	ext = PHY_B_PEC_EN_LTR; /* enable TX LED */
	ctl = PHY_CT_SP1000;	/* always 1000 Mbit */

	if (skge->autoneg == AUTONEG_ENABLE) {
		/*
		 * Workaround BCOM Errata #1 for the C5 type.
		 * 1000Base-T Link Acquisition Failure in Slave Mode
		 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
		 */
		u16 adv = PHY_B_1000C_RD;
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			adv |= PHY_B_1000C_AHD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			adv |= PHY_B_1000C_AFD;
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);

		ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		if (skge->duplex == DUPLEX_FULL)
			ctl |= PHY_CT_DUP_MD;
		/* Force to slave */
		xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
	}

	/* Set autonegotiation pause parameters */
	xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
		     phy_pause_map[skge->flow_control] | PHY_AN_CSMA);

	/* Handle Jumbo frames */
	if (hw->dev[port]->mtu > ETH_DATA_LEN) {
		xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
			     PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);

		ext |= PHY_B_PEC_HIGH_LA;

	}

	xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
	xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);

	/* Use link status change interrupt */
	xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
}

static void xm_phy_init(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl = 0;

	if (skge->autoneg == AUTONEG_ENABLE) {
		if (skge->advertising & ADVERTISED_1000baseT_Half)
			ctrl |= PHY_X_AN_HD;
		if (skge->advertising & ADVERTISED_1000baseT_Full)
			ctrl |= PHY_X_AN_FD;

		ctrl |= fiber_pause_map[skge->flow_control];

		xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);

		/* Restart Auto-negotiation */
		ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* Set DuplexMode in Config register */
		if (skge->duplex == DUPLEX_FULL)
			ctrl |= PHY_CT_DUP_MD;
		/*
		 * Do NOT enable Auto-negotiation here. This would hold
		 * the link down because no IDLEs are transmitted
		 */
	}

	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);

	/* Poll PHY for status changes */
	mod_timer(&skge->link_timer, jiffies + LINK_HZ);
}

static int xm_check_link(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 status;

	/* read twice because of latch */
	xm_phy_read(hw, port, PHY_XMAC_STAT);
	status = xm_phy_read(hw, port, PHY_XMAC_STAT);

	if ((status & PHY_ST_LSYNC) == 0) {
		xm_link_down(hw, port);
		return 0;
	}

	if (skge->autoneg == AUTONEG_ENABLE) {
		u16 lpa, res;

		if (!(status & PHY_ST_AN_OVER))
			return 0;

		lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
		if (lpa & PHY_B_AN_RF) {
			netdev_notice(dev, "remote fault\n");
			return 0;
		}

		res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);

		/* Check Duplex mismatch */
		switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) {
		case PHY_X_RS_FD:
			skge->duplex = DUPLEX_FULL;
			break;
		case PHY_X_RS_HD:
			skge->duplex = DUPLEX_HALF;
			break;
		default:
			netdev_notice(dev, "duplex mismatch\n");
			return 0;
		}

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
		     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
		    (lpa & PHY_X_P_SYM_MD))
			skge->flow_status = FLOW_STAT_SYMMETRIC;
		else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
			/* Enable PAUSE receive, disable PAUSE transmit */
			skge->flow_status = FLOW_STAT_REM_SEND;
		else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
			/* Disable PAUSE receive, enable PAUSE transmit */
			skge->flow_status = FLOW_STAT_LOC_SEND;
		else
			skge->flow_status = FLOW_STAT_NONE;

		skge->speed = SPEED_1000;
	}

	if (!netif_carrier_ok(dev))
		genesis_link_up(skge);
	return 1;
}

/* Poll to check for link coming up.
 *
 * Since the internal PHY is wired to a level-triggered pin, we can't
 * get an interrupt when carrier is detected, so we need to poll for
 * the link coming up.
 */
static void xm_link_timer(unsigned long arg)
{
	struct skge_port *skge = (struct skge_port *) arg;
	struct net_device *dev = skge->netdev;
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;
	unsigned long flags;

	if (!netif_running(dev))
		return;

	spin_lock_irqsave(&hw->phy_lock, flags);

	/*
	 * Verify the link by checking the GPIO register three times.
	 * This pin has the signal from the link_sync pin connected to it.
	 */
	for (i = 0; i < 3; i++) {
		if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
			goto link_down;
	}

	/* Re-enable interrupt to detect link down */
	if (xm_check_link(dev)) {
		u16 msk = xm_read16(hw, port, XM_IMSK);
		msk &= ~XM_IS_INP_ASS;
		xm_write16(hw, port, XM_IMSK, msk);
		xm_read16(hw, port, XM_ISRC);
	} else {
link_down:
		mod_timer(&skge->link_timer,
			  round_jiffies(jiffies + LINK_HZ));
	}
	spin_unlock_irqrestore(&hw->phy_lock, flags);
}

static void genesis_mac_init(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
	int i;
	u32 r;
	const u8 zero[6] = { 0 };

	for (i = 0; i < 10; i++) {
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_SET_MAC_RST);
		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
			goto reset_ok;
		udelay(1);
	}

	netdev_warn(dev, "genesis reset failed\n");

 reset_ok:
	/* Unreset the XMAC. */
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (hw->phy_type != SK_PHY_XMAC) {
		/* Take external Phy out of reset */
		r = skge_read32(hw, B2_GP_IO);
		if (port == 0)
			r |= GP_DIR_0|GP_IO_0;
		else
			r |= GP_DIR_2|GP_IO_2;

		skge_write32(hw, B2_GP_IO, r);

		/* Enable GMII interface */
		xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
	}


	switch (hw->phy_type) {
	case SK_PHY_XMAC:
		xm_phy_init(skge);
		break;
	case SK_PHY_BCOM:
		bcom_phy_init(skge);
		bcom_check_link(hw, port);
	}

	/* Set Station Address */
	xm_outaddr(hw, port, XM_SA, dev->dev_addr);

	/* We don't use match addresses, so clear them */
	for (i = 1; i < 16; i++)
		xm_outaddr(hw, port, XM_EXM(i), zero);

	/* Clear MIB counters */
	xm_write16(hw, port, XM_STAT_CMD,
		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
	/* Clear two times according to Errata #3 */
	xm_write16(hw, port, XM_STAT_CMD,
		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);

	/* configure Rx High Water Mark (XM_RX_HI_WM) */
	xm_write16(hw, port, XM_RX_HI_WM, 1450);

	/* We don't need the FCS appended to the packet. */
	r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
	if (jumbo)
		r |= XM_RX_BIG_PK_OK;

	if (skge->duplex == DUPLEX_HALF) {
		/*
		 * If in manual half duplex mode the other side might be in
		 * full duplex mode, so ignore if a carrier extension is not seen
		 * on frames received
		 */
		r |= XM_RX_DIS_CEXT;
	}
	xm_write16(hw, port, XM_RX_CMD, r);

	/* We want short frames padded to 60 bytes. */
	xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);

	/* Increase threshold for jumbo frames on dual port */
	if (hw->ports > 1 && jumbo)
		xm_write16(hw, port, XM_TX_THR, 1020);
	else
		xm_write16(hw, port, XM_TX_THR, 512);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	xm_write32(hw, port, XM_MODE, XM_DEF_MODE);


	/*
	 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
	 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
	 *   and 'Octets Rx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);

	/*
	 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
	 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
	 *   and 'Octets Tx OK Hi Cnt Ov'.
	 */
	xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);

	/* Configure MAC arbiter */
	skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);

	/* configure timeout values */
	skge_write8(hw, B3_MA_TOINI_RX1, 72);
	skge_write8(hw, B3_MA_TOINI_RX2, 72);
	skge_write8(hw, B3_MA_TOINI_TX1, 72);
	skge_write8(hw, B3_MA_TOINI_TX2, 72);

	skge_write8(hw, B3_MA_RCINI_RX1, 0);
	skge_write8(hw, B3_MA_RCINI_RX2, 0);
	skge_write8(hw, B3_MA_RCINI_TX1, 0);
	skge_write8(hw, B3_MA_RCINI_TX2, 0);

	/* Configure Rx MAC FIFO */
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
	skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
	skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);

	if (jumbo) {
		/* Enable frame flushing if jumbo frames used */
		skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
	} else {
		/* enable timeout timers if normal frames */
		skge_write16(hw, B3_PA_CTRL,
			     (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
	}
}
1709 | |
1710 | static void genesis_stop(struct skge_port *skge) |
1711 | { |
1712 | struct skge_hw *hw = skge->hw; |
1713 | int port = skge->port; |
1714 | unsigned retries = 1000; |
1715 | u16 cmd; |
1716 | |
1717 | /* Disable Tx and Rx */ |
1718 | cmd = xm_read16(hw, port, XM_MMU_CMD); |
1719 | cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); |
1720 | xm_write16(hw, port, XM_MMU_CMD, cmd); |
1721 | |
1722 | genesis_reset(hw, port); |
1723 | |
1724 | /* Clear Tx packet arbiter timeout IRQ */ |
1725 | skge_write16(hw, B3_PA_CTRL, |
1726 | port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); |
1727 | |
1728 | /* Reset the MAC */ |
1729 | skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); |
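     | /* MFF_SET_MAC_RST appears to be self-clearing once the reset has |
     | * taken effect: strobe it and poll until the bit reads back clear |
     | * (bounded by 'retries'). |
     | */ |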
1730 | do { |
1731 | skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); |
1732 | if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) |
1733 | break; |
1734 | } while (--retries > 0); |
1735 | |
1736 | /* External PHYs require special handling */ |
1737 | if (hw->phy_type != SK_PHY_XMAC) { |
1738 | u32 reg = skge_read32(hw, B2_GP_IO); |
1739 | if (port == 0) { |
1740 | reg |= GP_DIR_0; |
1741 | reg &= ~GP_IO_0; |
1742 | } else { |
1743 | reg |= GP_DIR_2; |
1744 | reg &= ~GP_IO_2; |
1745 | } |
1746 | skge_write32(hw, B2_GP_IO, reg); |
1747 | skge_read32(hw, B2_GP_IO); |
1748 | } |
1749 | |
1750 | xm_write16(hw, port, XM_MMU_CMD, |
1751 | xm_read16(hw, port, XM_MMU_CMD) |
1752 | & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); |
1753 | |
1754 | xm_read16(hw, port, XM_MMU_CMD); |
1755 | } |
1756 | |
1757 | |
1758 | static void genesis_get_stats(struct skge_port *skge, u64 *data) |
1759 | { |
1760 | struct skge_hw *hw = skge->hw; |
1761 | int port = skge->port; |
1762 | int i; |
1763 | unsigned long timeout = jiffies + HZ; |
1764 | |
1765 | xm_write16(hw, port, |
1766 | XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); |
1767 | |
1768 | /* wait for update to complete */ |
1769 | while (xm_read16(hw, port, XM_STAT_CMD) |
1770 | & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { |
1771 | if (time_after(jiffies, timeout)) |
1772 | break; |
1773 | udelay(10); |
1774 | } |
1775 | |
1776 | /* special case for 64 bit octet counter */ |
1777 | data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 |
1778 | | xm_read32(hw, port, XM_TXO_OK_LO); |
1779 | data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 |
1780 | | xm_read32(hw, port, XM_RXO_OK_LO); |
1781 | |
1782 | for (i = 2; i < ARRAY_SIZE(skge_stats); i++) |
1783 | data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); |
1784 | } |
1785 | |
1786 | static void genesis_mac_intr(struct skge_hw *hw, int port) |
1787 | { |
1788 | struct net_device *dev = hw->dev[port]; |
1789 | struct skge_port *skge = netdev_priv(dev); |
1790 | u16 status = xm_read16(hw, port, XM_ISRC); |
1791 | |
1792 | netif_printk(skge, intr, KERN_DEBUG, skge->netdev, |
1793 | "mac interrupt status 0x%x\n", status); |
1794 | |
1795 | if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { |
1796 | xm_link_down(hw, port); |
1797 | mod_timer(&skge->link_timer, jiffies + 1); |
1798 | } |
1799 | |
1800 | if (status & XM_IS_TXF_UR) { |
1801 | xm_write32(hw, port, XM_MODE, XM_MD_FTF); |
1802 | ++dev->stats.tx_fifo_errors; |
1803 | } |
1804 | } |
1805 | |
1806 | static void genesis_link_up(struct skge_port *skge) |
1807 | { |
1808 | struct skge_hw *hw = skge->hw; |
1809 | int port = skge->port; |
1810 | u16 cmd, msk; |
1811 | u32 mode; |
1812 | |
1813 | cmd = xm_read16(hw, port, XM_MMU_CMD); |
1814 | |
1815 | /* |
1816 | * Enabling pause frame reception is required for 1000BASE-T |
1817 | * because the XMAC is not reset when the link goes down |
1818 | */ |
1819 | if (skge->flow_status == FLOW_STAT_NONE || |
1820 | skge->flow_status == FLOW_STAT_LOC_SEND) |
1821 | /* Disable Pause Frame Reception */ |
1822 | cmd |= XM_MMU_IGN_PF; |
1823 | else |
1824 | /* Enable Pause Frame Reception */ |
1825 | cmd &= ~XM_MMU_IGN_PF; |
1826 | |
1827 | xm_write16(hw, port, XM_MMU_CMD, cmd); |
1828 | |
1829 | mode = xm_read32(hw, port, XM_MODE); |
1830 | if (skge->flow_status == FLOW_STAT_SYMMETRIC || |
1831 | skge->flow_status == FLOW_STAT_LOC_SEND) { |
1832 | /* |
1833 | * Configure Pause Frame Generation |
1834 | * Use internal and external Pause Frame Generation. |
1835 | * Sending pause frames is edge triggered. |
1836 | * Send a Pause frame with the maximum pause time if |
1837 | * internal or external FIFO full condition occurs. |
1838 | * Send a zero pause time frame to re-start transmission. |
1839 | */ |
1840 | /* XM_PAUSE_DA = '010000C28001' (default) */ |
1841 | /* XM_MAC_PTIME = 0xffff (maximum) */ |
1842 | /* remember this value is defined in big endian (!) */ |
1843 | xm_write16(hw, port, XM_MAC_PTIME, 0xffff); |
1844 | |
1845 | mode |= XM_PAUSE_MODE; |
1846 | skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); |
1847 | } else { |
1848 | /* |
1849 | * Disabling pause frame generation is required for 1000BASE-T |
1850 | * because the XMAC is not reset when the link goes down |
1851 | */ |
1852 | /* Disable Pause Mode in Mode Register */ |
1853 | mode &= ~XM_PAUSE_MODE; |
1854 | |
1855 | skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); |
1856 | } |
1857 | |
1858 | xm_write32(hw, port, XM_MODE, mode); |
1859 | |
1860 | /* Turn on detection of Tx underrun */ |
1861 | msk = xm_read16(hw, port, XM_IMSK); |
1862 | msk &= ~XM_IS_TXF_UR; |
1863 | xm_write16(hw, port, XM_IMSK, msk); |
1864 | |
1865 | xm_read16(hw, port, XM_ISRC); |
1866 | |
1867 | /* get MMU Command Reg. */ |
1868 | cmd = xm_read16(hw, port, XM_MMU_CMD); |
1869 | if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) |
1870 | cmd |= XM_MMU_GMII_FD; |
1871 | |
1872 | /* |
1873 | * Workaround BCOM Errata (#10523) for all BCom Phys |
1874 | * Enable Power Management after link up |
1875 | */ |
1876 | if (hw->phy_type == SK_PHY_BCOM) { |
1877 | xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, |
1878 | xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) |
1879 | & ~PHY_B_AC_DIS_PM); |
1880 | xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); |
1881 | } |
1882 | |
1883 | /* enable Rx/Tx */ |
1884 | xm_write16(hw, port, XM_MMU_CMD, |
1885 | cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); |
1886 | skge_link_up(skge); |
1887 | } |
1888 | |
1889 | |
1890 | static inline void bcom_phy_intr(struct skge_port *skge) |
1891 | { |
1892 | struct skge_hw *hw = skge->hw; |
1893 | int port = skge->port; |
1894 | u16 isrc; |
1895 | |
1896 | isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); |
1897 | netif_printk(skge, intr, KERN_DEBUG, skge->netdev, |
1898 | "phy interrupt status 0x%x\n", isrc); |
1899 | |
1900 | if (isrc & PHY_B_IS_PSE) |
1901 | pr_err("%s: uncorrectable pair swap error\n", |
1902 | hw->dev[port]->name); |
1903 | |
1904 | /* Workaround BCom Errata: |
1905 | * enable and disable loopback mode if "NO HCD" occurs. |
1906 | */ |
1907 | if (isrc & PHY_B_IS_NO_HDCL) { |
1908 | u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); |
1909 | xm_phy_write(hw, port, PHY_BCOM_CTRL, |
1910 | ctrl | PHY_CT_LOOP); |
1911 | xm_phy_write(hw, port, PHY_BCOM_CTRL, |
1912 | ctrl & ~PHY_CT_LOOP); |
1913 | } |
1914 | |
1915 | if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) |
1916 | bcom_check_link(hw, port); |
1917 | |
1918 | } |
1919 | |
1920 | static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) |
1921 | { |
1922 | int i; |
1923 | |
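     | /* SMI write: load the data register first, then start the |
     | * transaction via the control register and poll until the busy |
     | * bit clears (bounded by PHY_RETRIES). |
     | */ |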
1924 | gma_write16(hw, port, GM_SMI_DATA, val); |
1925 | gma_write16(hw, port, GM_SMI_CTRL, |
1926 | GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); |
1927 | for (i = 0; i < PHY_RETRIES; i++) { |
1928 | udelay(1); |
1929 | |
1930 | if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) |
1931 | return 0; |
1932 | } |
1933 | |
1934 | pr_warning("%s: phy write timeout\n", hw->dev[port]->name); |
1935 | return -EIO; |
1936 | } |
1937 | |
1938 | static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) |
1939 | { |
1940 | int i; |
1941 | |
1942 | gma_write16(hw, port, GM_SMI_CTRL, |
1943 | GM_SMI_CT_PHY_AD(hw->phy_addr) |
1944 | | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); |
1945 | |
1946 | for (i = 0; i < PHY_RETRIES; i++) { |
1947 | udelay(1); |
1948 | if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) |
1949 | goto ready; |
1950 | } |
1951 | |
1952 | return -ETIMEDOUT; |
1953 | ready: |
1954 | *val = gma_read16(hw, port, GM_SMI_DATA); |
1955 | return 0; |
1956 | } |
1957 | |
1958 | static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) |
1959 | { |
1960 | u16 v = 0; |
1961 | if (__gm_phy_read(hw, port, reg, &v)) |
1962 | pr_warning("%s: phy read timeout\n", hw->dev[port]->name); |
1963 | return v; |
1964 | } |
1965 | |
1966 | /* Marvell Phy Initialization */ |
1967 | static void yukon_init(struct skge_hw *hw, int port) |
1968 | { |
1969 | struct skge_port *skge = netdev_priv(hw->dev[port]); |
1970 | u16 ctrl, ct1000, adv; |
1971 | |
1972 | if (skge->autoneg == AUTONEG_ENABLE) { |
1973 | u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); |
1974 | |
1975 | ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | |
1976 | PHY_M_EC_MAC_S_MSK); |
1977 | ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); |
1978 | |
1979 | ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); |
1980 | |
1981 | gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); |
1982 | } |
1983 | |
1984 | ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); |
1985 | if (skge->autoneg == AUTONEG_DISABLE) |
1986 | ctrl &= ~PHY_CT_ANE; |
1987 | |
1988 | ctrl |= PHY_CT_RESET; |
1989 | gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); |
1990 | |
1991 | ctrl = 0; |
1992 | ct1000 = 0; |
1993 | adv = PHY_AN_CSMA; |
1994 | |
1995 | if (skge->autoneg == AUTONEG_ENABLE) { |
1996 | if (hw->copper) { |
1997 | if (skge->advertising & ADVERTISED_1000baseT_Full) |
1998 | ct1000 |= PHY_M_1000C_AFD; |
1999 | if (skge->advertising & ADVERTISED_1000baseT_Half) |
2000 | ct1000 |= PHY_M_1000C_AHD; |
2001 | if (skge->advertising & ADVERTISED_100baseT_Full) |
2002 | adv |= PHY_M_AN_100_FD; |
2003 | if (skge->advertising & ADVERTISED_100baseT_Half) |
2004 | adv |= PHY_M_AN_100_HD; |
2005 | if (skge->advertising & ADVERTISED_10baseT_Full) |
2006 | adv |= PHY_M_AN_10_FD; |
2007 | if (skge->advertising & ADVERTISED_10baseT_Half) |
2008 | adv |= PHY_M_AN_10_HD; |
2009 | |
2010 | /* Set Flow-control capabilities */ |
2011 | adv |= phy_pause_map[skge->flow_control]; |
2012 | } else { |
2013 | if (skge->advertising & ADVERTISED_1000baseT_Full) |
2014 | adv |= PHY_M_AN_1000X_AFD; |
2015 | if (skge->advertising & ADVERTISED_1000baseT_Half) |
2016 | adv |= PHY_M_AN_1000X_AHD; |
2017 | |
2018 | adv |= fiber_pause_map[skge->flow_control]; |
2019 | } |
2020 | |
2021 | /* Restart Auto-negotiation */ |
2022 | ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; |
2023 | } else { |
2024 | /* forced speed/duplex settings */ |
2025 | ct1000 = PHY_M_1000C_MSE; |
2026 | |
2027 | if (skge->duplex == DUPLEX_FULL) |
2028 | ctrl |= PHY_CT_DUP_MD; |
2029 | |
2030 | switch (skge->speed) { |
2031 | case SPEED_1000: |
2032 | ctrl |= PHY_CT_SP1000; |
2033 | break; |
2034 | case SPEED_100: |
2035 | ctrl |= PHY_CT_SP100; |
2036 | break; |
2037 | } |
2038 | |
2039 | ctrl |= PHY_CT_RESET; |
2040 | } |
2041 | |
2042 | gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); |
2043 | |
2044 | gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); |
2045 | gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); |
2046 | |
2047 | /* Enable phy interrupt on autonegotiation complete (or link up) */ |
2048 | if (skge->autoneg == AUTONEG_ENABLE) |
2049 | gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); |
2050 | else |
2051 | gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); |
2052 | } |
2053 | |
2054 | static void yukon_reset(struct skge_hw *hw, int port) |
2055 | { |
2056 | gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ |
2057 | gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ |
2058 | gma_write16(hw, port, GM_MC_ADDR_H2, 0); |
2059 | gma_write16(hw, port, GM_MC_ADDR_H3, 0); |
2060 | gma_write16(hw, port, GM_MC_ADDR_H4, 0); |
2061 | |
2062 | gma_write16(hw, port, GM_RX_CTRL, |
2063 | gma_read16(hw, port, GM_RX_CTRL) |
2064 | | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); |
2065 | } |
2066 | |
2067 | /* Apparently, early versions of Yukon-Lite had the wrong chip_id? */ |
2068 | static int is_yukon_lite_a0(struct skge_hw *hw) |
2069 | { |
2070 | u32 reg; |
2071 | int ret; |
2072 | |
2073 | if (hw->chip_id != CHIP_ID_YUKON) |
2074 | return 0; |
2075 | |
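     | /* Probe: write 0xff to the top byte of the Flash Address Register |
     | * and check whether any bits stick; on Yukon-Lite A0 they |
     | * presumably do. The original value is restored afterwards. |
     | */ |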
2076 | reg = skge_read32(hw, B2_FAR); |
2077 | skge_write8(hw, B2_FAR + 3, 0xff); |
2078 | ret = (skge_read8(hw, B2_FAR + 3) != 0); |
2079 | skge_write32(hw, B2_FAR, reg); |
2080 | return ret; |
2081 | } |
2082 | |
2083 | static void yukon_mac_init(struct skge_hw *hw, int port) |
2084 | { |
2085 | struct skge_port *skge = netdev_priv(hw->dev[port]); |
2086 | int i; |
2087 | u32 reg; |
2088 | const u8 *addr = hw->dev[port]->dev_addr; |
2089 | |
2090 | /* WA code for COMA mode -- set PHY reset */ |
2091 | if (hw->chip_id == CHIP_ID_YUKON_LITE && |
2092 | hw->chip_rev >= CHIP_REV_YU_LITE_A3) { |
2093 | reg = skge_read32(hw, B2_GP_IO); |
2094 | reg |= GP_DIR_9 | GP_IO_9; |
2095 | skge_write32(hw, B2_GP_IO, reg); |
2096 | } |
2097 | |
2098 | /* hard reset */ |
2099 | skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); |
2100 | skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); |
2101 | |
2102 | /* WA code for COMA mode -- clear PHY reset */ |
2103 | if (hw->chip_id == CHIP_ID_YUKON_LITE && |
2104 | hw->chip_rev >= CHIP_REV_YU_LITE_A3) { |
2105 | reg = skge_read32(hw, B2_GP_IO); |
2106 | reg |= GP_DIR_9; |
2107 | reg &= ~GP_IO_9; |
2108 | skge_write32(hw, B2_GP_IO, reg); |
2109 | } |
2110 | |
2111 | /* Set hardware config mode */ |
2112 | reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | |
2113 | GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; |
2114 | reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; |
2115 | |
2116 | /* Clear GMC reset */ |
2117 | skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); |
2118 | skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); |
2119 | skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); |
2120 | |
2121 | if (skge->autoneg == AUTONEG_DISABLE) { |
2122 | reg = GM_GPCR_AU_ALL_DIS; |
2123 | gma_write16(hw, port, GM_GP_CTRL, |
2124 | gma_read16(hw, port, GM_GP_CTRL) | reg); |
2125 | |
2126 | switch (skge->speed) { |
2127 | case SPEED_1000: |
2128 | reg &= ~GM_GPCR_SPEED_100; |
2129 | reg |= GM_GPCR_SPEED_1000; |
2130 | break; |
2131 | case SPEED_100: |
2132 | reg &= ~GM_GPCR_SPEED_1000; |
2133 | reg |= GM_GPCR_SPEED_100; |
2134 | break; |
2135 | case SPEED_10: |
2136 | reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); |
2137 | break; |
2138 | } |
2139 | |
2140 | if (skge->duplex == DUPLEX_FULL) |
2141 | reg |= GM_GPCR_DUP_FULL; |
2142 | } else |
2143 | reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; |
2144 | |
2145 | switch (skge->flow_control) { |
2146 | case FLOW_MODE_NONE: |
2147 | skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); |
2148 | reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; |
2149 | break; |
2150 | case FLOW_MODE_LOC_SEND: |
2151 | /* disable Rx flow-control */ |
2152 | reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; |
2153 | break; |
2154 | case FLOW_MODE_SYMMETRIC: |
2155 | case FLOW_MODE_SYM_OR_REM: |
2156 | /* enable Tx & Rx flow-control */ |
2157 | break; |
2158 | } |
2159 | |
2160 | gma_write16(hw, port, GM_GP_CTRL, reg); |
2161 | skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); |
2162 | |
2163 | yukon_init(hw, port); |
2164 | |
2165 | /* MIB clear */ |
2166 | reg = gma_read16(hw, port, GM_PHY_ADDR); |
2167 | gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); |
2168 | |
2169 | for (i = 0; i < GM_MIB_CNT_SIZE; i++) |
2170 | gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); |
2171 | gma_write16(hw, port, GM_PHY_ADDR, reg); |
2172 | |
2173 | /* transmit control */ |
2174 | gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); |
2175 | |
2176 | /* receive control reg: unicast + multicast + no FCS */ |
2177 | gma_write16(hw, port, GM_RX_CTRL, |
2178 | GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); |
2179 | |
2180 | /* transmit flow control */ |
2181 | gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); |
2182 | |
2183 | /* transmit parameter */ |
2184 | gma_write16(hw, port, GM_TX_PARAM, |
2185 | TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | |
2186 | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | |
2187 | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); |
2188 | |
2189 | /* configure the Serial Mode Register */ |
2190 | reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
2191 | | GM_SMOD_VLAN_ENA |
2192 | | IPG_DATA_VAL(IPG_DATA_DEF); |
2193 | |
2194 | if (hw->dev[port]->mtu > ETH_DATA_LEN) |
2195 | reg |= GM_SMOD_JUMBO_ENA; |
2196 | |
2197 | gma_write16(hw, port, GM_SERIAL_MODE, reg); |
2198 | |
2199 | /* physical address: used for pause frames */ |
2200 | gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); |
2201 | /* virtual address for data */ |
2202 | gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); |
2203 | |
2204 | /* disable all counter overflow interrupts */ |
2205 | gma_write16(hw, port, GM_TX_IRQ_MSK, 0); |
2206 | gma_write16(hw, port, GM_RX_IRQ_MSK, 0); |
2207 | gma_write16(hw, port, GM_TR_IRQ_MSK, 0); |
2208 | |
2209 | /* Initialize Mac Fifo */ |
2210 | |
2211 | /* Configure Rx MAC FIFO */ |
2212 | skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); |
2213 | reg = GMF_OPER_ON | GMF_RX_F_FL_ON; |
2214 | |
2215 | /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ |
2216 | if (is_yukon_lite_a0(hw)) |
2217 | reg &= ~GMF_RX_F_FL_ON; |
2218 | |
2219 | skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); |
2220 | skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); |
2221 | /* |
2222 | * Because Pause Packet Truncation in the GMAC is not working, |
2223 | * we have to increase the Flush Threshold to 64 bytes |
2224 | * in order to flush pause packets from the Rx FIFO on Yukon-1 |
2225 | */ |
2226 | skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); |
2227 | |
2228 | /* Configure Tx MAC FIFO */ |
2229 | skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); |
2230 | skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); |
2231 | } |
2232 | |
2233 | /* Go into power down mode */ |
2234 | static void yukon_suspend(struct skge_hw *hw, int port) |
2235 | { |
2236 | u16 ctrl; |
2237 | |
2238 | ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); |
2239 | ctrl |= PHY_M_PC_POL_R_DIS; |
2240 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); |
2241 | |
2242 | ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); |
2243 | ctrl |= PHY_CT_RESET; |
2244 | gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); |
2245 | |
2246 | /* switch IEEE compatible power down mode on */ |
2247 | ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); |
2248 | ctrl |= PHY_CT_PDOWN; |
2249 | gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); |
2250 | } |
2251 | |
2252 | static void yukon_stop(struct skge_port *skge) |
2253 | { |
2254 | struct skge_hw *hw = skge->hw; |
2255 | int port = skge->port; |
2256 | |
2257 | skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); |
2258 | yukon_reset(hw, port); |
2259 | |
2260 | gma_write16(hw, port, GM_GP_CTRL, |
2261 | gma_read16(hw, port, GM_GP_CTRL) |
2262 | & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); |
2263 | gma_read16(hw, port, GM_GP_CTRL); |
2264 | |
2265 | yukon_suspend(hw, port); |
2266 | |
2267 | /* set GPHY Control reset */ |
2268 | skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); |
2269 | skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); |
2270 | } |
2271 | |
2272 | static void yukon_get_stats(struct skge_port *skge, u64 *data) |
2273 | { |
2274 | struct skge_hw *hw = skge->hw; |
2275 | int port = skge->port; |
2276 | int i; |
2277 | |
2278 | data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 |
2279 | | gma_read32(hw, port, GM_TXO_OK_LO); |
2280 | data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 |
2281 | | gma_read32(hw, port, GM_RXO_OK_LO); |
2282 | |
2283 | for (i = 2; i < ARRAY_SIZE(skge_stats); i++) |
2284 | data[i] = gma_read32(hw, port, |
2285 | skge_stats[i].gma_offset); |
2286 | } |
2287 | |
2288 | static void yukon_mac_intr(struct skge_hw *hw, int port) |
2289 | { |
2290 | struct net_device *dev = hw->dev[port]; |
2291 | struct skge_port *skge = netdev_priv(dev); |
2292 | u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); |
2293 | |
2294 | netif_printk(skge, intr, KERN_DEBUG, skge->netdev, |
2295 | "mac interrupt status 0x%x\n", status); |
2296 | |
2297 | if (status & GM_IS_RX_FF_OR) { |
2298 | ++dev->stats.rx_fifo_errors; |
2299 | skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); |
2300 | } |
2301 | |
2302 | if (status & GM_IS_TX_FF_UR) { |
2303 | ++dev->stats.tx_fifo_errors; |
2304 | skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); |
2305 | } |
2306 | |
2307 | } |
2308 | |
2309 | static u16 yukon_speed(const struct skge_hw *hw, u16 aux) |
2310 | { |
2311 | switch (aux & PHY_M_PS_SPEED_MSK) { |
2312 | case PHY_M_PS_SPEED_1000: |
2313 | return SPEED_1000; |
2314 | case PHY_M_PS_SPEED_100: |
2315 | return SPEED_100; |
2316 | default: |
2317 | return SPEED_10; |
2318 | } |
2319 | } |
2320 | |
2321 | static void yukon_link_up(struct skge_port *skge) |
2322 | { |
2323 | struct skge_hw *hw = skge->hw; |
2324 | int port = skge->port; |
2325 | u16 reg; |
2326 | |
2327 | /* Enable Transmit FIFO Underrun */ |
2328 | skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); |
2329 | |
2330 | reg = gma_read16(hw, port, GM_GP_CTRL); |
2331 | if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) |
2332 | reg |= GM_GPCR_DUP_FULL; |
2333 | |
2334 | /* enable Rx/Tx */ |
2335 | reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; |
2336 | gma_write16(hw, port, GM_GP_CTRL, reg); |
2337 | |
2338 | gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); |
2339 | skge_link_up(skge); |
2340 | } |
2341 | |
2342 | static void yukon_link_down(struct skge_port *skge) |
2343 | { |
2344 | struct skge_hw *hw = skge->hw; |
2345 | int port = skge->port; |
2346 | u16 ctrl; |
2347 | |
2348 | ctrl = gma_read16(hw, port, GM_GP_CTRL); |
2349 | ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); |
2350 | gma_write16(hw, port, GM_GP_CTRL, ctrl); |
2351 | |
2352 | if (skge->flow_status == FLOW_STAT_REM_SEND) { |
2353 | ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); |
2354 | ctrl |= PHY_M_AN_ASP; |
2355 | /* restore Asymmetric Pause bit */ |
2356 | gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); |
2357 | } |
2358 | |
2359 | skge_link_down(skge); |
2360 | |
2361 | yukon_init(hw, port); |
2362 | } |
2363 | |
2364 | static void yukon_phy_intr(struct skge_port *skge) |
2365 | { |
2366 | struct skge_hw *hw = skge->hw; |
2367 | int port = skge->port; |
2368 | const char *reason = NULL; |
2369 | u16 istatus, phystat; |
2370 | |
2371 | istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); |
2372 | phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); |
2373 | |
2374 | netif_printk(skge, intr, KERN_DEBUG, skge->netdev, |
2375 | "phy interrupt status 0x%x 0x%x\n", istatus, phystat); |
2376 | |
2377 | if (istatus & PHY_M_IS_AN_COMPL) { |
2378 | if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) |
2379 | & PHY_M_AN_RF) { |
2380 | reason = "remote fault"; |
2381 | goto failed; |
2382 | } |
2383 | |
2384 | if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { |
2385 | reason = "master/slave fault"; |
2386 | goto failed; |
2387 | } |
2388 | |
2389 | if (!(phystat & PHY_M_PS_SPDUP_RES)) { |
2390 | reason = "speed/duplex"; |
2391 | goto failed; |
2392 | } |
2393 | |
2394 | skge->duplex = (phystat & PHY_M_PS_FULL_DUP) |
2395 | ? DUPLEX_FULL : DUPLEX_HALF; |
2396 | skge->speed = yukon_speed(hw, phystat); |
2397 | |
2398 | /* We are using IEEE 802.3z/D5.0 Table 37-4 */ |
2399 | switch (phystat & PHY_M_PS_PAUSE_MSK) { |
2400 | case PHY_M_PS_PAUSE_MSK: |
2401 | skge->flow_status = FLOW_STAT_SYMMETRIC; |
2402 | break; |
2403 | case PHY_M_PS_RX_P_EN: |
2404 | skge->flow_status = FLOW_STAT_REM_SEND; |
2405 | break; |
2406 | case PHY_M_PS_TX_P_EN: |
2407 | skge->flow_status = FLOW_STAT_LOC_SEND; |
2408 | break; |
2409 | default: |
2410 | skge->flow_status = FLOW_STAT_NONE; |
2411 | } |
2412 | |
2413 | if (skge->flow_status == FLOW_STAT_NONE || |
2414 | (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) |
2415 | skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); |
2416 | else |
2417 | skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); |
2418 | yukon_link_up(skge); |
2419 | return; |
2420 | } |
2421 | |
2422 | if (istatus & PHY_M_IS_LSP_CHANGE) |
2423 | skge->speed = yukon_speed(hw, phystat); |
2424 | |
2425 | if (istatus & PHY_M_IS_DUP_CHANGE) |
2426 | skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; |
2427 | if (istatus & PHY_M_IS_LST_CHANGE) { |
2428 | if (phystat & PHY_M_PS_LINK_UP) |
2429 | yukon_link_up(skge); |
2430 | else |
2431 | yukon_link_down(skge); |
2432 | } |
2433 | return; |
2434 | failed: |
2435 | pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason); |
2436 | |
2437 | /* XXX restart autonegotiation? */ |
2438 | } |
2439 | |
2440 | static void skge_phy_reset(struct skge_port *skge) |
2441 | { |
2442 | struct skge_hw *hw = skge->hw; |
2443 | int port = skge->port; |
2444 | struct net_device *dev = hw->dev[port]; |
2445 | |
2446 | netif_stop_queue(skge->netdev); |
2447 | netif_carrier_off(skge->netdev); |
2448 | |
2449 | spin_lock_bh(&hw->phy_lock); |
2450 | if (hw->chip_id == CHIP_ID_GENESIS) { |
2451 | genesis_reset(hw, port); |
2452 | genesis_mac_init(hw, port); |
2453 | } else { |
2454 | yukon_reset(hw, port); |
2455 | yukon_init(hw, port); |
2456 | } |
2457 | spin_unlock_bh(&hw->phy_lock); |
2458 | |
2459 | skge_set_multicast(dev); |
2460 | } |
2461 | |
2462 | /* Basic MII support */ |
2463 | static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
2464 | { |
2465 | struct mii_ioctl_data *data = if_mii(ifr); |
2466 | struct skge_port *skge = netdev_priv(dev); |
2467 | struct skge_hw *hw = skge->hw; |
2468 | int err = -EOPNOTSUPP; |
2469 | |
2470 | if (!netif_running(dev)) |
2471 | return -ENODEV; /* Phy still in reset */ |
2472 | |
2473 | switch (cmd) { |
2474 | case SIOCGMIIPHY: |
2475 | data->phy_id = hw->phy_addr; |
2476 | |
2477 | /* fallthru */ |
2478 | case SIOCGMIIREG: { |
2479 | u16 val = 0; |
2480 | spin_lock_bh(&hw->phy_lock); |
2481 | if (hw->chip_id == CHIP_ID_GENESIS) |
2482 | err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); |
2483 | else |
2484 | err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); |
2485 | spin_unlock_bh(&hw->phy_lock); |
2486 | data->val_out = val; |
2487 | break; |
2488 | } |
2489 | |
2490 | case SIOCSMIIREG: |
2491 | spin_lock_bh(&hw->phy_lock); |
2492 | if (hw->chip_id == CHIP_ID_GENESIS) |
2493 | err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, |
2494 | data->val_in); |
2495 | else |
2496 | err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, |
2497 | data->val_in); |
2498 | spin_unlock_bh(&hw->phy_lock); |
2499 | break; |
2500 | } |
2501 | return err; |
2502 | } |
2503 | |
2504 | static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) |
2505 | { |
2506 | u32 end; |
2507 | |
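     | /* RAM buffer start/end registers are programmed in units of |
     | * 8 bytes, hence the divisions below. |
     | */ |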
2508 | start /= 8; |
2509 | len /= 8; |
2510 | end = start + len - 1; |
2511 | |
2512 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); |
2513 | skge_write32(hw, RB_ADDR(q, RB_START), start); |
2514 | skge_write32(hw, RB_ADDR(q, RB_WP), start); |
2515 | skge_write32(hw, RB_ADDR(q, RB_RP), start); |
2516 | skge_write32(hw, RB_ADDR(q, RB_END), end); |
2517 | |
2518 | if (q == Q_R1 || q == Q_R2) { |
2519 | /* Set thresholds on receive queues */ |
2520 | skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), |
2521 | start + (2*len)/3); |
2522 | skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), |
2523 | start + (len/3)); |
2524 | } else { |
2525 | /* Enable store & forward on Tx queues because the |
2526 | * Tx FIFO is only 4K on Genesis and 1K on Yukon |
2527 | */ |
2528 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); |
2529 | } |
2530 | |
2531 | skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); |
2532 | } |
2533 | |
2534 | /* Setup Bus Memory Interface */ |
2535 | static void skge_qset(struct skge_port *skge, u16 q, |
2536 | const struct skge_element *e) |
2537 | { |
2538 | struct skge_hw *hw = skge->hw; |
2539 | u32 watermark = 0x600; |
2540 | u64 base = skge->dma + (e->desc - skge->mem); |
2541 | |
2542 | /* optimization: halve the watermark to reduce the transfer window on 32-bit/33 MHz PCI */ |
2543 | if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) |
2544 | watermark /= 2; |
2545 | |
2546 | skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); |
2547 | skge_write32(hw, Q_ADDR(q, Q_F), watermark); |
2548 | skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); |
2549 | skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); |
2550 | } |
2551 | |
2552 | static int skge_up(struct net_device *dev) |
2553 | { |
2554 | struct skge_port *skge = netdev_priv(dev); |
2555 | struct skge_hw *hw = skge->hw; |
2556 | int port = skge->port; |
2557 | u32 chunk, ram_addr; |
2558 | size_t rx_size, tx_size; |
2559 | int err; |
2560 | |
2561 | if (!is_valid_ether_addr(dev->dev_addr)) |
2562 | return -EINVAL; |
2563 | |
2564 | netif_info(skge, ifup, skge->netdev, "enabling interface\n"); |
2565 | |
2566 | if (dev->mtu > RX_BUF_SIZE) |
2567 | skge->rx_buf_size = dev->mtu + ETH_HLEN; |
2568 | else |
2569 | skge->rx_buf_size = RX_BUF_SIZE; |
2570 | |
2571 | |
2572 | rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); |
2573 | tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); |
2574 | skge->mem_size = tx_size + rx_size; |
2575 | skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); |
2576 | if (!skge->mem) |
2577 | return -ENOMEM; |
2578 | |
2579 | BUG_ON(skge->dma & 7); |
2580 | |
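     | /* The ring memory must not cross a 4 GB boundary: the queue's |
     | * upper DMA address (Q_DA_H) is programmed only once in |
     | * skge_qset(), so presumably all descriptors have to share the |
     | * same upper 32 bits. |
     | */ |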
2581 | if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { |
2582 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); |
2583 | err = -EINVAL; |
2584 | goto free_pci_mem; |
2585 | } |
2586 | |
2587 | memset(skge->mem, 0, skge->mem_size); |
2588 | |
2589 | err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); |
2590 | if (err) |
2591 | goto free_pci_mem; |
2592 | |
2593 | err = skge_rx_fill(dev); |
2594 | if (err) |
2595 | goto free_rx_ring; |
2596 | |
2597 | err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, |
2598 | skge->dma + rx_size); |
2599 | if (err) |
2600 | goto free_rx_ring; |
2601 | |
2602 | /* Initialize MAC */ |
2603 | spin_lock_bh(&hw->phy_lock); |
2604 | if (hw->chip_id == CHIP_ID_GENESIS) |
2605 | genesis_mac_init(hw, port); |
2606 | else |
2607 | yukon_mac_init(hw, port); |
2608 | spin_unlock_bh(&hw->phy_lock); |
2609 | |
2610 | /* Configure RAM buffers - split equally between ports and Tx/Rx */ |
2611 | chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); |
2612 | ram_addr = hw->ram_offset + 2 * chunk * port; |
2613 | |
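     | /* Each port gets two consecutive chunks: Rx first, then Tx. */ |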
2614 | skge_ramset(hw, rxqaddr[port], ram_addr, chunk); |
2615 | skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); |
2616 | |
2617 | BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); |
2618 | skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); |
2619 | skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); |
2620 | |
2621 | /* Start receiver BMU */ |
2622 | wmb(); |
2623 | skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); |
2624 | skge_led(skge, LED_MODE_ON); |
2625 | |
2626 | spin_lock_irq(&hw->hw_lock); |
2627 | hw->intr_mask |= portmask[port]; |
2628 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2629 | spin_unlock_irq(&hw->hw_lock); |
2630 | |
2631 | napi_enable(&skge->napi); |
2632 | return 0; |
2633 | |
2634 | free_rx_ring: |
2635 | skge_rx_clean(skge); |
2636 | kfree(skge->rx_ring.start); |
2637 | free_pci_mem: |
2638 | pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); |
2639 | skge->mem = NULL; |
2640 | |
2641 | return err; |
2642 | } |
2643 | |
2644 | /* stop receiver */ |
2645 | static void skge_rx_stop(struct skge_hw *hw, int port) |
2646 | { |
2647 | skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); |
2648 | skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), |
2649 | RB_RST_SET|RB_DIS_OP_MD); |
2650 | skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); |
2651 | } |
2652 | |
2653 | static int skge_down(struct net_device *dev) |
2654 | { |
2655 | struct skge_port *skge = netdev_priv(dev); |
2656 | struct skge_hw *hw = skge->hw; |
2657 | int port = skge->port; |
2658 | |
2659 | if (skge->mem == NULL) |
2660 | return 0; |
2661 | |
2662 | netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); |
2663 | |
2664 | netif_tx_disable(dev); |
2665 | |
2666 | if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) |
2667 | del_timer_sync(&skge->link_timer); |
2668 | |
2669 | napi_disable(&skge->napi); |
2670 | netif_carrier_off(dev); |
2671 | |
2672 | spin_lock_irq(&hw->hw_lock); |
2673 | hw->intr_mask &= ~portmask[port]; |
2674 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2675 | spin_unlock_irq(&hw->hw_lock); |
2676 | |
2677 | skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); |
2678 | if (hw->chip_id == CHIP_ID_GENESIS) |
2679 | genesis_stop(skge); |
2680 | else |
2681 | yukon_stop(skge); |
2682 | |
2683 | /* Stop transmitter */ |
2684 | skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); |
2685 | skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), |
2686 | RB_RST_SET|RB_DIS_OP_MD); |
2687 | |
2688 | |
2689 | /* Disable Force Sync bit and Enable Alloc bit */ |
2690 | skge_write8(hw, SK_REG(port, TXA_CTRL), |
2691 | TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); |
2692 | |
2693 | /* Stop Interval Timer and Limit Counter of Tx Arbiter */ |
2694 | skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); |
2695 | skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); |
2696 | |
2697 | /* Reset PCI FIFO */ |
2698 | skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); |
2699 | skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); |
2700 | |
2701 | /* Reset the RAM Buffer async Tx queue */ |
2702 | skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); |
2703 | |
2704 | skge_rx_stop(hw, port); |
2705 | |
2706 | if (hw->chip_id == CHIP_ID_GENESIS) { |
2707 | skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); |
2708 | skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); |
2709 | } else { |
2710 | skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); |
2711 | skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); |
2712 | } |
2713 | |
2714 | skge_led(skge, LED_MODE_OFF); |
2715 | |
2716 | netif_tx_lock_bh(dev); |
2717 | skge_tx_clean(dev); |
2718 | netif_tx_unlock_bh(dev); |
2719 | |
2720 | skge_rx_clean(skge); |
2721 | |
2722 | kfree(skge->rx_ring.start); |
2723 | kfree(skge->tx_ring.start); |
2724 | pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); |
2725 | skge->mem = NULL; |
2726 | return 0; |
2727 | } |
2728 | |
2729 | static inline int skge_avail(const struct skge_ring *ring) |
2730 | { |
2731 | smp_mb(); |
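     | /* One slot is always left unused so a full ring can be told apart |
     | * from an empty one, hence the trailing "- 1"; the first term adds |
     | * ring->count back when to_use has wrapped past to_clean. |
     | */ |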
2732 | return ((ring->to_clean > ring->to_use) ? 0 : ring->count) |
2733 | + (ring->to_clean - ring->to_use) - 1; |
2734 | } |
2735 | |
2736 | static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, |
2737 | struct net_device *dev) |
2738 | { |
2739 | struct skge_port *skge = netdev_priv(dev); |
2740 | struct skge_hw *hw = skge->hw; |
2741 | struct skge_element *e; |
2742 | struct skge_tx_desc *td; |
2743 | int i; |
2744 | u32 control, len; |
2745 | u64 map; |
2746 | |
2747 | if (skb_padto(skb, ETH_ZLEN)) |
2748 | return NETDEV_TX_OK; |
2749 | |
2750 | if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) |
2751 | return NETDEV_TX_BUSY; |
2752 | |
2753 | e = skge->tx_ring.to_use; |
2754 | td = e->desc; |
2755 | BUG_ON(td->control & BMU_OWN); |
2756 | e->skb = skb; |
2757 | len = skb_headlen(skb); |
2758 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2759 | pci_unmap_addr_set(e, mapaddr, map); |
2760 | pci_unmap_len_set(e, maplen, len); |
2761 | |
2762 | td->dma_lo = map; |
2763 | td->dma_hi = map >> 32; |
2764 | |
2765 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2766 | const int offset = skb_transport_offset(skb); |
2767 | |
2768 | /* This seems backwards, but it is what the sk98lin |
2769 | * does. Looks like hardware is wrong? |
2770 | */ |
2771 | if (ipip_hdr(skb)->protocol == IPPROTO_UDP && |
2772 | hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) |
2773 | control = BMU_TCP_CHECK; |
2774 | else |
2775 | control = BMU_UDP_CHECK; |
2776 | |
2777 | td->csum_offs = 0; |
2778 | td->csum_start = offset; |
2779 | td->csum_write = offset + skb->csum_offset; |
2780 | } else |
2781 | control = BMU_CHECK; |
2782 | |
2783 | if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */ |
2784 | control |= BMU_EOF | BMU_IRQ_EOF; |
2785 | else { |
2786 | struct skge_tx_desc *tf = td; |
2787 | |
2788 | control |= BMU_STFWD; |
2789 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2790 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2791 | |
2792 | map = pci_map_page(hw->pdev, frag->page, frag->page_offset, |
2793 | frag->size, PCI_DMA_TODEVICE); |
2794 | |
2795 | e = e->next; |
2796 | e->skb = skb; |
2797 | tf = e->desc; |
2798 | BUG_ON(tf->control & BMU_OWN); |
2799 | |
2800 | tf->dma_lo = map; |
2801 | tf->dma_hi = (u64) map >> 32; |
2802 | pci_unmap_addr_set(e, mapaddr, map); |
2803 | pci_unmap_len_set(e, maplen, frag->size); |
2804 | |
2805 | tf->control = BMU_OWN | BMU_SW | control | frag->size; |
2806 | } |
2807 | tf->control |= BMU_EOF | BMU_IRQ_EOF; |
2808 | } |
2809 | /* Make sure all the descriptors are written */ |
2810 | wmb(); |
2811 | td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; |
2812 | wmb(); |
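     | /* BMU_OWN is set on the first descriptor last so the hardware |
     | * never sees a partially built chain; the second wmb() orders the |
     | * ownership hand-off before the start doorbell below. |
     | */ |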
2813 | |
2814 | skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); |
2815 | |
2816 | netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, |
2817 | "tx queued, slot %td, len %d\n", |
2818 | e - skge->tx_ring.start, skb->len); |
2819 | |
2820 | skge->tx_ring.to_use = e->next; |
2821 | smp_wmb(); |
2822 | |
2823 | if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { |
2824 | netdev_dbg(dev, "transmit queue full\n"); |
2825 | netif_stop_queue(dev); |
2826 | } |
2827 | |
2828 | return NETDEV_TX_OK; |
2829 | } |
2830 | |
2831 | |
2832 | /* Free resources associated with this ring element */ |
2833 | static void skge_tx_free(struct skge_port *skge, struct skge_element *e, |
2834 | u32 control) |
2835 | { |
2836 | struct pci_dev *pdev = skge->hw->pdev; |
2837 | |
2838 | /* skb header vs. fragment */ |
2839 | if (control & BMU_STF) |
2840 | pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr), |
2841 | pci_unmap_len(e, maplen), |
2842 | PCI_DMA_TODEVICE); |
2843 | else |
2844 | pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr), |
2845 | pci_unmap_len(e, maplen), |
2846 | PCI_DMA_TODEVICE); |
2847 | |
2848 | if (control & BMU_EOF) { |
2849 | netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, |
2850 | "tx done slot %td\n", e - skge->tx_ring.start); |
2851 | |
2852 | dev_kfree_skb(e->skb); |
2853 | } |
2854 | } |
2855 | |
2856 | /* Free all buffers in transmit ring */ |
2857 | static void skge_tx_clean(struct net_device *dev) |
2858 | { |
2859 | struct skge_port *skge = netdev_priv(dev); |
2860 | struct skge_element *e; |
2861 | |
2862 | for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { |
2863 | struct skge_tx_desc *td = e->desc; |
2864 | skge_tx_free(skge, e, td->control); |
2865 | td->control = 0; |
2866 | } |
2867 | |
2868 | skge->tx_ring.to_clean = e; |
2869 | } |
2870 | |
2871 | static void skge_tx_timeout(struct net_device *dev) |
2872 | { |
2873 | struct skge_port *skge = netdev_priv(dev); |
2874 | |
2875 | netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n"); |
2876 | |
2877 | skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); |
2878 | skge_tx_clean(dev); |
2879 | netif_wake_queue(dev); |
2880 | } |
2881 | |
2882 | static int skge_change_mtu(struct net_device *dev, int new_mtu) |
2883 | { |
2884 | int err; |
2885 | |
2886 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) |
2887 | return -EINVAL; |
2888 | |
2889 | if (!netif_running(dev)) { |
2890 | dev->mtu = new_mtu; |
2891 | return 0; |
2892 | } |
2893 | |
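     | /* Restart the interface: receive buffer sizing and ring setup in |
     | * skge_up() depend on the MTU. |
     | */ |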
2894 | skge_down(dev); |
2895 | |
2896 | dev->mtu = new_mtu; |
2897 | |
2898 | err = skge_up(dev); |
2899 | if (err) |
2900 | dev_close(dev); |
2901 | |
2902 | return err; |
2903 | } |
2904 | |
2905 | static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; |
2906 | |
2907 | static void genesis_add_filter(u8 filter[8], const u8 *addr) |
2908 | { |
2909 | u32 crc, bit; |
2910 | |
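     | /* Hash the address into one of 64 bins: the low 6 bits of the |
     | * inverted little-endian CRC select which bit to set in the |
     | * 8-byte hash filter. |
     | */ |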
2911 | crc = ether_crc_le(ETH_ALEN, addr); |
2912 | bit = ~crc & 0x3f; |
2913 | filter[bit/8] |= 1 << (bit%8); |
2914 | } |
2915 | |
2916 | static void genesis_set_multicast(struct net_device *dev) |
2917 | { |
2918 | struct skge_port *skge = netdev_priv(dev); |
2919 | struct skge_hw *hw = skge->hw; |
2920 | int port = skge->port; |
2921 | struct dev_mc_list *list; |
2922 | u32 mode; |
2923 | u8 filter[8]; |
2924 | |
2925 | mode = xm_read32(hw, port, XM_MODE); |
2926 | mode |= XM_MD_ENA_HASH; |
2927 | if (dev->flags & IFF_PROMISC) |
2928 | mode |= XM_MD_ENA_PROM; |
2929 | else |
2930 | mode &= ~XM_MD_ENA_PROM; |
2931 | |
2932 | if (dev->flags & IFF_ALLMULTI) |
2933 | memset(filter, 0xff, sizeof(filter)); |
2934 | else { |
2935 | memset(filter, 0, sizeof(filter)); |
2936 | |
2937 | if (skge->flow_status == FLOW_STAT_REM_SEND || |
2938 | skge->flow_status == FLOW_STAT_SYMMETRIC) |
2939 | genesis_add_filter(filter, pause_mc_addr); |
2940 | |
2941 | netdev_for_each_mc_addr(list, dev) |
2942 | genesis_add_filter(filter, list->dmi_addr); |
2943 | } |
2944 | |
2945 | xm_write32(hw, port, XM_MODE, mode); |
2946 | xm_outhash(hw, port, XM_HSM, filter); |
2947 | } |
2948 | |
2949 | static void yukon_add_filter(u8 filter[8], const u8 *addr) |
2950 | { |
2951 | u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; |
2952 | filter[bit/8] |= 1 << (bit%8); |
2953 | } |
2954 | |
2955 | static void yukon_set_multicast(struct net_device *dev) |
2956 | { |
2957 | struct skge_port *skge = netdev_priv(dev); |
2958 | struct skge_hw *hw = skge->hw; |
2959 | int port = skge->port; |
2960 | struct dev_mc_list *list; |
2961 | int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || |
2962 | skge->flow_status == FLOW_STAT_SYMMETRIC); |
2963 | u16 reg; |
2964 | u8 filter[8]; |
2965 | |
2966 | memset(filter, 0, sizeof(filter)); |
2967 | |
2968 | reg = gma_read16(hw, port, GM_RX_CTRL); |
2969 | reg |= GM_RXCR_UCF_ENA; |
2970 | |
2971 | if (dev->flags & IFF_PROMISC) /* promiscuous */ |
2972 | reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); |
2973 | else if (dev->flags & IFF_ALLMULTI) /* all multicast */ |
2974 | memset(filter, 0xff, sizeof(filter)); |
2975 | else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */ |
2976 | reg &= ~GM_RXCR_MCF_ENA; |
2977 | else { |
2978 | reg |= GM_RXCR_MCF_ENA; |
2979 | |
2980 | if (rx_pause) |
2981 | yukon_add_filter(filter, pause_mc_addr); |
2982 | |
2983 | netdev_for_each_mc_addr(list, dev) |
2984 | yukon_add_filter(filter, list->dmi_addr); |
2985 | } |
2986 | |
2987 | |
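     | /* Load the 64-bit hash filter into the four 16-bit multicast |
     | * hash registers, low bytes first. |
     | */ |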
2988 | gma_write16(hw, port, GM_MC_ADDR_H1, |
2989 | (u16)filter[0] | ((u16)filter[1] << 8)); |
2990 | gma_write16(hw, port, GM_MC_ADDR_H2, |
2991 | (u16)filter[2] | ((u16)filter[3] << 8)); |
2992 | gma_write16(hw, port, GM_MC_ADDR_H3, |
2993 | (u16)filter[4] | ((u16)filter[5] << 8)); |
2994 | gma_write16(hw, port, GM_MC_ADDR_H4, |
2995 | (u16)filter[6] | ((u16)filter[7] << 8)); |
2996 | |
2997 | gma_write16(hw, port, GM_RX_CTRL, reg); |
2998 | } |
2999 | |
3000 | static inline u16 phy_length(const struct skge_hw *hw, u32 status) |
3001 | { |
3002 | if (hw->chip_id == CHIP_ID_GENESIS) |
3003 | return status >> XMR_FS_LEN_SHIFT; |
3004 | else |
3005 | return status >> GMR_FS_LEN_SHIFT; |
3006 | } |
3007 | |
3008 | static inline int bad_phy_status(const struct skge_hw *hw, u32 status) |
3009 | { |
3010 | if (hw->chip_id == CHIP_ID_GENESIS) |
3011 | return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; |
3012 | else |
3013 | return (status & GMR_FS_ANY_ERR) || |
3014 | (status & GMR_FS_RX_OK) == 0; |
3015 | } |
3016 | |
3017 | static void skge_set_multicast(struct net_device *dev) |
3018 | { |
3019 | struct skge_port *skge = netdev_priv(dev); |
3020 | struct skge_hw *hw = skge->hw; |
3021 | |
3022 | if (hw->chip_id == CHIP_ID_GENESIS) |
3023 | genesis_set_multicast(dev); |
3024 | else |
3025 | yukon_set_multicast(dev); |
3026 | |
3027 | } |
3028 | |
3029 | |
3030 | /* Get receive buffer from descriptor. |
3031 | * Handles copying of small buffers and reallocation failures |
3032 | */ |
3033 | static struct sk_buff *skge_rx_get(struct net_device *dev, |
3034 | struct skge_element *e, |
3035 | u32 control, u32 status, u16 csum) |
3036 | { |
3037 | struct skge_port *skge = netdev_priv(dev); |
3038 | struct sk_buff *skb; |
3039 | u16 len = control & BMU_BBC; |
3040 | |
3041 | netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev, |
3042 | "rx slot %td status 0x%x len %d\n", |
3043 | e - skge->rx_ring.start, status, len); |
3044 | |
3045 | if (len > skge->rx_buf_size) |
3046 | goto error; |
3047 | |
3048 | if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) |
3049 | goto error; |
3050 | |
3051 | if (bad_phy_status(skge->hw, status)) |
3052 | goto error; |
3053 | |
3054 | if (phy_length(skge->hw, status) != len) |
3055 | goto error; |
3056 | |
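     | /* Small frames are copied into a fresh skb and the original DMA |
     | * buffer is recycled; larger frames hand the mapped buffer up the |
     | * stack and a replacement buffer is allocated instead. |
     | */ |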
3057 | if (len < RX_COPY_THRESHOLD) { |
3058 | skb = netdev_alloc_skb_ip_align(dev, len); |
3059 | if (!skb) |
3060 | goto resubmit; |
3061 | |
3062 | pci_dma_sync_single_for_cpu(skge->hw->pdev, |
3063 | pci_unmap_addr(e, mapaddr), |
3064 | len, PCI_DMA_FROMDEVICE); |
3065 | skb_copy_from_linear_data(e->skb, skb->data, len); |
3066 | pci_dma_sync_single_for_device(skge->hw->pdev, |
3067 | pci_unmap_addr(e, mapaddr), |
3068 | len, PCI_DMA_FROMDEVICE); |
3069 | skge_rx_reuse(e, skge->rx_buf_size); |
3070 | } else { |
3071 | struct sk_buff *nskb; |
3072 | |
3073 | nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); |
3074 | if (!nskb) |
3075 | goto resubmit; |
3076 | |
3077 | pci_unmap_single(skge->hw->pdev, |
3078 | pci_unmap_addr(e, mapaddr), |
3079 | pci_unmap_len(e, maplen), |
3080 | PCI_DMA_FROMDEVICE); |
3081 | skb = e->skb; |
3082 | prefetch(skb->data); |
3083 | skge_rx_setup(skge, e, nskb, skge->rx_buf_size); |
3084 | } |
3085 | |
3086 | skb_put(skb, len); |
3087 | if (skge->rx_csum) { |
3088 | skb->csum = csum; |
3089 | skb->ip_summed = CHECKSUM_COMPLETE; |
3090 | } |
3091 | |
3092 | skb->protocol = eth_type_trans(skb, dev); |
3093 | |
3094 | return skb; |
3095 | error: |
3096 | |
3097 | netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev, |
3098 | "rx err, slot %td control 0x%x status 0x%x\n", |
3099 | e - skge->rx_ring.start, control, status); |
3100 | |
3101 | if (skge->hw->chip_id == CHIP_ID_GENESIS) { |
3102 | if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) |
3103 | dev->stats.rx_length_errors++; |
3104 | if (status & XMR_FS_FRA_ERR) |
3105 | dev->stats.rx_frame_errors++; |
3106 | if (status & XMR_FS_FCS_ERR) |
3107 | dev->stats.rx_crc_errors++; |
3108 | } else { |
3109 | if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) |
3110 | dev->stats.rx_length_errors++; |
3111 | if (status & GMR_FS_FRAGMENT) |
3112 | dev->stats.rx_frame_errors++; |
3113 | if (status & GMR_FS_CRC_ERR) |
3114 | dev->stats.rx_crc_errors++; |
3115 | } |
3116 | |
3117 | resubmit: |
3118 | skge_rx_reuse(e, skge->rx_buf_size); |
3119 | return NULL; |
3120 | } |
3121 | |
3122 | /* Free all buffers in Tx ring which are no longer owned by device */ |
3123 | static void skge_tx_done(struct net_device *dev) |
3124 | { |
3125 | struct skge_port *skge = netdev_priv(dev); |
3126 | struct skge_ring *ring = &skge->tx_ring; |
3127 | struct skge_element *e; |
3128 | |
3129 | skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); |
3130 | |
3131 | for (e = ring->to_clean; e != ring->to_use; e = e->next) { |
3132 | u32 control = ((const struct skge_tx_desc *) e->desc)->control; |
3133 | |
3134 | if (control & BMU_OWN) |
3135 | break; |
3136 | |
3137 | skge_tx_free(skge, e, control); |
3138 | } |
3139 | skge->tx_ring.to_clean = e; |
3140 | |
3141 | /* Can run lockless until we need to synchronize to restart the queue. */ |
3142 | smp_mb(); |
3143 | |
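     | /* The first test is racy; recheck under netif_tx_lock so the |
     | * queue is not woken just as the xmit path is stopping it. |
     | */ |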
3144 | if (unlikely(netif_queue_stopped(dev) && |
3145 | skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { |
3146 | netif_tx_lock(dev); |
3147 | if (unlikely(netif_queue_stopped(dev) && |
3148 | skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { |
3149 | netif_wake_queue(dev); |
3150 | |
3151 | } |
3152 | netif_tx_unlock(dev); |
3153 | } |
3154 | } |
3155 | |
3156 | static int skge_poll(struct napi_struct *napi, int to_do) |
3157 | { |
3158 | struct skge_port *skge = container_of(napi, struct skge_port, napi); |
3159 | struct net_device *dev = skge->netdev; |
3160 | struct skge_hw *hw = skge->hw; |
3161 | struct skge_ring *ring = &skge->rx_ring; |
3162 | struct skge_element *e; |
3163 | int work_done = 0; |
3164 | |
3165 | skge_tx_done(dev); |
3166 | |
3167 | skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); |
3168 | |
3169 | for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { |
3170 | struct skge_rx_desc *rd = e->desc; |
3171 | struct sk_buff *skb; |
3172 | u32 control; |
3173 | |
3174 | rmb(); |
3175 | control = rd->control; |
3176 | if (control & BMU_OWN) |
3177 | break; |
3178 | |
3179 | skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); |
3180 | if (likely(skb)) { |
3181 | netif_receive_skb(skb); |
3182 | |
3183 | ++work_done; |
3184 | } |
3185 | } |
3186 | ring->to_clean = e; |
3187 | |
3188 | /* restart receiver */ |
3189 | wmb(); |
3190 | skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); |
3191 | |
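     | /* Budget not exhausted: complete NAPI and re-enable this port's |
     | * interrupt sources under hw_lock; the B0_IMSK read-back flushes |
     | * the posted write before new interrupts can be taken. |
     | */ |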
3192 | if (work_done < to_do) { |
3193 | unsigned long flags; |
3194 | |
3195 | spin_lock_irqsave(&hw->hw_lock, flags); |
3196 | __napi_complete(napi); |
3197 | hw->intr_mask |= napimask[skge->port]; |
3198 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
3199 | skge_read32(hw, B0_IMSK); |
3200 | spin_unlock_irqrestore(&hw->hw_lock, flags); |
3201 | } |
3202 | |
3203 | return work_done; |
3204 | } |
3205 | |
3206 | /* Parity errors seem to happen when Genesis is connected to a switch |
3207 | * with no other ports present. Heartbeat error?? |
3208 | */ |
3209 | static void skge_mac_parity(struct skge_hw *hw, int port) |
3210 | { |
3211 | struct net_device *dev = hw->dev[port]; |
3212 | |
3213 | ++dev->stats.tx_heartbeat_errors; |
3214 | |
3215 | if (hw->chip_id == CHIP_ID_GENESIS) |
3216 | skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), |
3217 | MFF_CLR_PERR); |
3218 | else |
3219 | /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ |
3220 | skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), |
3221 | (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) |
3222 | ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); |
3223 | } |
3224 | |
3225 | static void skge_mac_intr(struct skge_hw *hw, int port) |
3226 | { |
3227 | if (hw->chip_id == CHIP_ID_GENESIS) |
3228 | genesis_mac_intr(hw, port); |
3229 | else |
3230 | yukon_mac_intr(hw, port); |
3231 | } |
3232 | |
3233 | /* Handle device specific framing and timeout interrupts */ |
3234 | static void skge_error_irq(struct skge_hw *hw) |
3235 | { |
3236 | struct pci_dev *pdev = hw->pdev; |
3237 | u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); |
3238 | |
3239 | if (hw->chip_id == CHIP_ID_GENESIS) { |
3240 | /* clear xmac errors */ |
3241 | if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) |
3242 | skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); |
3243 | if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) |
3244 | skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); |
3245 | } else { |
3246 | /* Timestamp (unused) overflow */ |
3247 | if (hwstatus & IS_IRQ_TIST_OV) |
3248 | skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); |
3249 | } |
3250 | |
3251 | if (hwstatus & IS_RAM_RD_PAR) { |
3252 | dev_err(&pdev->dev, "Ram read data parity error\n"); |
3253 | skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); |
3254 | } |
3255 | |
3256 | if (hwstatus & IS_RAM_WR_PAR) { |
3257 | dev_err(&pdev->dev, "Ram write data parity error\n"); |
3258 | skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); |
3259 | } |
3260 | |
3261 | if (hwstatus & IS_M1_PAR_ERR) |
3262 | skge_mac_parity(hw, 0); |
3263 | |
3264 | if (hwstatus & IS_M2_PAR_ERR) |
3265 | skge_mac_parity(hw, 1); |
3266 | |
3267 | if (hwstatus & IS_R1_PAR_ERR) { |
3268 | dev_err(&pdev->dev, "%s: receive queue parity error\n", |
3269 | hw->dev[0]->name); |
3270 | skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); |
3271 | } |
3272 | |
3273 | if (hwstatus & IS_R2_PAR_ERR) { |
3274 | dev_err(&pdev->dev, "%s: receive queue parity error\n", |
3275 | hw->dev[1]->name); |
3276 | skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); |
3277 | } |
3278 | |
3279 | if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { |
3280 | u16 pci_status, pci_cmd; |
3281 | |
3282 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); |
3283 | pci_read_config_word(pdev, PCI_STATUS, &pci_status); |
3284 | |
3285 | dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", |
3286 | pci_cmd, pci_status); |
3287 | |
3288 | /* Write the error bits back to clear them. */ |
3289 | pci_status &= PCI_STATUS_ERROR_BITS; |
3290 | skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); |
3291 | pci_write_config_word(pdev, PCI_COMMAND, |
3292 | pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); |
3293 | pci_write_config_word(pdev, PCI_STATUS, pci_status); |
3294 | skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); |
3295 | |
3296 | /* if error still set then just ignore it */ |
3297 | hwstatus = skge_read32(hw, B0_HWE_ISRC); |
3298 | if (hwstatus & IS_IRQ_STAT) { |
3299 | dev_warn(&hw->pdev->dev, "unable to clear errors (so ignoring them)\n"); |
3300 | hw->intr_mask &= ~IS_HW_ERR; |
3301 | } |
3302 | } |
3303 | } |
3304 | |
3305 | /* |
3306 | * Interrupts from the PHY are handled in a tasklet (softirq) |
3307 | * because accessing PHY registers requires a spin wait, which might |
3308 | * cause excess interrupt latency. |
3309 | */ |
3310 | static void skge_extirq(unsigned long arg) |
3311 | { |
3312 | struct skge_hw *hw = (struct skge_hw *) arg; |
3313 | int port; |
3314 | |
3315 | for (port = 0; port < hw->ports; port++) { |
3316 | struct net_device *dev = hw->dev[port]; |
3317 | |
3318 | if (netif_running(dev)) { |
3319 | struct skge_port *skge = netdev_priv(dev); |
3320 | |
3321 | spin_lock(&hw->phy_lock); |
3322 | if (hw->chip_id != CHIP_ID_GENESIS) |
3323 | yukon_phy_intr(skge); |
3324 | else if (hw->phy_type == SK_PHY_BCOM) |
3325 | bcom_phy_intr(skge); |
3326 | spin_unlock(&hw->phy_lock); |
3327 | } |
3328 | } |
3329 | |
3330 | spin_lock_irq(&hw->hw_lock); |
3331 | hw->intr_mask |= IS_EXT_REG; |
3332 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
3333 | skge_read32(hw, B0_IMSK); |
3334 | spin_unlock_irq(&hw->hw_lock); |
3335 | } |
3336 | |
3337 | static irqreturn_t skge_intr(int irq, void *dev_id) |
3338 | { |
3339 | struct skge_hw *hw = dev_id; |
3340 | u32 status; |
3341 | int handled = 0; |
3342 | |
3343 | spin_lock(&hw->hw_lock); |
3344 | /* Reading this register masks IRQ */ |
3345 | status = skge_read32(hw, B0_SP_ISRC); |
3346 | if (status == 0 || status == ~0) |
3347 | goto out; |
3348 | |
3349 | handled = 1; |
3350 | status &= hw->intr_mask; |
3351 | if (status & IS_EXT_REG) { |
3352 | hw->intr_mask &= ~IS_EXT_REG; |
3353 | tasklet_schedule(&hw->phy_task); |
3354 | } |
3355 | |
3356 | if (status & (IS_XA1_F|IS_R1_F)) { |
3357 | struct skge_port *skge = netdev_priv(hw->dev[0]); |
3358 | hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); |
3359 | napi_schedule(&skge->napi); |
3360 | } |

	if (status & IS_PA_TO_TX1)
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);

	if (status & IS_PA_TO_RX1) {
		++hw->dev[0]->stats.rx_over_errors;
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
	}

	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

	if (hw->dev[1]) {
		struct skge_port *skge = netdev_priv(hw->dev[1]);

		if (status & (IS_XA2_F|IS_R2_F)) {
			hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
			napi_schedule(&skge->napi);
		}

		if (status & IS_PA_TO_RX2) {
			++hw->dev[1]->stats.rx_over_errors;
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
		}

		if (status & IS_PA_TO_TX2)
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);

		if (status & IS_MAC2)
			skge_mac_intr(hw, 1);
	}

	if (status & IS_HW_ERR)
		skge_error_irq(hw);

	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);
out:
	spin_unlock(&hw->hw_lock);

	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void skge_netpoll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	disable_irq(dev->irq);
	skge_intr(dev->irq, skge->hw);
	enable_irq(dev->irq);
}
#endif

static int skge_set_mac_address(struct net_device *dev, void *p)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	unsigned port = skge->port;
	const struct sockaddr *addr = p;
	u16 ctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev)) {
		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
	} else {
		/* disable Rx */
		spin_lock_bh(&hw->phy_lock);
		ctrl = gma_read16(hw, port, GM_GP_CTRL);
		gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);

		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);

		if (hw->chip_id == CHIP_ID_GENESIS)
			xm_outaddr(hw, port, XM_SA, dev->dev_addr);
		else {
			gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
			gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
		}

		gma_write16(hw, port, GM_GP_CTRL, ctrl);
		spin_unlock_bh(&hw->phy_lock);
	}

	return 0;
}
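
/*
 * Example (user space, assuming the port is named eth0): a command such as
 *	ip link set dev eth0 address 02:00:00:aa:bb:cc
 * reaches this handler through dev_set_mac_address().
 */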

static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	"Yukon" },
	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite" },
	{ CHIP_ID_YUKON_LP,	"Yukon-LP" },
};

static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];
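	/*
	 * A static buffer is assumed to be safe here because this is only
	 * called from the single-threaded probe path.
	 */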

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id);
	return buf;
}


/*
 * Set up the board data structure, but don't bring up
 * the port(s).
 */
static int skge_reset(struct skge_hw *hw)
{
	u32 reg;
	u16 ctst, pci_status;
	u8 t8, mac_cfg, pmd_type;
	int i;

	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	skge_write8(hw, B2_TST_CTRL2, 0);

	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
	pci_write_config_word(hw->pdev, PCI_STATUS,
			      pci_status | PCI_STATUS_ERROR_BITS);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
	pmd_type = skge_read8(hw, B2_PMD_TYP);
	hw->copper = (pmd_type == 'T' || pmd_type == '1');

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
		switch (hw->phy_type) {
		case SK_PHY_XMAC:
			hw->phy_addr = PHY_ADDR_XMAC;
			break;
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
				hw->phy_type);
			return -EOPNOTSUPP;
		}
		break;

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
			hw->copper = 1;

		hw->phy_addr = PHY_ADDR_MARV;
		break;

	default:
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
		return -EOPNOTSUPP;
	}

	mac_cfg = skge_read8(hw, B2_MAC_CFG);
	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapter's RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	hw->intr_mask = IS_HW_ERR;

	/* Use the PHY IRQ for all but fiber-based Genesis boards */
	if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
		hw->intr_mask |= IS_EXT_REG;

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_init(hw);
	else {
		/* switch power to VCC (workaround for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

		/* avoid boards with stuck hardware error bits */
		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
			dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}

		/* Clear PHY COMA */
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
		reg &= ~PCI_PHY_COMA;
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only.
	 * Receive interrupts are avoided by NAPI.
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	skge_write32(hw, B0_IMSK, hw->intr_mask);

	for (i = 0; i < hw->ports; i++) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}

	return 0;
}


#ifdef CONFIG_SKGE_DEBUG

static struct dentry *skge_debug;

static int skge_debug_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	const struct skge_port *skge = netdev_priv(dev);
	const struct skge_hw *hw = skge->hw;
	const struct skge_element *e;

	if (!netif_running(dev))
		return -ENETDOWN;

	seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
		   skge_read32(hw, B0_IMSK));

	seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		const struct skge_tx_desc *t = e->desc;
		seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
			   t->control, t->dma_hi, t->dma_lo, t->status,
			   t->csum_offs, t->csum_write, t->csum_start);
	}

	seq_printf(seq, "\nRx Ring:\n");
	for (e = skge->rx_ring.to_clean; ; e = e->next) {
		const struct skge_rx_desc *r = e->desc;

		if (r->control & BMU_OWN)
			break;

		seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
			   r->control, r->dma_hi, r->dma_lo, r->status,
			   r->timestamp, r->csum1, r->csum1_start);
	}

	return 0;
}

static int skge_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, skge_debug_show, inode->i_private);
}

static const struct file_operations skge_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= skge_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
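
/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug):
 *	cat /sys/kernel/debug/skge/eth0
 * dumps the interrupt state and both descriptor rings for that port.
 */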

/*
 * Use network device events to create/remove/rename
 * debugfs file entries
 */
static int skge_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct skge_port *skge;
	struct dentry *d;

	if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
		goto done;

	skge = netdev_priv(dev);
	switch (event) {
	case NETDEV_CHANGENAME:
		if (skge->debugfs) {
			d = debugfs_rename(skge_debug, skge->debugfs,
					   skge_debug, dev->name);
			if (d)
				skge->debugfs = d;
			else {
				netdev_info(dev, "rename failed\n");
				debugfs_remove(skge->debugfs);
			}
		}
		break;

	case NETDEV_GOING_DOWN:
		if (skge->debugfs) {
			debugfs_remove(skge->debugfs);
			skge->debugfs = NULL;
		}
		break;

	case NETDEV_UP:
		d = debugfs_create_file(dev->name, S_IRUGO,
					skge_debug, dev,
					&skge_debug_fops);
		if (!d || IS_ERR(d))
			netdev_info(dev, "debugfs create failed\n");
		else
			skge->debugfs = d;
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block skge_notifier = {
	.notifier_call = skge_device_event,
};


static __init void skge_debug_init(void)
{
	struct dentry *ent;

	ent = debugfs_create_dir("skge", NULL);
	if (!ent || IS_ERR(ent)) {
		pr_info("debugfs create directory failed\n");
		return;
	}

	skge_debug = ent;
	register_netdevice_notifier(&skge_notifier);
}

static __exit void skge_debug_cleanup(void)
{
	if (skge_debug) {
		unregister_netdevice_notifier(&skge_notifier);
		debugfs_remove(skge_debug);
		skge_debug = NULL;
	}
}

#else
#define skge_debug_init()
#define skge_debug_cleanup()
#endif

static const struct net_device_ops skge_netdev_ops = {
	.ndo_open		= skge_up,
	.ndo_stop		= skge_down,
	.ndo_start_xmit		= skge_xmit_frame,
	.ndo_do_ioctl		= skge_ioctl,
	.ndo_get_stats		= skge_get_stats,
	.ndo_tx_timeout		= skge_tx_timeout,
	.ndo_change_mtu		= skge_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= skge_set_multicast,
	.ndo_set_mac_address	= skge_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= skge_netpoll,
#endif
};


/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
				       int highmem)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));

	if (!dev) {
		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->netdev_ops = &skge_netdev_ops;
	dev->ethtool_ops = &skge_ethtool_ops;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->irq = hw->pdev->irq;

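	/*
	 * 'highmem' is the using_dac flag from skge_probe(): when the 64-bit
	 * DMA mask was accepted, the stack may hand us buffers above 4GB.
	 */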
	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	skge = netdev_priv(dev);
	netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
	skge->netdev = dev;
	skge->hw = hw;
	skge->msg_enable = netif_msg_init(debug, default_msg);

	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYM_OR_REM;
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_supported_modes(hw);

	if (device_can_wakeup(&hw->pdev->dev)) {
		skge->wol = wol_supported(hw) & WAKE_MAGIC;
		device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
	}

	hw->dev[port] = dev;

	skge->port = port;

	/* Only used for Genesis XMAC */
	setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);

	if (hw->chip_id != CHIP_ID_GENESIS) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		skge->rx_csum = 1;
	}

	/* read the MAC address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* device is off until link detection */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	return dev;
}

static void __devinit skge_show_addr(struct net_device *dev)
{
	const struct skge_port *skge = netdev_priv(dev);

	netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}

static int __devinit skge_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct skge_hw *hw;
	int err, using_dac = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}
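	/*
	 * Prefer a 64-bit DMA mask and note the result in using_dac, which
	 * later decides whether skge_devinit() advertises NETIF_F_HIGHDMA;
	 * otherwise fall back to a 32-bit mask.
	 */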

	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_out_free_regions;
	}

#ifdef __BIG_ENDIAN
	/* byte swap descriptors in hardware */
	{
		u32 reg;

		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	err = -ENOMEM;
	/* space for the irq_name, e.g. "skge@pci:0000:04:00.0" */
	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
	if (!hw) {
		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
		goto err_out_free_regions;
	}
	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));

	hw->pdev = pdev;
	spin_lock_init(&hw->hw_lock);
	spin_lock_init(&hw->phy_lock);
	tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_out_free_hw;
	}

	err = skge_reset(hw);
	if (err)
		goto err_out_iounmap;

	pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
		DRV_VERSION,
		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
		skge_board_name(hw), hw->chip_rev);

	dev = skge_devinit(hw, 0, using_dac);
	if (!dev)
		goto err_out_led_off;

	/* Some motherboards are broken and have zeros in the ROM. */
	if (!is_valid_ether_addr(dev->dev_addr))
		dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "cannot register net device\n");
		goto err_out_free_netdev;
	}

	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
	if (err) {
		dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
			dev->name, pdev->irq);
		goto err_out_unregister;
	}
	skge_show_addr(dev);

	if (hw->ports > 1) {
		dev1 = skge_devinit(hw, 1, using_dac);
		if (dev1 && register_netdev(dev1) == 0)
			skge_show_addr(dev1);
		else {
			/* Failure to register the second port need not be fatal */
			dev_warn(&pdev->dev, "register of second port failed\n");
			hw->dev[1] = NULL;
			hw->ports = 1;
			if (dev1)
				free_netdev(dev1);
		}
	}
	pci_set_drvdata(pdev, hw);

	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_free_netdev:
	free_netdev(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}

static void __devexit skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	flush_scheduled_work();

	dev1 = hw->dev[1];
	if (dev1)
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	tasklet_disable(&hw->phy_task);

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask = 0;
	skge_write32(hw, B0_IMSK, 0);
	skge_read32(hw, B0_IMSK);
	spin_unlock_irq(&hw->hw_lock);

	skge_write16(hw, B0_LED, LED_STAT_OFF);
	skge_write8(hw, B0_CTST, CS_RST_SET);

	free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);

	iounmap(hw->regs);
	kfree(hw);
	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM
static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i, err, wol = 0;

	if (!hw)
		return 0;

	err = pci_save_state(pdev);
	if (err)
		return err;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct skge_port *skge = netdev_priv(dev);

		if (netif_running(dev))
			skge_down(dev);
		if (skge->wol)
			skge_wol_init(skge);

		wol |= skge->wol;
	}

	skge_write32(hw, B0_IMSK, 0);

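	/*
	 * pci_prepare_to_sleep() picks the target power state and arms
	 * wake-up based on device_may_wakeup(), which was configured in
	 * skge_devinit(); the 'wol' value accumulated above is not
	 * consulted here.
	 */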
	pci_prepare_to_sleep(pdev);

	return 0;
}

static int skge_resume(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i, err;

	if (!hw)
		return 0;

	err = pci_back_from_sleep(pdev);
	if (err)
		goto out;

	err = pci_restore_state(pdev);
	if (err)
		goto out;

	err = skge_reset(hw);
	if (err)
		goto out;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];

		if (netif_running(dev)) {
			err = skge_up(dev);

			if (err) {
				netdev_err(dev, "could not bring up: %d\n", err);
				dev_close(dev);
				goto out;
			}
		}
	}
out:
	return err;
}
#endif

static void skge_shutdown(struct pci_dev *pdev)
{
	struct skge_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	if (!hw)
		return;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct skge_port *skge = netdev_priv(dev);

		if (skge->wol)
			skge_wol_init(skge);
		wol |= skge->wol;
	}

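	/*
	 * Arm wake-up from D3cold if the platform supports it; if that
	 * fails, fall back to arming it for D3hot only.
	 */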
	if (pci_enable_wake(pdev, PCI_D3cold, wol))
		pci_enable_wake(pdev, PCI_D3hot, wol);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}

static struct pci_driver skge_driver = {
	.name		= DRV_NAME,
	.id_table	= skge_id_table,
	.probe		= skge_probe,
	.remove		= __devexit_p(skge_remove),
#ifdef CONFIG_PM
	.suspend	= skge_suspend,
	.resume		= skge_resume,
#endif
	.shutdown	= skge_shutdown,
};

static int __init skge_init_module(void)
{
	skge_debug_init();
	return pci_register_driver(&skge_driver);
}

static void __exit skge_cleanup_module(void)
{
	pci_unregister_driver(&skge_driver);
	skge_debug_cleanup();
}

module_init(skge_init_module);
module_exit(skge_cleanup_module);