Root/target/linux/lantiq/patches-3.7/0117-NET-MIPS-lantiq-adds-xrx200-net.patch

1From a0a6f7f03c914327064364767b7ba688cdbcf611 Mon Sep 17 00:00:00 2001
2From: John Crispin <blogic@openwrt.org>
3Date: Mon, 22 Oct 2012 12:22:23 +0200
4Subject: [PATCH 117/123] NET: MIPS: lantiq: adds xrx200-net
5
6---
7 drivers/net/ethernet/Kconfig | 8 +-
8 drivers/net/ethernet/Makefile | 1 +
9 drivers/net/ethernet/lantiq_pce.h | 163 +++++
10 drivers/net/ethernet/lantiq_xrx200.c | 1203 ++++++++++++++++++++++++++++++++++
11 4 files changed, 1374 insertions(+), 1 deletion(-)
12 create mode 100644 drivers/net/ethernet/lantiq_pce.h
13 create mode 100644 drivers/net/ethernet/lantiq_xrx200.c
14
15Index: linux-3.7.1/drivers/net/ethernet/Kconfig
16===================================================================
17--- linux-3.7.1.orig/drivers/net/ethernet/Kconfig 2012-12-17 20:14:54.000000000 +0100
18+++ linux-3.7.1/drivers/net/ethernet/Kconfig 2012-12-21 10:30:29.629462283 +0100
19@@ -83,7 +83,13 @@
20     tristate "Lantiq SoC ETOP driver"
21     depends on SOC_TYPE_XWAY
22     ---help---
23- Support for the MII0 inside the Lantiq SoC
24+ Support for the MII0 inside the Lantiq ADSL SoC
25+
26+config LANTIQ_XRX200
27+ tristate "Lantiq SoC XRX200 driver"
28+ depends on SOC_TYPE_XWAY
29+ ---help---
30+ Support for the MII0 inside the Lantiq VDSL SoC
31 
32 source "drivers/net/ethernet/marvell/Kconfig"
33 source "drivers/net/ethernet/mellanox/Kconfig"
34Index: linux-3.7.1/drivers/net/ethernet/Makefile
35===================================================================
36--- linux-3.7.1.orig/drivers/net/ethernet/Makefile 2012-12-17 20:14:54.000000000 +0100
37+++ linux-3.7.1/drivers/net/ethernet/Makefile 2012-12-21 10:30:29.629462283 +0100
38@@ -36,6 +36,7 @@
39 obj-$(CONFIG_JME) += jme.o
40 obj-$(CONFIG_KORINA) += korina.o
41 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
42+obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
43 obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
44 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
45 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
46Index: linux-3.7.1/drivers/net/ethernet/lantiq_pce.h
47===================================================================
48--- /dev/null 1970-01-01 00:00:00.000000000 +0000
49+++ linux-3.7.1/drivers/net/ethernet/lantiq_pce.h 2012-12-21 10:30:29.629462283 +0100
50@@ -0,0 +1,163 @@
51+/*
52+ * This program is free software; you can redistribute it and/or modify it
53+ * under the terms of the GNU General Public License version 2 as published
54+ * by the Free Software Foundation.
55+ *
56+ * This program is distributed in the hope that it will be useful,
57+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
58+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
59+ * GNU General Public License for more details.
60+ *
61+ * You should have received a copy of the GNU General Public License
62+ * along with this program; if not, write to the Free Software
63+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
64+ *
65+ * Copyright (C) 2010 Lantiq Deutschland GmbH
66+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
67+ *
68+ * PCE microcode extracted from UGW5.2 switch api
69+ */
70+
71+/* Switch API Micro Code V0.3 */
72+enum {
73+ OUT_MAC0 = 0,
74+ OUT_MAC1,
75+ OUT_MAC2,
76+ OUT_MAC3,
77+ OUT_MAC4,
78+ OUT_MAC5,
79+ OUT_ETHTYP,
80+ OUT_VTAG0,
81+ OUT_VTAG1,
82+ OUT_ITAG0,
83+ OUT_ITAG1, /*10 */
84+ OUT_ITAG2,
85+ OUT_ITAG3,
86+ OUT_IP0,
87+ OUT_IP1,
88+ OUT_IP2,
89+ OUT_IP3,
90+ OUT_SIP0,
91+ OUT_SIP1,
92+ OUT_SIP2,
93+ OUT_SIP3, /*20*/
94+ OUT_SIP4,
95+ OUT_SIP5,
96+ OUT_SIP6,
97+ OUT_SIP7,
98+ OUT_DIP0,
99+ OUT_DIP1,
100+ OUT_DIP2,
101+ OUT_DIP3,
102+ OUT_DIP4,
103+ OUT_DIP5, /*30*/
104+ OUT_DIP6,
105+ OUT_DIP7,
106+ OUT_SESID,
107+ OUT_PROT,
108+ OUT_APP0,
109+ OUT_APP1,
110+ OUT_IGMP0,
111+ OUT_IGMP1,
112+ OUT_IPOFF, /*39*/
113+ OUT_NONE = 63
114+};
115+
116+/* parser's microcode length type */
117+#define INSTR 0
118+#define IPV6 1
119+#define LENACCU 2
120+
121+/* parser's microcode flag type */
122+enum {
123+ FLAG_ITAG = 0,
124+ FLAG_VLAN,
125+ FLAG_SNAP,
126+ FLAG_PPPOE,
127+ FLAG_IPV6,
128+ FLAG_IPV6FL,
129+ FLAG_IPV4,
130+ FLAG_IGMP,
131+ FLAG_TU,
132+ FLAG_HOP,
133+ FLAG_NN1, /*10 */
134+ FLAG_NN2,
135+ FLAG_END,
136+ FLAG_NO, /*13*/
137+};
138+
139+/* Micro code version V2_11 (extension for parsing IPv6 in PPPoE) */
140+#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
141+ { {val, msk, (ns<<10 | out<<4 | len>>1), (len&1)<<15 | type<<13 | flags<<9 | ipv4_len<<8 }}
142+static struct pce_microcode {
143+ unsigned short val[4];
144+/* unsigned short val_2;
145+ unsigned short val_1;
146+ unsigned short val_0;*/
147+} pce_microcode[] = {
148+ /* value mask ns fields L type flags ipv4_len */
149+ MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0),
150+ MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
151+ MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
152+ MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0),
153+ MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
154+ MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
155+ MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
156+ MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
157+ MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0),
158+ MC_ENTRY(0x0000, 0x0000, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
159+ MC_ENTRY(0x0600, 0x0600, 38, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0),
160+ MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0),
161+ MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0),
162+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
163+ MC_ENTRY(0x0300, 0xFF00, 39, OUT_NONE, 0, INSTR, FLAG_SNAP, 0),
164+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
165+ MC_ENTRY(0x0000, 0x0000, 39, OUT_DIP7, 3, INSTR, FLAG_NO, 0),
166+ MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0),
167+ MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0),
168+ MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0),
169+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
170+ MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1),
171+ MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0),
172+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_NO, 0),
173+ MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0),
174+ MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0),
175+ MC_ENTRY(0x0000, 0x0000, 38, OUT_NONE, 0, LENACCU, FLAG_NO, 0),
176+ MC_ENTRY(0x1100, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
177+ MC_ENTRY(0x0600, 0xFF00, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
178+ MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0),
179+ MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0),
180+ MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0),
181+ MC_ENTRY(0x0000, 0x0000, 37, OUT_PROT, 1, INSTR, FLAG_NO, 0),
182+ MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0),
183+ MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0),
184+ MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0),
185+ MC_ENTRY(0x0000, 0x0000, 38, OUT_PROT, 1, IPV6, FLAG_NO, 0),
186+ MC_ENTRY(0x0000, 0x0000, 38, OUT_SIP0, 16, INSTR, FLAG_NO, 0),
187+ MC_ENTRY(0x0000, 0x0000, 39, OUT_APP0, 4, INSTR, FLAG_IGMP, 0),
188+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
189+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
190+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
191+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
192+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
193+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
194+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
195+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
196+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
197+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
198+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
199+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
200+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
201+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
202+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
203+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
204+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
205+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
206+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
207+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
208+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
209+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
210+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
211+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
212+ MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE, 0, INSTR, FLAG_END, 0),
213+};
214Index: linux-3.7.1/drivers/net/ethernet/lantiq_xrx200.c
215===================================================================
216--- /dev/null 1970-01-01 00:00:00.000000000 +0000
217+++ linux-3.7.1/drivers/net/ethernet/lantiq_xrx200.c 2012-12-21 12:06:26.473599462 +0100
218@@ -0,0 +1,1203 @@
219+/*
220+ * This program is free software; you can redistribute it and/or modify it
221+ * under the terms of the GNU General Public License version 2 as published
222+ * by the Free Software Foundation.
223+ *
224+ * This program is distributed in the hope that it will be useful,
225+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
226+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
227+ * GNU General Public License for more details.
228+ *
229+ * You should have received a copy of the GNU General Public License
230+ * along with this program; if not, write to the Free Software
231+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
232+ *
233+ * Copyright (C) 2010 Lantiq Deutschland
234+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
235+ */
236+
237+#include <linux/etherdevice.h>
238+#include <linux/module.h>
239+#include <linux/platform_device.h>
240+#include <linux/interrupt.h>
241+#include <linux/clk.h>
242+#include <asm/delay.h>
243+
244+#include <linux/of_net.h>
245+#include <linux/of_mdio.h>
246+#include <linux/of_gpio.h>
247+
248+#include <xway_dma.h>
249+#include <lantiq_soc.h>
250+
251+#include "lantiq_pce.h"
252+
253+#define SW_POLLING
254+#define SW_ROUTING
255+#define SW_PORTMAP
256+
257+#ifdef SW_ROUTING
258+ #ifdef SW_PORTMAP
259+#define XRX200_MAX_DEV 7
260+ #else
261+#define XRX200_MAX_DEV 2
262+ #endif
263+#else
264+#define XRX200_MAX_DEV 1
265+#endif
266+
267+#define XRX200_MAX_PORT 7
268+#define XRX200_MAX_DMA 8
269+
270+#define XRX200_HEADROOM 4
271+
272+#define XRX200_TX_TIMEOUT (10 * HZ)
273+
274+/* port type */
275+#define XRX200_PORT_TYPE_PHY 1
276+#define XRX200_PORT_TYPE_MAC 2
277+
278+/* DMA */
279+#define XRX200_DMA_CRC_LEN 0x4
280+#define XRX200_DMA_DATA_LEN 0x600
281+#define XRX200_DMA_IRQ INT_NUM_IM2_IRL0
282+#define XRX200_DMA_RX 0
283+#define XRX200_DMA_TX 1
284+
285+/* fetch / store dma */
286+#define FDMA_PCTRL0 0x2A00
287+#define FDMA_PCTRLx(x) (FDMA_PCTRL0 + (x * 0x18))
288+#define SDMA_PCTRL0 0x2F00
289+#define SDMA_PCTRLx(x) (SDMA_PCTRL0 + (x * 0x18))
290+
291+/* buffer management */
292+#define BM_PCFG0 0x200
293+#define BM_PCFGx(x) (BM_PCFG0 + (x * 8))
294+
295+/* MDIO */
296+#define MDIO_GLOB 0x0000
297+#define MDIO_CTRL 0x0020
298+#define MDIO_READ 0x0024
299+#define MDIO_WRITE 0x0028
300+#define MDIO_PHY0 0x0054
301+#define MDIO_PHY(x) (0x0054 - (x * sizeof(unsigned)))
302+#define MDIO_CLK_CFG0 0x002C
303+#define MDIO_CLK_CFG1 0x0030
304+
305+#define MDIO_GLOB_ENABLE 0x8000
306+#define MDIO_BUSY BIT(12)
307+#define MDIO_RD BIT(11)
308+#define MDIO_WR BIT(10)
309+#define MDIO_MASK 0x1f
310+#define MDIO_ADDRSHIFT 5
311+#define MDIO1_25MHZ 9
312+
313+#define MDIO_PHY_LINK_DOWN 0x4000
314+#define MDIO_PHY_LINK_UP 0x2000
315+
316+#define MDIO_PHY_SPEED_M10 0x0000
317+#define MDIO_PHY_SPEED_M100 0x0800
318+#define MDIO_PHY_SPEED_G1 0x1000
319+
320+#define MDIO_PHY_FDUP_EN 0x0600
321+#define MDIO_PHY_FDUP_DIS 0x0200
322+
323+#define MDIO_PHY_LINK_MASK 0x6000
324+#define MDIO_PHY_SPEED_MASK 0x1800
325+#define MDIO_PHY_FDUP_MASK 0x0600
326+#define MDIO_PHY_ADDR_MASK 0x001f
327+#define MDIO_UPDATE_MASK (MDIO_PHY_ADDR_MASK | MDIO_PHY_LINK_MASK | \
328+ MDIO_PHY_SPEED_MASK | MDIO_PHY_FDUP_MASK)
329+
330+/* MII */
331+#define MII_CFG(p) (p * 8)
332+
333+#define MII_CFG_EN BIT(14)
334+
335+#define MII_CFG_MODE_MIIP 0x0
336+#define MII_CFG_MODE_MIIM 0x1
337+#define MII_CFG_MODE_RMIIP 0x2
338+#define MII_CFG_MODE_RMIIM 0x3
339+#define MII_CFG_MODE_RGMII 0x4
340+#define MII_CFG_MODE_MASK 0xf
341+
342+#define MII_CFG_RATE_M2P5 0x00
343+#define MII_CFG_RATE_M25 0x10
344+#define MII_CFG_RATE_M125 0x20
345+#define MII_CFG_RATE_M50 0x30
346+#define MII_CFG_RATE_AUTO 0x40
347+#define MII_CFG_RATE_MASK 0x70
348+
349+/* cpu port mac */
350+#define PMAC_HD_CTL 0x0000
351+#define PMAC_RX_IPG 0x0024
352+#define PMAC_EWAN 0x002c
353+
354+#define PMAC_IPG_MASK 0xf
355+#define PMAC_HD_CTL_AS 0x0008
356+#define PMAC_HD_CTL_AC 0x0004
357+#define PMAC_HD_CTL_RXSH 0x0040
358+#define PMAC_HD_CTL_AST 0x0080
359+#define PMAC_HD_CTL_RST 0x0100
360+
361+/* PCE */
362+#define PCE_TBL_KEY(x) (0x1100 + ((7 - x) * 4))
363+#define PCE_TBL_MASK 0x1120
364+#define PCE_TBL_VAL(x) (0x1124 + ((4 - x) * 4))
365+#define PCE_TBL_ADDR 0x1138
366+#define PCE_TBL_CTRL 0x113c
367+#define PCE_PMAP1 0x114c
368+#define PCE_PMAP2 0x1150
369+#define PCE_PMAP3 0x1154
370+#define PCE_GCTRL_REG(x) (0x1158 + (x * 4))
371+#define PCE_PCTRL_REG(p, x) (0x1200 + (((p * 0xa) + x) * 4))
372+
373+#define PCE_TBL_BUSY BIT(15)
374+#define PCE_TBL_CFG_ADDR_MASK 0x1f
375+#define PCE_TBL_CFG_ADWR 0x20
376+#define PCE_TBL_CFG_ADWR_MASK 0x60
377+#define PCE_INGRESS BIT(11)
378+
379+/* MAC */
380+#define MAC_FLEN_REG (0x2314)
381+#define MAC_CTRL_REG(p, x) (0x240c + (((p * 0xc) + x) * 4))
382+
383+/* buffer management */
384+#define BM_PCFG(p) (0x200 + (p * 8))
385+
386+/* special tag in TX path header */
387+#define SPID_SHIFT 24
388+#define DPID_SHIFT 16
389+#define DPID_ENABLE 1
390+#define SPID_CPU_PORT 2
391+#define PORT_MAP_SEL BIT(15)
392+#define PORT_MAP_EN BIT(14)
393+#define PORT_MAP_SHIFT 1
394+#define PORT_MAP_MASK 0x3f
395+
396+#define SPPID_MASK 0x7
397+#define SPPID_SHIFT 4
398+
399+/* MII regs not yet in linux */
400+#define MDIO_DEVAD_NONE (-1)
401+#define ADVERTIZE_MPD (1 << 10)
402+
403+struct xrx200_port {
404+ u8 num;
405+ u8 phy_addr;
406+ u16 flags;
407+ phy_interface_t phy_if;
408+
409+ int link;
410+ int gpio;
411+ enum of_gpio_flags gpio_flags;
412+
413+ struct phy_device *phydev;
414+ struct device_node *phy_node;
415+};
416+
417+struct xrx200_chan {
418+ int idx;
419+ int refcount;
420+ int tx_free;
421+
422+ struct net_device dummy_dev;
423+ struct net_device *devs[XRX200_MAX_DEV];
424+
425+ struct napi_struct napi;
426+ struct ltq_dma_channel dma;
427+ struct sk_buff *skb[LTQ_DESC_NUM];
428+};
429+
430+struct xrx200_hw {
431+ struct clk *clk;
432+ struct mii_bus *mii_bus;
433+
434+ struct xrx200_chan chan[XRX200_MAX_DMA];
435+
436+ struct net_device *devs[XRX200_MAX_DEV];
437+ int num_devs;
438+
439+ int port_map[XRX200_MAX_PORT];
440+ unsigned short wan_map;
441+
442+ spinlock_t lock;
443+};
444+
445+struct xrx200_priv {
446+ struct net_device_stats stats;
447+ int id;
448+
449+ struct xrx200_port port[XRX200_MAX_PORT];
450+ int num_port;
451+ int wan;
452+ unsigned short port_map;
453+ const void *mac;
454+
455+ struct xrx200_hw *hw;
456+};
457+
458+static __iomem void *xrx200_switch_membase;
459+static __iomem void *xrx200_mii_membase;
460+static __iomem void *xrx200_mdio_membase;
461+static __iomem void *xrx200_pmac_membase;
462+
463+#define ltq_switch_r32(x) ltq_r32(xrx200_switch_membase + (x))
464+#define ltq_switch_w32(x, y) ltq_w32(x, xrx200_switch_membase + (y))
465+#define ltq_switch_w32_mask(x, y, z) \
466+ ltq_w32_mask(x, y, xrx200_switch_membase + (z))
467+
468+#define ltq_mdio_r32(x) ltq_r32(xrx200_mdio_membase + (x))
469+#define ltq_mdio_w32(x, y) ltq_w32(x, xrx200_mdio_membase + (y))
470+#define ltq_mdio_w32_mask(x, y, z) \
471+ ltq_w32_mask(x, y, xrx200_mdio_membase + (z))
472+
473+#define ltq_mii_r32(x) ltq_r32(xrx200_mii_membase + (x))
474+#define ltq_mii_w32(x, y) ltq_w32(x, xrx200_mii_membase + (y))
475+#define ltq_mii_w32_mask(x, y, z) \
476+ ltq_w32_mask(x, y, xrx200_mii_membase + (z))
477+
478+#define ltq_pmac_r32(x) ltq_r32(xrx200_pmac_membase + (x))
479+#define ltq_pmac_w32(x, y) ltq_w32(x, xrx200_pmac_membase + (y))
480+#define ltq_pmac_w32_mask(x, y, z) \
481+ ltq_w32_mask(x, y, xrx200_pmac_membase + (z))
482+
483+static int xrx200_open(struct net_device *dev)
484+{
485+ struct xrx200_priv *priv = netdev_priv(dev);
486+ unsigned long flags;
487+ int i;
488+
489+ for (i = 0; i < XRX200_MAX_DMA; i++) {
490+ if (!priv->hw->chan[i].dma.irq)
491+ continue;
492+ spin_lock_irqsave(&priv->hw->lock, flags);
493+ if (!priv->hw->chan[i].refcount) {
494+ napi_enable(&priv->hw->chan[i].napi);
495+ ltq_dma_open(&priv->hw->chan[i].dma);
496+ }
497+ priv->hw->chan[i].refcount++;
498+ spin_unlock_irqrestore(&priv->hw->lock, flags);
499+ }
500+ for (i = 0; i < priv->num_port; i++)
501+ if (priv->port[i].phydev)
502+ phy_start(priv->port[i].phydev);
503+ netif_start_queue(dev);
504+
505+ return 0;
506+}
507+
508+static int xrx200_close(struct net_device *dev)
509+{
510+ struct xrx200_priv *priv = netdev_priv(dev);
511+ unsigned long flags;
512+ int i;
513+
514+ netif_stop_queue(dev);
515+
516+ for (i = 0; i < priv->num_port; i++)
517+ if (priv->port[i].phydev)
518+ phy_stop(priv->port[i].phydev);
519+
520+ for (i = 0; i < XRX200_MAX_DMA; i++) {
521+ if (!priv->hw->chan[i].dma.irq)
522+ continue;
523+ spin_lock_irqsave(&priv->hw->lock, flags);
524+ priv->hw->chan[i].refcount--;
525+ if (!priv->hw->chan[i].refcount) {
526+ napi_disable(&priv->hw->chan[i].napi);
527+ ltq_dma_close(&priv->hw->chan[i].dma);
528+ }
529+ spin_unlock_irqrestore(&priv->hw->lock, flags);
530+ }
531+
532+ return 0;
533+}
534+
535+static int xrx200_alloc_skb(struct xrx200_chan *ch)
536+{
537+#define DMA_PAD (NET_IP_ALIGN + NET_SKB_PAD)
538+ ch->skb[ch->dma.desc] = dev_alloc_skb(XRX200_DMA_DATA_LEN + DMA_PAD);
539+ if (!ch->skb[ch->dma.desc])
540+ return -ENOMEM;
541+
542+ skb_reserve(ch->skb[ch->dma.desc], NET_SKB_PAD);
543+ ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
544+ ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
545+ DMA_FROM_DEVICE);
546+ ch->dma.desc_base[ch->dma.desc].addr =
547+ CPHYSADDR(ch->skb[ch->dma.desc]->data);
548+ ch->dma.desc_base[ch->dma.desc].ctl =
549+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
550+ XRX200_DMA_DATA_LEN;
551+ skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
552+
553+ return 0;
554+}
555+
556+static void xrx200_hw_receive(struct xrx200_chan *ch, int id)
557+{
558+ struct net_device *dev = ch->devs[id];
559+ struct xrx200_priv *priv = netdev_priv(dev);
560+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
561+ struct sk_buff *skb = ch->skb[ch->dma.desc];
562+ int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - XRX200_DMA_CRC_LEN;
563+ unsigned long flags;
564+
565+ spin_lock_irqsave(&priv->hw->lock, flags);
566+ if (xrx200_alloc_skb(ch)) {
567+ netdev_err(dev,
568+ "failed to allocate new rx buffer, stopping DMA\n");
569+ ltq_dma_close(&ch->dma);
570+ }
571+
572+ ch->dma.desc++;
573+ ch->dma.desc %= LTQ_DESC_NUM;
574+ spin_unlock_irqrestore(&priv->hw->lock, flags);
575+
576+ skb_put(skb, len);
577+#ifdef SW_ROUTING
578+ skb_pull(skb, 8);
579+#endif
580+ skb->dev = dev;
581+ skb->protocol = eth_type_trans(skb, dev);
582+ netif_receive_skb(skb);
583+ priv->stats.rx_packets++;
584+ priv->stats.rx_bytes+=len;
585+}
586+
587+static int xrx200_poll_rx(struct napi_struct *napi, int budget)
588+{
589+ struct xrx200_chan *ch = container_of(napi,
590+ struct xrx200_chan, napi);
591+ struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
592+ int rx = 0;
593+ int complete = 0;
594+ unsigned long flags;
595+
596+ while ((rx < budget) && !complete) {
597+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
598+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
599+#ifdef SW_ROUTING
600+ struct sk_buff *skb = ch->skb[ch->dma.desc];
601+ u32 *special_tag = (u32*)skb->data;
602+ int port = (special_tag[1] >> SPPID_SHIFT) & SPPID_MASK;
603+ xrx200_hw_receive(ch, priv->hw->port_map[port]);
604+#else
605+ xrx200_hw_receive(ch, 0);
606+#endif
607+ rx++;
608+ } else {
609+ complete = 1;
610+ }
611+ }
612+ if (complete || !rx) {
613+ napi_complete(&ch->napi);
614+ spin_lock_irqsave(&priv->hw->lock, flags);
615+ ltq_dma_ack_irq(&ch->dma);
616+ spin_unlock_irqrestore(&priv->hw->lock, flags);
617+ }
618+ return rx;
619+}
620+
621+static int xrx200_poll_tx(struct napi_struct *napi, int budget)
622+{
623+ struct xrx200_chan *ch =
624+ container_of(napi, struct xrx200_chan, napi);
625+ struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
626+ unsigned long flags;
627+ int i;
628+
629+ spin_lock_irqsave(&priv->hw->lock, flags);
630+ while ((ch->dma.desc_base[ch->tx_free].ctl &
631+ (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
632+ dev_kfree_skb_any(ch->skb[ch->tx_free]);
633+ ch->skb[ch->tx_free] = NULL;
634+ memset(&ch->dma.desc_base[ch->tx_free], 0,
635+ sizeof(struct ltq_dma_desc));
636+ ch->tx_free++;
637+ ch->tx_free %= LTQ_DESC_NUM;
638+ }
639+ spin_unlock_irqrestore(&priv->hw->lock, flags);
640+
641+ for (i = 0; i < XRX200_MAX_DEV && ch->devs[i]; i++) {
642+ struct netdev_queue *txq =
643+ netdev_get_tx_queue(ch->devs[i], 0);
644+ if (netif_tx_queue_stopped(txq))
645+ netif_tx_start_queue(txq);
646+ }
647+ napi_complete(&ch->napi);
648+ spin_lock_irqsave(&priv->hw->lock, flags);
649+ ltq_dma_ack_irq(&ch->dma);
650+ spin_unlock_irqrestore(&priv->hw->lock, flags);
651+
652+ return 1;
653+}
654+
655+static struct net_device_stats *xrx200_get_stats (struct net_device *dev)
656+{
657+ struct xrx200_priv *priv = netdev_priv(dev);
658+
659+ return &priv->stats;
660+}
661+
662+static void xrx200_tx_timeout(struct net_device *dev)
663+{
664+ struct xrx200_priv *priv = netdev_priv(dev);
665+
666+ printk(KERN_ERR "%s: transmit timed out, disable the dma channel irq\n", dev->name);
667+
668+ priv->stats.tx_errors++;
669+ netif_wake_queue(dev);
670+}
671+
672+static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *dev)
673+{
674+ int queue = skb_get_queue_mapping(skb);
675+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
676+ struct xrx200_priv *priv = netdev_priv(dev);
677+ struct xrx200_chan *ch = &priv->hw->chan[XRX200_DMA_TX];
678+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
679+ unsigned long flags;
680+ u32 byte_offset;
681+ int len;
682+#ifdef SW_ROUTING
683+ #ifdef SW_PORTMAP
684+ u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | PORT_MAP_SEL | PORT_MAP_EN | DPID_ENABLE;
685+ #else
686+ u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | DPID_ENABLE;
687+ #endif
688+#endif
689+
690+ len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
691+
692+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
693+ netdev_err(dev, "tx ring full\n");
694+ netif_tx_stop_queue(txq);
695+ return NETDEV_TX_BUSY;
696+ }
697+#ifdef SW_ROUTING
698+ #ifdef SW_PORTMAP
699+ special_tag |= priv->port_map << PORT_MAP_SHIFT;
700+ #else
701+ if(priv->id)
702+ special_tag |= (1 << DPID_SHIFT);
703+ #endif
704+ if(skb_headroom(skb) < 4) {
705+ struct sk_buff *tmp = skb_realloc_headroom(skb, 4);
706+ dev_kfree_skb_any(skb);
707+ skb = tmp; if (!skb) return NETDEV_TX_OK;
708+ }
709+ skb_push(skb, 4);
710+ memcpy(skb->data, &special_tag, sizeof(u32));
711+ len += 4;
712+#endif
713+
714+ /* dma needs to start on a 16 byte aligned address */
715+ byte_offset = CPHYSADDR(skb->data) % 16;
716+ ch->skb[ch->dma.desc] = skb;
717+
718+ dev->trans_start = jiffies;
719+
720+ spin_lock_irqsave(&priv->hw->lock, flags);
721+ desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
722+ DMA_TO_DEVICE)) - byte_offset;
723+ wmb();
724+ desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
725+ LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
726+ ch->dma.desc++;
727+ ch->dma.desc %= LTQ_DESC_NUM;
728+ spin_unlock_irqrestore(&priv->hw->lock, flags);
729+
730+ if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
731+ netif_tx_stop_queue(txq);
732+
733+ priv->stats.tx_packets++;
734+ priv->stats.tx_bytes+=len;
735+
736+ return NETDEV_TX_OK;
737+}
738+
739+static irqreturn_t xrx200_dma_irq(int irq, void *priv)
740+{
741+ struct xrx200_hw *hw = priv;
742+ int ch = irq - XRX200_DMA_IRQ;
743+
744+ napi_schedule(&hw->chan[ch].napi);
745+
746+ return IRQ_HANDLED;
747+}
748+
749+static int xrx200_dma_init(struct xrx200_hw *hw)
750+{
751+ int i, err = 0;
752+
753+ ltq_dma_init_port(DMA_PORT_ETOP);
754+
755+ for (i = 0; i < 8 && !err; i++) {
756+ int irq = XRX200_DMA_IRQ + i;
757+ struct xrx200_chan *ch = &hw->chan[i];
758+
759+ ch->idx = ch->dma.nr = i;
760+
761+ if (i == XRX200_DMA_TX) {
762+ ltq_dma_alloc_tx(&ch->dma);
763+ err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_tx", hw);
764+ } else if (i == XRX200_DMA_RX) {
765+ ltq_dma_alloc_rx(&ch->dma);
766+ for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
767+ ch->dma.desc++)
768+ if (xrx200_alloc_skb(ch))
769+ err = -ENOMEM;
770+ ch->dma.desc = 0;
771+ err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_rx", hw);
772+ } else
773+ continue;
774+
775+ if (!err)
776+ ch->dma.irq = irq;
777+ }
778+
779+ return err;
780+}
781+
782+#ifdef SW_POLLING
783+static void xrx200_gmac_update(struct xrx200_port *port)
784+{
785+ u16 phyaddr = port->phydev->addr & MDIO_PHY_ADDR_MASK;
786+ u16 miimode = ltq_mii_r32(MII_CFG(port->num)) & MII_CFG_MODE_MASK;
787+ u16 miirate = 0;
788+
789+ switch (port->phydev->speed) {
790+ case SPEED_1000:
791+ phyaddr |= MDIO_PHY_SPEED_G1;
792+ miirate = MII_CFG_RATE_M125;
793+ break;
794+
795+ case SPEED_100:
796+ phyaddr |= MDIO_PHY_SPEED_M100;
797+ switch (miimode) {
798+ case MII_CFG_MODE_RMIIM:
799+ case MII_CFG_MODE_RMIIP:
800+ miirate = MII_CFG_RATE_M50;
801+ break;
802+ default:
803+ miirate = MII_CFG_RATE_M25;
804+ break;
805+ }
806+ break;
807+
808+ default:
809+ phyaddr |= MDIO_PHY_SPEED_M10;
810+ miirate = MII_CFG_RATE_M2P5;
811+ break;
812+ }
813+
814+ if (port->phydev->link)
815+ phyaddr |= MDIO_PHY_LINK_UP;
816+ else
817+ phyaddr |= MDIO_PHY_LINK_DOWN;
818+
819+ if (port->phydev->duplex == DUPLEX_FULL)
820+ phyaddr |= MDIO_PHY_FDUP_EN;
821+ else
822+ phyaddr |= MDIO_PHY_FDUP_DIS;
823+
824+ ltq_mdio_w32_mask(MDIO_UPDATE_MASK, phyaddr, MDIO_PHY(port->num));
825+ ltq_mii_w32_mask(MII_CFG_RATE_MASK, miirate, MII_CFG(port->num));
826+ udelay(1);
827+}
828+#else
829+static void xrx200_gmac_update(struct xrx200_port *port)
830+{
831+
832+}
833+#endif
834+
835+static void xrx200_mdio_link(struct net_device *dev)
836+{
837+ struct xrx200_priv *priv = netdev_priv(dev);
838+ int i;
839+
840+ for (i = 0; i < priv->num_port; i++) {
841+ if (!priv->port[i].phydev)
842+ continue;
843+
844+ if (priv->port[i].link != priv->port[i].phydev->link) {
845+ xrx200_gmac_update(&priv->port[i]);
846+ priv->port[i].link = priv->port[i].phydev->link;
847+ netdev_info(dev, "port %d %s link\n",
848+ priv->port[i].num,
849+ (priv->port[i].link)?("got"):("lost"));
850+ }
851+ }
852+}
853+
854+static inline int xrx200_mdio_poll(struct mii_bus *bus)
855+{
856+ unsigned cnt = 10000;
857+
858+ while (likely(cnt--)) {
859+ unsigned ctrl = ltq_mdio_r32(MDIO_CTRL);
860+ if ((ctrl & MDIO_BUSY) == 0)
861+ return 0;
862+ }
863+
864+ return 1;
865+}
866+
867+static int xrx200_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
868+{
869+ if (xrx200_mdio_poll(bus))
870+ return 1;
871+
872+ ltq_mdio_w32(val, MDIO_WRITE);
873+ ltq_mdio_w32(MDIO_BUSY | MDIO_WR |
874+ ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
875+ (reg & MDIO_MASK),
876+ MDIO_CTRL);
877+
878+ return 0;
879+}
880+
881+static int xrx200_mdio_rd(struct mii_bus *bus, int addr, int reg)
882+{
883+ if (xrx200_mdio_poll(bus))
884+ return -1;
885+
886+ ltq_mdio_w32(MDIO_BUSY | MDIO_RD |
887+ ((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
888+ (reg & MDIO_MASK),
889+ MDIO_CTRL);
890+
891+ if (xrx200_mdio_poll(bus))
892+ return -1;
893+
894+ return ltq_mdio_r32(MDIO_READ);
895+}
896+
897+static int xrx200_mdio_probe(struct net_device *dev, struct xrx200_port *port)
898+{
899+ struct xrx200_priv *priv = netdev_priv(dev);
900+ struct phy_device *phydev = NULL;
901+ unsigned val;
902+
903+ phydev = priv->hw->mii_bus->phy_map[port->phy_addr];
904+
905+ if (!phydev) {
906+ netdev_err(dev, "no PHY found\n");
907+ return -ENODEV;
908+ }
909+
910+ phydev = phy_connect(dev, dev_name(&phydev->dev), &xrx200_mdio_link,
911+ 0, port->phy_if);
912+
913+ if (IS_ERR(phydev)) {
914+ netdev_err(dev, "Could not attach to PHY\n");
915+ return PTR_ERR(phydev);
916+ }
917+
918+ phydev->supported &= (SUPPORTED_10baseT_Half
919+ | SUPPORTED_10baseT_Full
920+ | SUPPORTED_100baseT_Half
921+ | SUPPORTED_100baseT_Full
922+ | SUPPORTED_1000baseT_Half
923+ | SUPPORTED_1000baseT_Full
924+ | SUPPORTED_Autoneg
925+ | SUPPORTED_MII
926+ | SUPPORTED_TP);
927+ phydev->advertising = phydev->supported;
928+ port->phydev = phydev;
929+
930+ pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
931+ dev->name, phydev->drv->name,
932+ dev_name(&phydev->dev), phydev->irq);
933+
934+#ifdef SW_POLLING
935+ phy_read_status(phydev);
936+
937+ val = xrx200_mdio_rd(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
938+ val |= ADVERTIZE_MPD;
939+ xrx200_mdio_wr(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
940+ xrx200_mdio_wr(priv->hw->mii_bus, 0, 0, 0x1040);
941+
942+ phy_start_aneg(phydev);
943+#endif
944+ return 0;
945+}
946+
947+static void xrx200_port_config(struct xrx200_priv *priv,
948+ const struct xrx200_port *port)
949+{
950+ u16 miimode = 0;
951+
952+ switch (port->num) {
953+ case 0: /* xMII0 */
954+ case 1: /* xMII1 */
955+ switch (port->phy_if) {
956+ case PHY_INTERFACE_MODE_MII:
957+ if (port->flags & XRX200_PORT_TYPE_PHY)
958+ /* MII MAC mode, connected to external PHY */
959+ miimode = MII_CFG_MODE_MIIM;
960+ else
961+ /* MII PHY mode, connected to external MAC */
962+ miimode = MII_CFG_MODE_MIIP;
963+ break;
964+ case PHY_INTERFACE_MODE_RMII:
965+ if (port->flags & XRX200_PORT_TYPE_PHY)
966+ /* RMII MAC mode, connected to external PHY */
967+ miimode = MII_CFG_MODE_RMIIM;
968+ else
969+ /* RMII PHY mode, connected to external MAC */
970+ miimode = MII_CFG_MODE_RMIIP;
971+ break;
972+ case PHY_INTERFACE_MODE_RGMII:
973+ /* RGMII MAC mode, connected to external PHY */
974+ miimode = MII_CFG_MODE_RGMII;
975+ break;
976+ default:
977+ break;
978+ }
979+ break;
980+ case 2: /* internal GPHY0 */
981+ case 3: /* internal GPHY0 */
982+ case 4: /* internal GPHY1 */
983+ switch (port->phy_if) {
984+ case PHY_INTERFACE_MODE_MII:
985+ case PHY_INTERFACE_MODE_GMII:
986+ /* MII MAC mode, connected to internal GPHY */
987+ miimode = MII_CFG_MODE_MIIM;
988+ break;
989+ default:
990+ break;
991+ }
992+ break;
993+ case 5: /* internal GPHY1 or xMII2 */
994+ switch (port->phy_if) {
995+ case PHY_INTERFACE_MODE_MII:
996+ /* MII MAC mode, connected to internal GPHY */
997+ miimode = MII_CFG_MODE_MIIM;
998+ break;
999+ case PHY_INTERFACE_MODE_RGMII:
1000+ /* RGMII MAC mode, connected to external PHY */
1001+ miimode = MII_CFG_MODE_RGMII;
1002+ break;
1003+ default:
1004+ break;
1005+ }
1006+ break;
1007+ default:
1008+ break;
1009+ }
1010+
1011+ ltq_mii_w32_mask(MII_CFG_MODE_MASK, miimode | MII_CFG_EN,
1012+ MII_CFG(port->num));
1013+}
1014+
1015+static int xrx200_init(struct net_device *dev)
1016+{
1017+ struct xrx200_priv *priv = netdev_priv(dev);
1018+ struct sockaddr mac;
1019+ int err, i;
1020+
1021+#ifndef SW_POLLING
1022+ unsigned int reg = 0;
1023+
1024+ /* enable auto polling */
1025+ for (i = 0; i < priv->num_port; i++)
1026+ reg |= BIT(priv->port[i].num);
1027+ ltq_mdio_w32(reg, MDIO_CLK_CFG0);
1028+ ltq_mdio_w32(MDIO1_25MHZ, MDIO_CLK_CFG1);
1029+#endif
1030+
1031+ /* setup each port */
1032+ for (i = 0; i < priv->num_port; i++)
1033+ xrx200_port_config(priv, &priv->port[i]);
1034+
1035+ memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
1036+ if (!is_valid_ether_addr(mac.sa_data)) {
1037+ pr_warn("net-xrx200: invalid MAC, using random\n");
1038+ eth_random_addr(mac.sa_data);
1039+ dev->addr_assign_type |= NET_ADDR_RANDOM;
1040+ }
1041+
1042+ err = eth_mac_addr(dev, &mac);
1043+ if (err)
1044+ goto err_netdev;
1045+
1046+ for (i = 0; i < priv->num_port; i++)
1047+ if (xrx200_mdio_probe(dev, &priv->port[i]))
1048+ pr_warn("xrx200-mdio: probing phy of port %d failed\n",
1049+ priv->port[i].num);
1050+
1051+ return 0;
1052+
1053+err_netdev:
1054+ unregister_netdev(dev);
1055+ free_netdev(dev);
1056+ return err;
1057+}
1058+
1059+static void xrx200_pci_microcode(void)
1060+{
1061+ int i;
1062+
1063+ ltq_switch_w32_mask(PCE_TBL_CFG_ADDR_MASK | PCE_TBL_CFG_ADWR_MASK,
1064+ PCE_TBL_CFG_ADWR, PCE_TBL_CTRL);
1065+ ltq_switch_w32(0, PCE_TBL_MASK);
1066+
1067+ for (i = 0; i < ARRAY_SIZE(pce_microcode); i++) {
1068+ ltq_switch_w32(i, PCE_TBL_ADDR);
1069+ ltq_switch_w32(pce_microcode[i].val[3], PCE_TBL_VAL(0));
1070+ ltq_switch_w32(pce_microcode[i].val[2], PCE_TBL_VAL(1));
1071+ ltq_switch_w32(pce_microcode[i].val[1], PCE_TBL_VAL(2));
1072+ ltq_switch_w32(pce_microcode[i].val[0], PCE_TBL_VAL(3));
1073+
1074+ // start the table access:
1075+ ltq_switch_w32_mask(0, PCE_TBL_BUSY, PCE_TBL_CTRL);
1076+ while (ltq_switch_r32(PCE_TBL_CTRL) & PCE_TBL_BUSY);
1077+ }
1078+
1079+ /* tell the switch that the microcode is loaded */
1080+ ltq_switch_w32_mask(0, BIT(3), PCE_GCTRL_REG(0));
1081+}
1082+
1083+static void xrx200_hw_init(struct xrx200_hw *hw)
1084+{
1085+ int i;
1086+
1087+ /* enable clock gate */
1088+ clk_enable(hw->clk);
1089+
1090+ ltq_switch_w32(1, 0);
1091+ mdelay(100);
1092+ ltq_switch_w32(0, 0);
1093+ /*
1094+ * TODO: we should really disable all phys/miis here and explicitly
1095+ * enable them in the device specific init function
1096+ */
1097+
1098+ /* disable port fetch/store dma */
1099+ for (i = 0; i < 7; i++ ) {
1100+ ltq_switch_w32(0, FDMA_PCTRLx(i));
1101+ ltq_switch_w32(0, SDMA_PCTRLx(i));
1102+ }
1103+
1104+ /* enable Switch */
1105+ ltq_mdio_w32_mask(0, MDIO_GLOB_ENABLE, MDIO_GLOB);
1106+
1107+ /* load the pce microcode */
1108+ xrx200_pci_microcode();
1109+
1110+ /* Default unknown Broadcast/Multicast/Unicast port maps */
1111+ ltq_switch_w32(0x7f, PCE_PMAP1);
1112+ ltq_switch_w32(0x7f, PCE_PMAP2);
1113+ ltq_switch_w32(0x7f, PCE_PMAP3);
1114+
1115+ /* RMON Counter Enable for all physical ports */
1116+ for (i = 0; i < 7; i++)
1117+ ltq_switch_w32(0x1, BM_PCFG(i));
1118+
1119+ /* disable auto polling */
1120+ ltq_mdio_w32(0x0, MDIO_CLK_CFG0);
1121+
1122+ /* enable port statistic counters */
1123+ for (i = 0; i < 7; i++)
1124+ ltq_switch_w32(0x1, BM_PCFGx(i));
1125+
1126+ /* set IPG to 12 */
1127+ ltq_pmac_w32_mask(PMAC_IPG_MASK, 0xb, PMAC_RX_IPG);
1128+
1129+#ifdef SW_ROUTING
1130+ /* enable status header, enable CRC */
1131+ ltq_pmac_w32_mask(0,
1132+ PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS | PMAC_HD_CTL_AC,
1133+ PMAC_HD_CTL);
1134+#else
1135+ /* disable status header, enable CRC */
1136+ ltq_pmac_w32_mask(PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS,
1137+ PMAC_HD_CTL_AC,
1138+ PMAC_HD_CTL);
1139+#endif
1140+
1141+ /* enable port fetch/store dma */
1142+ for (i = 0; i < 7; i++ ) {
1143+ ltq_switch_w32_mask(0, 0x01, FDMA_PCTRLx(i));
1144+ ltq_switch_w32_mask(0, 0x01, SDMA_PCTRLx(i));
1145+ ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(i, 0));
1146+ }
1147+
1148+ /* enable special tag insertion on cpu port */
1149+ ltq_switch_w32_mask(0, 0x02, FDMA_PCTRLx(6));
1150+ ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(6, 0));
1151+ ltq_switch_w32_mask(0, BIT(3), MAC_CTRL_REG(6, 2));
1152+ ltq_switch_w32(1518 + 8 + 4 * 2, MAC_FLEN_REG);
1153+}
1154+
1155+static void xrx200_hw_cleanup(struct xrx200_hw *hw)
1156+{
1157+ int i;
1158+
1159+ /* disable the switch */
1160+ ltq_mdio_w32_mask(MDIO_GLOB_ENABLE, 0, MDIO_GLOB);
1161+
1162+ /* free the channels and IRQs */
1163+ for (i = 0; i < 2; i++) {
1164+ ltq_dma_free(&hw->chan[i].dma);
1165+ if (hw->chan[i].dma.irq)
1166+ free_irq(hw->chan[i].dma.irq, hw);
1167+ }
1168+
1169+ /* free the allocated RX ring */
1170+ for (i = 0; i < LTQ_DESC_NUM; i++)
1171+ dev_kfree_skb_any(hw->chan[XRX200_DMA_RX].skb[i]);
1172+
1173+ /* clear the mdio bus */
1174+ mdiobus_unregister(hw->mii_bus);
1175+ mdiobus_free(hw->mii_bus);
1176+
1177+ /* release the clock */
1178+ clk_disable(hw->clk);
1179+ clk_put(hw->clk);
1180+}
1181+
1182+static int xrx200_of_mdio(struct xrx200_hw *hw, struct device_node *np)
1183+{
1184+ int i;
1185+ hw->mii_bus = mdiobus_alloc();
1186+ if (!hw->mii_bus)
1187+ return -ENOMEM;
1188+
1189+ hw->mii_bus->read = xrx200_mdio_rd;
1190+ hw->mii_bus->write = xrx200_mdio_wr;
1191+ hw->mii_bus->name = "lantiq,xrx200-mdio";
1192+ snprintf(hw->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
1193+
1194+ if (of_mdiobus_register(hw->mii_bus, np)) {
1195+ mdiobus_free(hw->mii_bus);
1196+ return -ENXIO;
1197+ }
1198+
1199+ return 0;
1200+}
1201+
1202+static void xrx200_of_port(struct xrx200_priv *priv, struct device_node *port)
1203+{
1204+ const __be32 *addr, *id = of_get_property(port, "reg", NULL);
1205+ struct xrx200_port *p = &priv->port[priv->num_port];
1206+
1207+ if (!id)
1208+ return;
1209+
1210+ memset(p, 0, sizeof(struct xrx200_port));
1211+ p->phy_node = of_parse_phandle(port, "phy-handle", 0);
1212+ addr = of_get_property(p->phy_node, "reg", NULL);
1213+ if (!addr)
1214+ return;
1215+
1216+ p->num = *id;
1217+ p->phy_addr = *addr;
1218+ p->phy_if = of_get_phy_mode(port);
1219+ if (p->phy_addr > 0x10)
1220+ p->flags = XRX200_PORT_TYPE_MAC;
1221+ else
1222+ p->flags = XRX200_PORT_TYPE_PHY;
1223+ priv->num_port++;
1224+
1225+ p->gpio = of_get_gpio_flags(port, 0, &p->gpio_flags);
1226+ if (gpio_is_valid(p->gpio))
1227+ if (!gpio_request(p->gpio, "phy-reset")) {
1228+ gpio_direction_output(p->gpio,
1229+ (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (1) : (0));
1230+ udelay(100);
1231+ gpio_set_value(p->gpio, (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
1232+ }
1233+ /* is this port a wan port ? */
1234+ if (priv->wan)
1235+ priv->hw->wan_map |= BIT(p->num);
1236+
1237+ priv->port_map |= BIT(p->num);
1238+
1239+ /* store the port id in the hw struct so we can map ports -> devices */
1240+ priv->hw->port_map[p->num] = priv->hw->num_devs;
1241+}
1242+
1243+static const struct net_device_ops xrx200_netdev_ops = {
1244+ .ndo_init = xrx200_init,
1245+ .ndo_open = xrx200_open,
1246+ .ndo_stop = xrx200_close,
1247+ .ndo_start_xmit = xrx200_start_xmit,
1248+ .ndo_set_mac_address = eth_mac_addr,
1249+ .ndo_validate_addr = eth_validate_addr,
1250+ .ndo_change_mtu = eth_change_mtu,
1251+ .ndo_get_stats = xrx200_get_stats,
1252+ .ndo_tx_timeout = xrx200_tx_timeout,
1253+};
1254+
1255+static void xrx200_of_iface(struct xrx200_hw *hw, struct device_node *iface)
1256+{
1257+ struct xrx200_priv *priv;
1258+ struct device_node *port;
1259+ const __be32 *wan;
1260+
1261+ /* alloc the network device */
1262+ hw->devs[hw->num_devs] = alloc_etherdev(sizeof(struct xrx200_priv));
1263+ if (!hw->devs[hw->num_devs])
1264+ return;
1265+
1266+ /* setup the network device */
1267+ strcpy(hw->devs[hw->num_devs]->name, "eth%d");
1268+ hw->devs[hw->num_devs]->netdev_ops = &xrx200_netdev_ops;
1269+ hw->devs[hw->num_devs]->watchdog_timeo = XRX200_TX_TIMEOUT;
1270+ hw->devs[hw->num_devs]->needed_headroom = XRX200_HEADROOM;
1271+
1272+ /* setup our private data */
1273+ priv = netdev_priv(hw->devs[hw->num_devs]);
1274+ priv->hw = hw;
1275+ priv->mac = of_get_mac_address(iface);
1276+ priv->id = hw->num_devs;
1277+
1278+ /* is this the wan interface ? */
1279+ wan = of_get_property(iface, "lantiq,wan", NULL);
1280+ if (wan && (*wan == 1))
1281+ priv->wan = 1;
1282+
1283+ /* load the ports that are part of the interface */
1284+ for_each_child_of_node(iface, port)
1285+ if (of_device_is_compatible(port, "lantiq,xrx200-pdi-port"))
1286+ xrx200_of_port(priv, port);
1287+
1288+ /* register the actual device */
1289+ if (!register_netdev(hw->devs[hw->num_devs]))
1290+ hw->num_devs++;
1291+}
1292+
1293+static struct xrx200_hw xrx200_hw;
1294+
1295+static int __devinit xrx200_probe(struct platform_device *pdev)
1296+{
1297+ struct resource *res[4];
1298+ struct device_node *mdio_np, *iface_np;
1299+ int i;
1300+
1301+ /* load the memory ranges */
1302+ for (i = 0; i < 4; i++) {
1303+ res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
1304+ if (!res[i]) {
1305+ dev_err(&pdev->dev, "failed to get resources\n");
1306+ return -ENOENT;
1307+ }
1308+ }
1309+ xrx200_switch_membase = devm_request_and_ioremap(&pdev->dev, res[0]);
1310+ xrx200_mdio_membase = devm_request_and_ioremap(&pdev->dev, res[1]);
1311+ xrx200_mii_membase = devm_request_and_ioremap(&pdev->dev, res[2]);
1312+ xrx200_pmac_membase = devm_request_and_ioremap(&pdev->dev, res[3]);
1313+ if (!xrx200_switch_membase || !xrx200_mdio_membase ||
1314+ !xrx200_mii_membase || !xrx200_pmac_membase) {
1315+ dev_err(&pdev->dev, "failed to request and remap io ranges \n");
1316+ return -ENOMEM;
1317+ }
1318+
1319+ /* get the clock */
1320+ xrx200_hw.clk = clk_get(&pdev->dev, NULL);
1321+ if (IS_ERR(xrx200_hw.clk)) {
1322+ dev_err(&pdev->dev, "failed to get clock\n");
1323+ return PTR_ERR(xrx200_hw.clk);
1324+ }
1325+
1326+ /* bring up the dma engine and IP core */
1327+ spin_lock_init(&xrx200_hw.lock);
1328+ xrx200_dma_init(&xrx200_hw);
1329+ xrx200_hw_init(&xrx200_hw);
1330+
1331+ /* bring up the mdio bus */
1332+ mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
1333+ "lantiq,xrx200-mdio");
1334+ if (mdio_np)
1335+ if (xrx200_of_mdio(&xrx200_hw, mdio_np))
1336+ dev_err(&pdev->dev, "mdio probe failed\n");
1337+
1338+ /* load the interfaces */
1339+ for_each_child_of_node(pdev->dev.of_node, iface_np)
1340+ if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
1341+ if (xrx200_hw.num_devs < XRX200_MAX_DEV)
1342+ xrx200_of_iface(&xrx200_hw, iface_np);
1343+ else
1344+ dev_err(&pdev->dev,
1345+ "only %d interfaces allowed\n",
1346+ XRX200_MAX_DEV);
1347+ }
1348+
1349+ if (!xrx200_hw.num_devs) {
1350+ xrx200_hw_cleanup(&xrx200_hw);
1351+ dev_err(&pdev->dev, "failed to load interfaces\n");
1352+ return -ENOENT;
1353+ }
1354+
1355+ /* set wan port mask */
1356+ ltq_pmac_w32(xrx200_hw.wan_map, PMAC_EWAN);
1357+
1358+ for (i = 0; i < xrx200_hw.num_devs; i++) {
1359+ xrx200_hw.chan[XRX200_DMA_RX].devs[i] = xrx200_hw.devs[i];
1360+ xrx200_hw.chan[XRX200_DMA_TX].devs[i] = xrx200_hw.devs[i];
1361+ }
1362+
1363+ /* setup NAPI */
1364+ init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev);
1365+ init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_TX].dummy_dev);
1366+ netif_napi_add(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev,
1367+ &xrx200_hw.chan[XRX200_DMA_RX].napi, xrx200_poll_rx, 32);
1368+ netif_napi_add(&xrx200_hw.chan[XRX200_DMA_TX].dummy_dev,
1369+ &xrx200_hw.chan[XRX200_DMA_TX].napi, xrx200_poll_tx, 8);
1370+
1371+ platform_set_drvdata(pdev, &xrx200_hw);
1372+
1373+ return 0;
1374+}
1375+
1376+static int __devexit xrx200_remove(struct platform_device *pdev)
1377+{
1378+ struct net_device *dev = platform_get_drvdata(pdev);
1379+ struct xrx200_priv *priv;
1380+
1381+ if (!dev)
1382+ return 0;
1383+
1384+ priv = netdev_priv(dev);
1385+
1386+ /* free stack related instances */
1387+ netif_stop_queue(dev);
1388+ netif_napi_del(&xrx200_hw.chan[XRX200_DMA_RX].napi);
1389+ netif_napi_del(&xrx200_hw.chan[XRX200_DMA_TX].napi);
1390+
1391+ /* shut down hardware */
1392+ xrx200_hw_cleanup(&xrx200_hw);
1393+
1394+ /* remove the actual device */
1395+ unregister_netdev(dev);
1396+ free_netdev(dev);
1397+
1398+ return 0;
1399+}
1400+
1401+static const struct of_device_id xrx200_match[] = {
1402+ { .compatible = "lantiq,xrx200-net" },
1403+ {},
1404+};
1405+MODULE_DEVICE_TABLE(of, xrx200_match);
1406+
1407+static struct platform_driver xrx200_driver = {
1408+ .probe = xrx200_probe,
1409+ .remove = __devexit_p(xrx200_remove),
1410+ .driver = {
1411+ .name = "lantiq,xrx200-net",
1412+ .of_match_table = xrx200_match,
1413+ .owner = THIS_MODULE,
1414+ },
1415+};
1416+
1417+module_platform_driver(xrx200_driver);
1418+
1419+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1420+MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
1421+MODULE_LICENSE("GPL");
1422

Archive Download this file



interactive