target/linux/atheros/patches-3.3/110-ar2313_ethernet.patch

--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -22,6 +22,7 @@ source "drivers/net/ethernet/adaptec/Kco
 source "drivers/net/ethernet/aeroflex/Kconfig"
 source "drivers/net/ethernet/alteon/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
+source "drivers/net/ethernet/ar231x/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
 source "drivers/net/ethernet/cadence/Kconfig"
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_GRETH) += aeroflex/
 obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
+obj-$(CONFIG_NET_VENDOR_AR231X) += ar231x/
 obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
 obj-$(CONFIG_NET_CADENCE) += cadence/
 obj-$(CONFIG_NET_BFIN) += adi/
--- /dev/null
+++ b/drivers/net/ethernet/ar231x/Kconfig
@@ -0,0 +1,5 @@
+config NET_VENDOR_AR231X
+ tristate "AR231X Ethernet support"
+ depends on ATHEROS_AR231X
+ help
+ Support for the AR231x/531x ethernet controller
--- /dev/null
+++ b/drivers/net/ethernet/ar231x/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_VENDOR_AR231X) += ar231x.o
--- /dev/null
+++ b/drivers/net/ethernet/ar231x/ar231x.c
@@ -0,0 +1,1289 @@
+/*
+ * ar231x.c: Linux driver for the Atheros AR231x Ethernet device.
+ *
+ * Copyright (C) 2004 by Sameer Dekate <sdekate@arubanetworks.com>
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
+ *
+ * Thanks to Atheros for providing hardware and documentation
+ * enabling me to write this driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Additional credits:
+ * This code is taken from John Taylor's Sibyte driver and then
+ * modified for the AR2313.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/sockios.h>
+#include <linux/pkt_sched.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/ethtool.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+
+#include <net/sock.h>
+#include <net/ip.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/bootinfo.h>
+
+#define AR2313_MTU 1692
+#define AR2313_PRIOS 1
+#define AR2313_QUEUES (2*AR2313_PRIOS)
+#define AR2313_DESCR_ENTRIES 64
+
+
+#ifndef min
+#define min(a,b) (((a)<(b))?(a):(b))
+#endif
+
+#ifndef SMP_CACHE_BYTES
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#endif
+
+#define AR2313_MBOX_SET_BIT 0x8
+
+#include "ar231x.h"
+
+/*
+ * New interrupt handler strategy:
+ *
+ * The old interrupt handler worked using the traditional method of
+ * replacing an skbuff with a new one when a packet arrives. However,
+ * the rx rings do not need to contain a static number of buffer
+ * descriptors, thus it makes sense to move the memory allocation out
+ * of the main interrupt handler and do it in a bottom half handler
+ * and only allocate new buffers when the number of buffers in the
+ * ring is below a certain threshold. In order to avoid starving the
+ * NIC under heavy load it is however necessary to force allocation
+ * when hitting a minimum threshold. The strategy for allocation is as
+ * follows:
+ *
+ * RX_LOW_BUF_THRES - allocate buffers in the bottom half
+ * RX_PANIC_LOW_THRES - we are very low on buffers, allocate
+ * the buffers in the interrupt handler
+ * RX_RING_THRES - maximum number of buffers in the rx ring
+ *
+ * One advantageous side effect of this allocation approach is that the
+ * entire rx processing can be done without holding any spin lock
+ * since the rx rings and registers are totally independent of the tx
+ * ring and its registers. This of course includes the kmalloc's of
+ * new skb's. Thus start_xmit can run in parallel with rx processing
+ * and the memory allocation on SMP systems.
+ *
+ * Note that running the skb reallocation in a bottom half opens up
+ * another can of races which needs to be handled properly. In
+ * particular it can happen that the interrupt handler tries to run
+ * the reallocation while the bottom half is either running on another
+ * CPU or was interrupted on the same CPU. To get around this the
+ * driver uses bitops to prevent the reallocation routines from being
+ * reentered.
+ *
+ * TX handling can also be done without holding any spin lock,
+ * which is fun, since tx_csm is only written to by the interrupt
+ * handler.
+ */
+
+/*
+ * Threshold values for RX buffer allocation - the low water marks for
+ * when to start refilling the rings are set to 75% of the ring
+ * sizes. It seems to make sense to refill the rings entirely from the
+ * interrupt handler once it gets below the panic threshold, that way
+ * we don't risk that the refilling is moved to another CPU when the
+ * one running the interrupt handler just got the slab code hot in its
+ * cache.
+ */
+#define RX_RING_SIZE AR2313_DESCR_ENTRIES
+#define RX_PANIC_THRES (RX_RING_SIZE/4)
+#define RX_LOW_THRES ((3*RX_RING_SIZE)/4)
+#define CRC_LEN 4
+#define RX_OFFSET 2
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define VLAN_HDR 4
+#else
+#define VLAN_HDR 0
+#endif
+
+#define AR2313_BUFSIZE (AR2313_MTU + VLAN_HDR + ETH_HLEN + CRC_LEN + RX_OFFSET)
+
+#ifdef MODULE
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sameer Dekate <sdekate@arubanetworks.com>, Imre Kaloz <kaloz@openwrt.org>, Felix Fietkau <nbd@openwrt.org>");
+MODULE_DESCRIPTION("AR231x Ethernet driver");
+#endif
+
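+/*
+ * Note: this local virt_to_phys override maps KSEG0/KSEG1 virtual
+ * addresses to physical by masking off the top three (segment) bits.
+ */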
+#define virt_to_phys(x) ((u32)(x) & 0x1fffffff)
+
+// prototypes
+static void ar231x_halt(struct net_device *dev);
+static void rx_tasklet_func(unsigned long data);
+static void rx_tasklet_cleanup(struct net_device *dev);
+static void ar231x_multicast_list(struct net_device *dev);
+static void ar231x_tx_timeout(struct net_device *dev);
+
+static int ar231x_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum);
+static int ar231x_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value);
+static int ar231x_mdiobus_reset(struct mii_bus *bus);
+static int ar231x_mdiobus_probe (struct net_device *dev);
+static void ar231x_adjust_link(struct net_device *dev);
+
+#ifndef ERR
+#define ERR(fmt, args...) printk("%s: " fmt, __func__, ##args)
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+ar231x_netpoll(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ar231x_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+static const struct net_device_ops ar231x_ops = {
+ .ndo_open = ar231x_open,
+ .ndo_stop = ar231x_close,
+ .ndo_start_xmit = ar231x_start_xmit,
+ .ndo_set_rx_mode = ar231x_multicast_list,
+ .ndo_do_ioctl = ar231x_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_tx_timeout = ar231x_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ar231x_netpoll,
+#endif
+};
+
+int __devinit ar231x_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ar231x_private *sp;
+ struct resource *res;
+ unsigned long ar_eth_base;
+ char buf[64];
+
+ dev = alloc_etherdev(sizeof(struct ar231x_private));
+
+ if (dev == NULL) {
+ printk(KERN_ERR
+ "ar231x: Unable to allocate net_device structure!\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ sp = netdev_priv(dev);
+ sp->dev = dev;
+ sp->cfg = pdev->dev.platform_data;
+
+ sprintf(buf, "eth%d_membase", pdev->id);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
+ if (!res)
+ return -ENODEV;
+
+ sp->link = 0;
+ ar_eth_base = res->start;
+
+ sprintf(buf, "eth%d_irq", pdev->id);
+ dev->irq = platform_get_irq_byname(pdev, buf);
+
+ spin_lock_init(&sp->lock);
+
+ dev->features |= NETIF_F_HIGHDMA;
+ dev->netdev_ops = &ar231x_ops;
+
+ tasklet_init(&sp->rx_tasklet, rx_tasklet_func, (unsigned long) dev);
+ tasklet_disable(&sp->rx_tasklet);
+
+ sp->eth_regs =
+ ioremap_nocache(virt_to_phys(ar_eth_base), sizeof(*sp->eth_regs));
+ if (!sp->eth_regs) {
+ printk("Can't remap eth registers\n");
+ return (-ENXIO);
+ }
+
+ /*
+ * When there's only one MAC, PHY regs are typically on ENET0,
+ * even though the MAC might be on ENET1.
+ * Need to remap PHY regs separately in this case.
+ */
+ if (virt_to_phys(ar_eth_base) == virt_to_phys(sp->phy_regs))
+ sp->phy_regs = sp->eth_regs;
+ else {
+ sp->phy_regs =
+ ioremap_nocache(virt_to_phys(sp->cfg->phy_base),
+ sizeof(*sp->phy_regs));
+ if (!sp->phy_regs) {
+ printk("Can't remap phy registers\n");
+ return (-ENXIO);
+ }
+ }
+
+ sp->dma_regs =
+ ioremap_nocache(virt_to_phys(ar_eth_base + 0x1000),
+ sizeof(*sp->dma_regs));
+ dev->base_addr = (unsigned int) sp->dma_regs;
+ if (!sp->dma_regs) {
+ printk("Can't remap DMA registers\n");
+ return (-ENXIO);
+ }
+
+ sp->int_regs = ioremap_nocache(virt_to_phys(sp->cfg->reset_base), 4);
+ if (!sp->int_regs) {
+ printk("Can't remap INTERRUPT registers\n");
+ return (-ENXIO);
+ }
+
+ strncpy(sp->name, "Atheros AR231x", sizeof(sp->name) - 1);
+ sp->name[sizeof(sp->name) - 1] = '\0';
+ memcpy(dev->dev_addr, sp->cfg->macaddr, 6);
+
+ if (ar231x_init(dev)) {
+ /*
+ * ar231x_init() calls ar231x_init_cleanup() on error.
+ */
+ kfree(dev);
+ return -ENODEV;
+ }
+
+ if (register_netdev(dev)) {
+ printk("%s: register_netdev failed\n", __func__);
+ return -1;
+ }
+
+ printk("%s: %s: %02x:%02x:%02x:%02x:%02x:%02x, irq %d\n",
+ dev->name, sp->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], dev->irq);
+
+ sp->mii_bus = mdiobus_alloc();
+ if (sp->mii_bus == NULL)
+ return -1;
+
+ sp->mii_bus->priv = dev;
+ sp->mii_bus->read = ar231x_mdiobus_read;
+ sp->mii_bus->write = ar231x_mdiobus_write;
+ sp->mii_bus->reset = ar231x_mdiobus_reset;
+ sp->mii_bus->name = "ar231x_eth_mii";
+ snprintf(sp->mii_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ sp->mii_bus->irq = kmalloc(sizeof(int), GFP_KERNEL);
+ *sp->mii_bus->irq = PHY_POLL;
+
+ mdiobus_register(sp->mii_bus);
+
+ if (ar231x_mdiobus_probe(dev) != 0) {
+ printk(KERN_ERR "%s: mdiobus_probe failed\n", dev->name);
+ rx_tasklet_cleanup(dev);
+ ar231x_init_cleanup(dev);
+ unregister_netdev(dev);
+ kfree(dev);
+ return -ENODEV;
+ }
+
+ /* start link poll timer */
+ ar231x_setup_timer(dev);
+
+ return 0;
+}
+
+
+static void ar231x_multicast_list(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ unsigned int filter;
+
+ filter = sp->eth_regs->mac_control;
+
+ if (dev->flags & IFF_PROMISC)
+ filter |= MAC_CONTROL_PR;
+ else
+ filter &= ~MAC_CONTROL_PR;
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 0))
+ filter |= MAC_CONTROL_PM;
+ else
+ filter &= ~MAC_CONTROL_PM;
+
+ sp->eth_regs->mac_control = filter;
+}
+
+static void rx_tasklet_cleanup(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+
+ /*
+ * Tasklet may be scheduled. Need to get it removed from the list
+ * since we're about to free the struct.
+ */
+
+ sp->unloading = 1;
+ tasklet_enable(&sp->rx_tasklet);
+ tasklet_kill(&sp->rx_tasklet);
+}
+
+static int __devexit ar231x_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ar231x_private *sp = netdev_priv(dev);
+ rx_tasklet_cleanup(dev);
+ ar231x_init_cleanup(dev);
+ unregister_netdev(dev);
+ mdiobus_unregister(sp->mii_bus);
+ mdiobus_free(sp->mii_bus);
+ kfree(dev);
+ return 0;
+}
+
+
+/*
+ * Restart the AR2313 ethernet controller.
+ */
+static int ar231x_restart(struct net_device *dev)
+{
+ /* disable interrupts */
+ disable_irq(dev->irq);
+
+ /* stop mac */
+ ar231x_halt(dev);
+
+ /* initialize */
+ ar231x_init(dev);
+
+ /* enable interrupts */
+ enable_irq(dev->irq);
+
+ return 0;
+}
+
+static struct platform_driver ar231x_driver = {
+ .driver.name = "ar231x-eth",
+ .probe = ar231x_probe,
+ .remove = __devexit_p(ar231x_remove),
+};
+
+module_platform_driver(ar231x_driver);
+
+static void ar231x_free_descriptors(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ if (sp->rx_ring != NULL) {
+ kfree((void *) KSEG0ADDR(sp->rx_ring));
+ sp->rx_ring = NULL;
+ sp->tx_ring = NULL;
+ }
+}
+
+
+static int ar231x_allocate_descriptors(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ int size;
+ int j;
+ ar231x_descr_t *space;
+
+ if (sp->rx_ring != NULL) {
+ printk("%s: already done.\n", __FUNCTION__);
+ return 0;
+ }
+
+ size =
+ (sizeof(ar231x_descr_t) * (AR2313_DESCR_ENTRIES * AR2313_QUEUES));
+ space = kmalloc(size, GFP_KERNEL);
+ if (space == NULL)
+ return 1;
+
+ /* invalidate caches */
+ dma_cache_inv((unsigned int) space, size);
+
+ /* now convert pointer to KSEG1 */
+ space = (ar231x_descr_t *) KSEG1ADDR(space);
+
+ memset((void *) space, 0, size);
+
+ sp->rx_ring = space;
+ space += AR2313_DESCR_ENTRIES;
+
+ sp->tx_ring = space;
+ space += AR2313_DESCR_ENTRIES;
+
+ /* Initialize the transmit Descriptors */
+ for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
+ ar231x_descr_t *td = &sp->tx_ring[j];
+ td->status = 0;
+ td->devcs = DMA_TX1_CHAINED;
+ td->addr = 0;
+ td->descr =
+ virt_to_phys(&sp->
+ tx_ring[(j + 1) & (AR2313_DESCR_ENTRIES - 1)]);
+ }
+
+ return 0;
+}
+
+
+/*
+ * Generic cleanup handling data allocated during init. Used when the
+ * module is unloaded or if an error occurs during initialization
+ */
+static void ar231x_init_cleanup(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int j;
+
+ ar231x_free_descriptors(dev);
+
+ if (sp->eth_regs)
+ iounmap((void *) sp->eth_regs);
+ if (sp->dma_regs)
+ iounmap((void *) sp->dma_regs);
+
+ if (sp->rx_skb) {
+ for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
+ skb = sp->rx_skb[j];
+ if (skb) {
+ sp->rx_skb[j] = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+ kfree(sp->rx_skb);
+ sp->rx_skb = NULL;
+ }
+
+ if (sp->tx_skb) {
+ for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
+ skb = sp->tx_skb[j];
+ if (skb) {
+ sp->tx_skb[j] = NULL;
+ dev_kfree_skb(skb);
+ }
+ }
+ kfree(sp->tx_skb);
+ sp->tx_skb = NULL;
+ }
+}
+
+static int ar231x_setup_timer(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+
+ init_timer(&sp->link_timer);
+
+ sp->link_timer.function = ar231x_link_timer_fn;
+ sp->link_timer.data = (int) dev;
+ sp->link_timer.expires = jiffies + HZ;
+
+ add_timer(&sp->link_timer);
+ return 0;
+
+}
+
+static void ar231x_link_timer_fn(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct ar231x_private *sp = netdev_priv(dev);
+
+ // see if the link status changed
+ // This was needed to make sure we set the PHY to the
+ // autonegotiated value of half or full duplex.
+ ar231x_check_link(dev);
+
+ // Loop faster when we don't have link.
+ // This was needed to speed up the AP bootstrap time.
+ if (sp->link == 0) {
+ mod_timer(&sp->link_timer, jiffies + HZ / 2);
+ } else {
+ mod_timer(&sp->link_timer, jiffies + LINK_TIMER);
+ }
+}
+
+static void ar231x_check_link(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ u16 phyData;
+
+ phyData = ar231x_mdiobus_read(sp->mii_bus, sp->phy, MII_BMSR);
+ if (sp->phyData != phyData) {
+ if (phyData & BMSR_LSTATUS) {
+ /* link is present; read link partner ability
+ to determine duplex */
+ int duplex = 0;
+ u16 reg;
+
+ sp->link = 1;
+ reg = ar231x_mdiobus_read(sp->mii_bus, sp->phy, MII_BMCR);
+ if (reg & BMCR_ANENABLE) {
+ /* auto neg enabled */
+ reg = ar231x_mdiobus_read(sp->mii_bus, sp->phy, MII_LPA);
+ duplex = (reg & (LPA_100FULL | LPA_10FULL)) ? 1 : 0;
+ } else {
+ /* no auto neg, just read duplex config */
+ duplex = (reg & BMCR_FULLDPLX) ? 1 : 0;
+ }
+
+ printk(KERN_INFO "%s: Configuring MAC for %s duplex\n",
+ dev->name, (duplex) ? "full" : "half");
+
+ if (duplex) {
+ /* full duplex */
+ sp->eth_regs->mac_control =
+ ((sp->eth_regs->
+ mac_control | MAC_CONTROL_F) & ~MAC_CONTROL_DRO);
+ } else {
+ /* half duplex */
+ sp->eth_regs->mac_control =
+ ((sp->eth_regs->
+ mac_control | MAC_CONTROL_DRO) & ~MAC_CONTROL_F);
+ }
+ } else {
+ /* no link */
+ sp->link = 0;
+ }
+ sp->phyData = phyData;
+ }
+}
+
+static int ar231x_reset_reg(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ unsigned int ethsal, ethsah;
+ unsigned int flags;
+
+ *sp->int_regs |= sp->cfg->reset_mac;
+ mdelay(10);
+ *sp->int_regs &= ~sp->cfg->reset_mac;
+ mdelay(10);
+ *sp->int_regs |= sp->cfg->reset_phy;
+ mdelay(10);
+ *sp->int_regs &= ~sp->cfg->reset_phy;
+ mdelay(10);
+
+ sp->dma_regs->bus_mode = (DMA_BUS_MODE_SWR);
+ mdelay(10);
+ sp->dma_regs->bus_mode =
+ ((32 << DMA_BUS_MODE_PBL_SHIFT) | DMA_BUS_MODE_BLE);
+
+ /* enable interrupts */
+ sp->dma_regs->intr_ena = (DMA_STATUS_AIS |
+ DMA_STATUS_NIS |
+ DMA_STATUS_RI |
+ DMA_STATUS_TI | DMA_STATUS_FBE);
+ sp->dma_regs->xmt_base = virt_to_phys(sp->tx_ring);
+ sp->dma_regs->rcv_base = virt_to_phys(sp->rx_ring);
+ sp->dma_regs->control =
+ (DMA_CONTROL_SR | DMA_CONTROL_ST | DMA_CONTROL_SF);
+
+ sp->eth_regs->flow_control = (FLOW_CONTROL_FCE);
+ sp->eth_regs->vlan_tag = (0x8100);
+
+ /* Enable Ethernet Interface */
+ flags = (MAC_CONTROL_TE | /* transmit enable */
+ MAC_CONTROL_PM | /* pass mcast */
+ MAC_CONTROL_F | /* full duplex */
+ MAC_CONTROL_HBD); /* heart beat disabled */
+
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ flags |= MAC_CONTROL_PR;
+ }
+ sp->eth_regs->mac_control = flags;
+
+ /* Set all Ethernet station address registers to their initial values */
+ ethsah = ((((u_int) (dev->dev_addr[5]) << 8) & (u_int) 0x0000FF00) |
+ (((u_int) (dev->dev_addr[4]) << 0) & (u_int) 0x000000FF));
+
+ ethsal = ((((u_int) (dev->dev_addr[3]) << 24) & (u_int) 0xFF000000) |
+ (((u_int) (dev->dev_addr[2]) << 16) & (u_int) 0x00FF0000) |
+ (((u_int) (dev->dev_addr[1]) << 8) & (u_int) 0x0000FF00) |
+ (((u_int) (dev->dev_addr[0]) << 0) & (u_int) 0x000000FF));
+
+ sp->eth_regs->mac_addr[0] = ethsah;
+ sp->eth_regs->mac_addr[1] = ethsal;
+
+ mdelay(10);
+
+ return (0);
+}
+
+
+static int ar231x_init(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ int ecode = 0;
+
+ /*
+ * Allocate descriptors
+ */
+ if (ar231x_allocate_descriptors(dev)) {
+ printk("%s: %s: ar231x_allocate_descriptors failed\n",
+ dev->name, __FUNCTION__);
+ ecode = -EAGAIN;
+ goto init_error;
+ }
+
+ /*
+ * Get the memory for the skb rings.
+ */
+ if (sp->rx_skb == NULL) {
+ sp->rx_skb =
+ kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES,
+ GFP_KERNEL);
+ if (!(sp->rx_skb)) {
+ printk("%s: %s: rx_skb kmalloc failed\n",
+ dev->name, __FUNCTION__);
+ ecode = -EAGAIN;
+ goto init_error;
+ }
+ }
+ memset(sp->rx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
+
+ if (sp->tx_skb == NULL) {
+ sp->tx_skb =
+ kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES,
+ GFP_KERNEL);
+ if (!(sp->tx_skb)) {
+ printk("%s: %s: tx_skb kmalloc failed\n",
+ dev->name, __FUNCTION__);
+ ecode = -EAGAIN;
+ goto init_error;
+ }
+ }
+ memset(sp->tx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
+
+ /*
+ * Set tx_csm before we start receiving interrupts, otherwise
+ * the interrupt handler might think it is supposed to process
+ * tx ints before we are up and running, which may cause a null
+ * pointer access in the int handler.
+ */
+ sp->rx_skbprd = 0;
+ sp->cur_rx = 0;
+ sp->tx_prd = 0;
+ sp->tx_csm = 0;
+
+ /*
+ * Zero the stats before starting the interface
+ */
+ memset(&dev->stats, 0, sizeof(dev->stats));
+
+ /*
+ * We load the ring here as there seems to be no way to tell the
+ * firmware to wipe the ring without re-initializing it.
+ */
+ ar231x_load_rx_ring(dev, RX_RING_SIZE);
+
+ /*
+ * Init hardware
+ */
+ ar231x_reset_reg(dev);
+
+ /*
+ * Get the IRQ
+ */
+ ecode =
+ request_irq(dev->irq, &ar231x_interrupt,
+ IRQF_DISABLED,
+ dev->name, dev);
+ if (ecode) {
+ printk(KERN_WARNING "%s: %s: Requested IRQ %d is busy\n",
+ dev->name, __FUNCTION__, dev->irq);
+ goto init_error;
+ }
+
+
+ tasklet_enable(&sp->rx_tasklet);
+
+ return 0;
+
+ init_error:
+ ar231x_init_cleanup(dev);
+ return ecode;
+}
+
+/*
+ * Load the rx ring.
+ *
+ * Loading rings is safe without holding the spin lock since this is
+ * done only before the device is enabled, thus no interrupts are
+ * generated, and the ring is not touched by the interrupt/tasklet handlers.
+ */
+static void ar231x_load_rx_ring(struct net_device *dev, int nr_bufs)
+{
+
+ struct ar231x_private *sp = netdev_priv(dev);
+ short i, idx;
+
+ idx = sp->rx_skbprd;
+
+ for (i = 0; i < nr_bufs; i++) {
+ struct sk_buff *skb;
+ ar231x_descr_t *rd;
+
+ if (sp->rx_skb[idx])
+ break;
+
+ skb = netdev_alloc_skb_ip_align(dev, AR2313_BUFSIZE);
+ if (!skb) {
+ printk("\n\n\n\n %s: No memory in system\n\n\n\n",
+ __FUNCTION__);
+ break;
+ }
+
+ /*
+ * Make sure IP header starts on a fresh cache line.
+ */
+ skb->dev = dev;
+ sp->rx_skb[idx] = skb;
+
+ rd = (ar231x_descr_t *) & sp->rx_ring[idx];
+
+ /* initialize dma descriptor */
+ rd->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
+ DMA_RX1_CHAINED);
+ rd->addr = virt_to_phys(skb->data);
+ rd->descr =
+ virt_to_phys(&sp->
+ rx_ring[(idx + 1) & (AR2313_DESCR_ENTRIES - 1)]);
+ rd->status = DMA_RX_OWN;
+
+ idx = DSC_NEXT(idx);
+ }
+
+ if (i)
+ sp->rx_skbprd = idx;
+
+ return;
+}
+
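+/*
+ * Upper bound on frames handled per rx tasklet run; when more are
+ * pending, ar231x_rx_int() returns nonzero and the tasklet reschedules.
+ */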
+#define AR2313_MAX_PKTS_PER_CALL 64
+
+static int ar231x_rx_int(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ struct sk_buff *skb, *skb_new;
+ ar231x_descr_t *rxdesc;
+ unsigned int status;
+ u32 idx;
+ int pkts = 0;
+ int rval;
+
+ idx = sp->cur_rx;
+
+ /* process at most the entire ring and then wait for another interrupt
+ */
+ while (1) {
+
+ rxdesc = &sp->rx_ring[idx];
+ status = rxdesc->status;
+ if (status & DMA_RX_OWN) {
+ /* SiByte owns descriptor or descr not yet filled in */
+ rval = 0;
+ break;
+ }
+
+ if (++pkts > AR2313_MAX_PKTS_PER_CALL) {
+ rval = 1;
+ break;
+ }
+
+ if ((status & DMA_RX_ERROR) && !(status & DMA_RX_LONG)) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_dropped++;
+
+ /* add statistics counters */
+ if (status & DMA_RX_ERR_CRC)
+ dev->stats.rx_crc_errors++;
+ if (status & DMA_RX_ERR_COL)
+ dev->stats.rx_over_errors++;
+ if (status & DMA_RX_ERR_LENGTH)
+ dev->stats.rx_length_errors++;
+ if (status & DMA_RX_ERR_RUNT)
+ dev->stats.rx_over_errors++;
+ if (status & DMA_RX_ERR_DESC)
+ dev->stats.rx_over_errors++;
+
+ } else {
+ /* alloc new buffer. */
+ skb_new = netdev_alloc_skb_ip_align(dev, AR2313_BUFSIZE);
+ if (skb_new != NULL) {
+
+ skb = sp->rx_skb[idx];
+ /* set skb */
+ skb_put(skb,
+ ((status >> DMA_RX_LEN_SHIFT) & 0x3fff) - CRC_LEN);
+
+ dev->stats.rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, dev);
+ /* pass the packet to upper layers */
+ netif_rx(skb);
+
+ skb_new->dev = dev;
+ /* reset descriptor's curr_addr */
+ rxdesc->addr = virt_to_phys(skb_new->data);
+
+ dev->stats.rx_packets++;
+ sp->rx_skb[idx] = skb_new;
+ } else {
+ dev->stats.rx_dropped++;
+ }
+ }
+
+ rxdesc->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
+ DMA_RX1_CHAINED);
+ rxdesc->status = DMA_RX_OWN;
+
+ idx = DSC_NEXT(idx);
+ }
+
+ sp->cur_rx = idx;
+
+ return rval;
+}
+
+
+static void ar231x_tx_int(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ u32 idx;
+ struct sk_buff *skb;
+ ar231x_descr_t *txdesc;
+ unsigned int status = 0;
+
+ idx = sp->tx_csm;
+
+ while (idx != sp->tx_prd) {
+ txdesc = &sp->tx_ring[idx];
+
+ if ((status = txdesc->status) & DMA_TX_OWN) {
+ /* ar231x dma still owns descr */
+ break;
+ }
+ /* done with this descriptor */
+ dma_unmap_single(NULL, txdesc->addr,
+ txdesc->devcs & DMA_TX1_BSIZE_MASK,
+ DMA_TO_DEVICE);
+ txdesc->status = 0;
+
+ if (status & DMA_TX_ERROR) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ if (status & DMA_TX_ERR_UNDER)
+ dev->stats.tx_fifo_errors++;
+ if (status & DMA_TX_ERR_HB)
+ dev->stats.tx_heartbeat_errors++;
+ if (status & (DMA_TX_ERR_LOSS | DMA_TX_ERR_LINK))
+ dev->stats.tx_carrier_errors++;
+ if (status & (DMA_TX_ERR_LATE |
+ DMA_TX_ERR_COL |
+ DMA_TX_ERR_JABBER | DMA_TX_ERR_DEFER))
+ dev->stats.tx_aborted_errors++;
+ } else {
+ /* transmit OK */
+ dev->stats.tx_packets++;
+ }
+
+ skb = sp->tx_skb[idx];
+ sp->tx_skb[idx] = NULL;
+ idx = DSC_NEXT(idx);
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ }
+
+ sp->tx_csm = idx;
+
+ return;
+}
+
+
+static void rx_tasklet_func(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct ar231x_private *sp = netdev_priv(dev);
+
+ if (sp->unloading) {
+ return;
+ }
+
+ if (ar231x_rx_int(dev)) {
+ tasklet_hi_schedule(&sp->rx_tasklet);
+ } else {
+ unsigned long flags;
+ spin_lock_irqsave(&sp->lock, flags);
+ sp->dma_regs->intr_ena |= DMA_STATUS_RI;
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+}
+
+static void rx_schedule(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+
+ sp->dma_regs->intr_ena &= ~DMA_STATUS_RI;
+
+ tasklet_hi_schedule(&sp->rx_tasklet);
+}
+
+static irqreturn_t ar231x_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct ar231x_private *sp = netdev_priv(dev);
+ unsigned int status, enabled;
+
+ /* clear interrupt */
+ /*
+ * Don't clear RI bit if currently disabled.
+ */
+ status = sp->dma_regs->status;
+ enabled = sp->dma_regs->intr_ena;
+ sp->dma_regs->status = status & enabled;
+
+ if (status & DMA_STATUS_NIS) {
+ /* normal status */
+ /*
+ * Don't schedule rx processing if interrupt
+ * is already disabled.
+ */
+ if (status & enabled & DMA_STATUS_RI) {
+ /* receive interrupt */
+ rx_schedule(dev);
+ }
+ if (status & DMA_STATUS_TI) {
+ /* transmit interrupt */
+ ar231x_tx_int(dev);
+ }
+ }
+
+ /* abnormal status */
+ if (status & (DMA_STATUS_FBE | DMA_STATUS_TPS)) {
+ ar231x_restart(dev);
+ }
+ return IRQ_HANDLED;
+}
+
+
+static int ar231x_open(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ unsigned int ethsal, ethsah;
+
+ /* reset the hardware, in case the MAC address changed */
+ ethsah = ((((u_int) (dev->dev_addr[5]) << 8) & (u_int) 0x0000FF00) |
+ (((u_int) (dev->dev_addr[4]) << 0) & (u_int) 0x000000FF));
+
+ ethsal = ((((u_int) (dev->dev_addr[3]) << 24) & (u_int) 0xFF000000) |
+ (((u_int) (dev->dev_addr[2]) << 16) & (u_int) 0x00FF0000) |
+ (((u_int) (dev->dev_addr[1]) << 8) & (u_int) 0x0000FF00) |
+ (((u_int) (dev->dev_addr[0]) << 0) & (u_int) 0x000000FF));
+
+ sp->eth_regs->mac_addr[0] = ethsah;
+ sp->eth_regs->mac_addr[1] = ethsal;
+
+ mdelay(10);
+
+ dev->mtu = 1500;
+ netif_start_queue(dev);
+
+ sp->eth_regs->mac_control |= MAC_CONTROL_RE;
+
+ return 0;
+}
+
+static void ar231x_tx_timeout(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ ar231x_restart(dev);
+ spin_unlock_irqrestore(&sp->lock, flags);
+}
+
+static void ar231x_halt(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ int j;
+
+ tasklet_disable(&sp->rx_tasklet);
+
+ /* kill the MAC */
+ sp->eth_regs->mac_control &= ~(MAC_CONTROL_RE | /* disable Receives */
+ MAC_CONTROL_TE); /* disable Transmits */
+ /* stop dma */
+ sp->dma_regs->control = 0;
+ sp->dma_regs->bus_mode = DMA_BUS_MODE_SWR;
+
+ /* place phy and MAC in reset */
+ *sp->int_regs |= (sp->cfg->reset_mac | sp->cfg->reset_phy);
+
+ /* free buffers on tx ring */
+ for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
+ struct sk_buff *skb;
+ ar231x_descr_t *txdesc;
+
+ txdesc = &sp->tx_ring[j];
+ txdesc->descr = 0;
+
+ skb = sp->tx_skb[j];
+ if (skb) {
+ dev_kfree_skb(skb);
+ sp->tx_skb[j] = NULL;
+ }
+ }
+}
+
+/*
+ * close should do nothing. Here's why. It's called when
+ * 'ifconfig bond0 down' is run. If it calls free_irq then
+ * the irq is gone forever! When bond0 is brought 'up' again,
+ * ar231x_open() does not call request_irq(). Worse,
+ * the call to ar231x_halt() generates a WDOG reset due to
+ * the write to 'sp->int_regs' and the box reboots.
+ * Commenting this out is good since it allows the
+ * system to resume when bond0 is brought up again.
+ */
+static int ar231x_close(struct net_device *dev)
+{
+#if 0
+ /*
+ * Disable interrupts
+ */
+ disable_irq(dev->irq);
+
+ /*
+ * Without (or before) releasing irq and stopping hardware, this
+ * is absolute nonsense, by the way. It will be reset instantly
+ * by the first irq.
+ */
+ netif_stop_queue(dev);
+
+ /* stop the MAC and DMA engines */
+ ar231x_halt(dev);
+
+ /* release the interrupt */
+ free_irq(dev->irq, dev);
+
+#endif
+ return 0;
+}
+
+static int ar231x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ ar231x_descr_t *td;
+ u32 idx;
+
+ idx = sp->tx_prd;
+ td = &sp->tx_ring[idx];
+
+ if (td->status & DMA_TX_OWN) {
+ /* free skbuf and lie to the caller that we sent it out */
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+
+ /* restart transmitter in case locked */
+ sp->dma_regs->xmt_poll = 0;
+ return 0;
+ }
+
+ /* Setup the transmit descriptor. */
+ td->devcs = ((skb->len << DMA_TX1_BSIZE_SHIFT) |
+ (DMA_TX1_LS | DMA_TX1_IC | DMA_TX1_CHAINED));
+ td->addr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
+ td->status = DMA_TX_OWN;
+
+ /* kick transmitter last */
+ sp->dma_regs->xmt_poll = 0;
+
+ sp->tx_skb[idx] = skb;
+ idx = DSC_NEXT(idx);
+ sp->tx_prd = idx;
+
+ return 0;
+}
+
+static int ar231x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ int ret;
+
+ switch (cmd) {
+
+ case SIOCETHTOOL:
+ spin_lock_irq(&sp->lock);
+ ret = phy_ethtool_ioctl(sp->phy_dev, (void *) ifr->ifr_data);
+ spin_unlock_irq(&sp->lock);
+ return ret;
+
+ case SIOCSIFHWADDR:
+ if (copy_from_user
+ (dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
+ return -EFAULT;
+ return 0;
+
+ case SIOCGIFHWADDR:
+ if (copy_to_user
+ (ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
+ return -EFAULT;
+ return 0;
+
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(sp->phy_dev, ifr, cmd);
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void ar231x_adjust_link(struct net_device *dev)
+{
+ struct ar231x_private *sp = netdev_priv(dev);
+ unsigned int mc;
+
+ if (!sp->phy_dev->link)
+ return;
+
+ if (sp->phy_dev->duplex != sp->oldduplex) {
+ mc = readl(&sp->eth_regs->mac_control);
+ mc &= ~(MAC_CONTROL_F | MAC_CONTROL_DRO);
+ if (sp->phy_dev->duplex)
+ mc |= MAC_CONTROL_F;
+ else
+ mc |= MAC_CONTROL_DRO;
+ writel(mc, &sp->eth_regs->mac_control);
+ sp->oldduplex = sp->phy_dev->duplex;
+ }
+}
+
+#define MII_ADDR(phy, reg) \
+ ((reg << MII_ADDR_REG_SHIFT) | (phy << MII_ADDR_PHY_SHIFT))
+
+static int
+ar231x_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct net_device *const dev = bus->priv;
+ struct ar231x_private *sp = netdev_priv(dev);
+ volatile ETHERNET_STRUCT *ethernet = sp->phy_regs;
+
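+ /* kick off the read, then spin until the MII unit goes idle */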
+ ethernet->mii_addr = MII_ADDR(phy_addr, regnum);
+ while (ethernet->mii_addr & MII_ADDR_BUSY);
+ return (ethernet->mii_data >> MII_DATA_SHIFT);
+}
+
+static int
+ar231x_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct net_device *const dev = bus->priv;
+ struct ar231x_private *sp = netdev_priv(dev);
+ volatile ETHERNET_STRUCT *ethernet = sp->phy_regs;
+
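+ /* wait for any in-flight MII transaction to finish before writing */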
+ while (ethernet->mii_addr & MII_ADDR_BUSY);
+ ethernet->mii_data = value << MII_DATA_SHIFT;
+ ethernet->mii_addr = MII_ADDR(phy_addr, regnum) | MII_ADDR_WRITE;
+
+ return 0;
+}
+
+static int ar231x_mdiobus_reset(struct mii_bus *bus)
+{
+ struct net_device *const dev = bus->priv;
+
+ ar231x_reset_reg(dev);
+
+ return 0;
+}
+
+static int ar231x_mdiobus_probe (struct net_device *dev)
+{
+ struct ar231x_private *const sp = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int phy_addr;
+
+ /* find the first (lowest address) PHY on the current MAC's MII bus */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (sp->mii_bus->phy_map[phy_addr]) {
+ phydev = sp->mii_bus->phy_map[phy_addr];
+ sp->phy = phy_addr;
+ break; /* break out with first one found */
+ }
+
+ if (!phydev) {
+ printk (KERN_ERR "ar231x: %s: no PHY found\n", dev->name);
+ return -1;
+ }
+
+ /* now we are supposed to have a proper phydev, to attach to... */
+ BUG_ON(!phydev);
+ BUG_ON(phydev->attached_dev);
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev), &ar231x_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ phydev->advertising = phydev->supported;
+
+ sp->oldduplex = -1;
+ sp->phy_dev = phydev;
+
+ printk(KERN_INFO "%s: attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s)\n",
+ dev->name, phydev->drv->name, dev_name(&phydev->dev));
+
+ return 0;
+}
+
--- /dev/null
+++ b/drivers/net/ethernet/ar231x/ar231x.h
@@ -0,0 +1,306 @@
+/*
+ * ar231x.h: Linux driver for the Atheros AR231x Ethernet device.
+ *
+ * Copyright (C) 2004 by Sameer Dekate <sdekate@arubanetworks.com>
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
+ *
+ * Thanks to Atheros for providing hardware and documentation
+ * enabling me to write this driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _AR2313_H_
+#define _AR2313_H_
+
+#include <linux/interrupt.h>
+#include <generated/autoconf.h>
+#include <linux/bitops.h>
+#include <asm/bootinfo.h>
+#include <ar231x_platform.h>
+
+/*
+ * probe link timer - 5 secs
+ */
+#define LINK_TIMER (5*HZ)
+
+#define IS_DMA_TX_INT(X) (((X) & (DMA_STATUS_TI)) != 0)
+#define IS_DMA_RX_INT(X) (((X) & (DMA_STATUS_RI)) != 0)
+#define IS_DRIVER_OWNED(X) (((X) & (DMA_TX_OWN)) == 0)
+
+#define AR2313_TX_TIMEOUT (HZ/4)
+
+/*
+ * Rings
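+ *
+ * AR2313_DESCR_ENTRIES must stay a power of two, since DSC_NEXT()
+ * wraps ring indices with a bitwise AND rather than a modulo.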
+ */
+#define DSC_RING_ENTRIES_SIZE (AR2313_DESCR_ENTRIES * sizeof(struct desc))
+#define DSC_NEXT(idx) ((idx + 1) & (AR2313_DESCR_ENTRIES - 1))
+
+#define AR2313_MBGET 2
+#define AR2313_MBSET 3
+#define AR2313_PCI_RECONFIG 4
+#define AR2313_PCI_DUMP 5
+#define AR2313_TEST_PANIC 6
+#define AR2313_TEST_NULLPTR 7
+#define AR2313_READ_DATA 8
+#define AR2313_WRITE_DATA 9
+#define AR2313_GET_VERSION 10
+#define AR2313_TEST_HANG 11
+#define AR2313_SYNC 12
+
+#define DMA_RX_ERR_CRC BIT(1)
+#define DMA_RX_ERR_DRIB BIT(2)
+#define DMA_RX_ERR_MII BIT(3)
+#define DMA_RX_EV2 BIT(5)
+#define DMA_RX_ERR_COL BIT(6)
+#define DMA_RX_LONG BIT(7)
+#define DMA_RX_LS BIT(8) /* last descriptor */
+#define DMA_RX_FS BIT(9) /* first descriptor */
+#define DMA_RX_MF BIT(10) /* multicast frame */
+#define DMA_RX_ERR_RUNT BIT(11) /* runt frame */
+#define DMA_RX_ERR_LENGTH BIT(12) /* length error */
+#define DMA_RX_ERR_DESC BIT(14) /* descriptor error */
+#define DMA_RX_ERROR BIT(15) /* error summary */
+#define DMA_RX_LEN_MASK 0x3fff0000
+#define DMA_RX_LEN_SHIFT 16
+#define DMA_RX_FILT BIT(30)
+#define DMA_RX_OWN BIT(31) /* desc owned by DMA controller */
+
+#define DMA_RX1_BSIZE_MASK 0x000007ff
+#define DMA_RX1_BSIZE_SHIFT 0
+#define DMA_RX1_CHAINED BIT(24)
+#define DMA_RX1_RER BIT(25)
+
+#define DMA_TX_ERR_UNDER BIT(1) /* underflow error */
+#define DMA_TX_ERR_DEFER BIT(2) /* excessive deferral */
+#define DMA_TX_COL_MASK 0x78
+#define DMA_TX_COL_SHIFT 3
+#define DMA_TX_ERR_HB BIT(7) /* heartbeat failure */
+#define DMA_TX_ERR_COL BIT(8) /* excessive collisions */
+#define DMA_TX_ERR_LATE BIT(9) /* late collision */
+#define DMA_TX_ERR_LINK BIT(10) /* no carrier */
+#define DMA_TX_ERR_LOSS BIT(11) /* loss of carrier */
+#define DMA_TX_ERR_JABBER BIT(14) /* transmit jabber timeout */
+#define DMA_TX_ERROR BIT(15) /* frame aborted */
+#define DMA_TX_OWN BIT(31) /* descr owned by DMA controller */
+
+#define DMA_TX1_BSIZE_MASK 0x000007ff
+#define DMA_TX1_BSIZE_SHIFT 0
+#define DMA_TX1_CHAINED BIT(24) /* chained descriptors */
+#define DMA_TX1_TER BIT(25) /* transmit end of ring */
+#define DMA_TX1_FS BIT(29) /* first segment */
+#define DMA_TX1_LS BIT(30) /* last segment */
+#define DMA_TX1_IC BIT(31) /* interrupt on completion */
+
+#define RCVPKT_LENGTH(X) (X >> 16) /* Received pkt Length */
+
+#define MAC_CONTROL_RE BIT(2) /* receive enable */
+#define MAC_CONTROL_TE BIT(3) /* transmit enable */
+#define MAC_CONTROL_DC BIT(5) /* Deferral check */
+#define MAC_CONTROL_ASTP BIT(8) /* Auto pad strip */
+#define MAC_CONTROL_DRTY BIT(10) /* Disable retry */
+#define MAC_CONTROL_DBF BIT(11) /* Disable bcast frames */
+#define MAC_CONTROL_LCC BIT(12) /* late collision ctrl */
+#define MAC_CONTROL_HP BIT(13) /* Hash Perfect filtering */
+#define MAC_CONTROL_HASH BIT(14) /* Unicast hash filtering */
+#define MAC_CONTROL_HO BIT(15) /* Hash only filtering */
+#define MAC_CONTROL_PB BIT(16) /* Pass Bad frames */
+#define MAC_CONTROL_IF BIT(17) /* Inverse filtering */
+#define MAC_CONTROL_PR BIT(18) /* promiscuous mode (valid frames only) */
+#define MAC_CONTROL_PM BIT(19) /* pass multicast */
+#define MAC_CONTROL_F BIT(20) /* full-duplex */
+#define MAC_CONTROL_DRO BIT(23) /* Disable Receive Own */
+#define MAC_CONTROL_HBD BIT(28) /* heart-beat disabled (MUST BE SET) */
+#define MAC_CONTROL_BLE BIT(30) /* big endian mode */
+#define MAC_CONTROL_RA BIT(31) /* receive all (valid and invalid frames) */
+
+#define MII_ADDR_BUSY BIT(0)
+#define MII_ADDR_WRITE BIT(1)
+#define MII_ADDR_REG_SHIFT 6
+#define MII_ADDR_PHY_SHIFT 11
+#define MII_DATA_SHIFT 0
+
+#define FLOW_CONTROL_FCE BIT(1)
+
+#define DMA_BUS_MODE_SWR BIT(0) /* software reset */
+#define DMA_BUS_MODE_BLE BIT(7) /* big endian mode */
+#define DMA_BUS_MODE_PBL_SHIFT 8 /* programmable burst length 32 */
+#define DMA_BUS_MODE_DBO BIT(20) /* big-endian descriptors */
+
+#define DMA_STATUS_TI BIT(0) /* transmit interrupt */
+#define DMA_STATUS_TPS BIT(1) /* transmit process stopped */
+#define DMA_STATUS_TU BIT(2) /* transmit buffer unavailable */
+#define DMA_STATUS_TJT BIT(3) /* transmit buffer timeout */
+#define DMA_STATUS_UNF BIT(5) /* transmit underflow */
+#define DMA_STATUS_RI BIT(6) /* receive interrupt */
+#define DMA_STATUS_RU BIT(7) /* receive buffer unavailable */
+#define DMA_STATUS_RPS BIT(8) /* receive process stopped */
+#define DMA_STATUS_ETI BIT(10) /* early transmit interrupt */
+#define DMA_STATUS_FBE BIT(13) /* fatal bus interrupt */
+#define DMA_STATUS_ERI BIT(14) /* early receive interrupt */
+#define DMA_STATUS_AIS BIT(15) /* abnormal interrupt summary */
+#define DMA_STATUS_NIS BIT(16) /* normal interrupt summary */
+#define DMA_STATUS_RS_SHIFT 17 /* receive process state */
+#define DMA_STATUS_TS_SHIFT 20 /* transmit process state */
+#define DMA_STATUS_EB_SHIFT 23 /* error bits */
+
+#define DMA_CONTROL_SR BIT(1) /* start receive */
+#define DMA_CONTROL_ST BIT(13) /* start transmit */
+#define DMA_CONTROL_SF BIT(21) /* store and forward */
+
+
+typedef struct {
+ volatile unsigned int status; // OWN, Device control and status.
+ volatile unsigned int devcs; // pkt Control bits + Length
+ volatile unsigned int addr; // Current Address.
+ volatile unsigned int descr; // Next descriptor in chain.
+} ar231x_descr_t;
+
+
+
+//
+// New Combo structure for Both Eth0 AND eth1
+//
+typedef struct {
+ volatile unsigned int mac_control; /* 0x00 */
+ volatile unsigned int mac_addr[2]; /* 0x04 - 0x08 */
+ volatile unsigned int mcast_table[2]; /* 0x0c - 0x10 */
+ volatile unsigned int mii_addr; /* 0x14 */
+ volatile unsigned int mii_data; /* 0x18 */
+ volatile unsigned int flow_control; /* 0x1c */
+ volatile unsigned int vlan_tag; /* 0x20 */
+ volatile unsigned int pad[7]; /* 0x24 - 0x3c */
+ volatile unsigned int ucast_table[8]; /* 0x40-0x5c */
+
+} ETHERNET_STRUCT;
+
+/********************************************************************
+ * Interrupt controller
+ ********************************************************************/
+
+typedef struct {
+ volatile unsigned int wdog_control; /* 0x08 */
+ volatile unsigned int wdog_timer; /* 0x0c */
+ volatile unsigned int misc_status; /* 0x10 */
+ volatile unsigned int misc_mask; /* 0x14 */
+ volatile unsigned int global_status; /* 0x18 */
+ volatile unsigned int reserved; /* 0x1c */
+ volatile unsigned int reset_control; /* 0x20 */
+} INTERRUPT;
+
+/********************************************************************
+ * DMA controller
+ ********************************************************************/
+typedef struct {
+ volatile unsigned int bus_mode; /* 0x00 (CSR0) */
+ volatile unsigned int xmt_poll; /* 0x04 (CSR1) */
+ volatile unsigned int rcv_poll; /* 0x08 (CSR2) */
+ volatile unsigned int rcv_base; /* 0x0c (CSR3) */
+ volatile unsigned int xmt_base; /* 0x10 (CSR4) */
+ volatile unsigned int status; /* 0x14 (CSR5) */
+ volatile unsigned int control; /* 0x18 (CSR6) */
+ volatile unsigned int intr_ena; /* 0x1c (CSR7) */
+ volatile unsigned int rcv_missed; /* 0x20 (CSR8) */
+ volatile unsigned int reserved[11]; /* 0x24-0x4c (CSR9-19) */
+ volatile unsigned int cur_tx_buf_addr; /* 0x50 (CSR20) */
+ volatile unsigned int cur_rx_buf_addr; /* 0x50 (CSR21) */
+} DMA;
+
+/*
+ * Struct private for the Sibyte.
+ *
+ * Elements are grouped so variables used by the tx handling goes
+ * together, and will go into the same cache lines etc. in order to
+ * avoid cache line contention between the rx and tx handling on SMP.
+ *
+ * Frequently accessed variables are put at the beginning of the
+ * struct to help the compiler generate better/shorter code.
+ */
+struct ar231x_private {
+ struct net_device *dev;
+ int version;
+ u32 mb[2];
+
+ volatile ETHERNET_STRUCT *phy_regs;
+ volatile ETHERNET_STRUCT *eth_regs;
+ volatile DMA *dma_regs;
+ volatile u32 *int_regs;
+ struct ar231x_eth *cfg;
+
+ spinlock_t lock; /* Serialise access to device */
+
+ /*
+ * RX and TX descriptors, must be adjacent
+ */
+ ar231x_descr_t *rx_ring;
+ ar231x_descr_t *tx_ring;
+
+
+ struct sk_buff **rx_skb;
+ struct sk_buff **tx_skb;
+
+ /*
+ * RX elements
+ */
+ u32 rx_skbprd;
+ u32 cur_rx;
+
+ /*
+ * TX elements
+ */
+ u32 tx_prd;
+ u32 tx_csm;
+
+ /*
+ * Misc elements
+ */
+ char name[48];
+ struct {
+ u32 address;
+ u32 length;
+ char *mapping;
+ } desc;
+
+
+ struct timer_list link_timer;
+ unsigned short phy; /* merlot phy = 1, samsung phy = 0x1f */
+ unsigned short mac;
+ unsigned short link; /* 0 - link down, 1 - link up */
+ u16 phyData;
+
+ struct tasklet_struct rx_tasklet;
+ int unloading;
+
+ struct phy_device *phy_dev;
+ struct mii_bus *mii_bus;
+ int oldduplex;
+};
+
+
+/*
+ * Prototypes
+ */
+static int ar231x_init(struct net_device *dev);
+#ifdef TX_TIMEOUT
+static void ar231x_tx_timeout(struct net_device *dev);
+#endif
+static int ar231x_restart(struct net_device *dev);
+static void ar231x_load_rx_ring(struct net_device *dev, int bufs);
+static irqreturn_t ar231x_interrupt(int irq, void *dev_id);
+static int ar231x_open(struct net_device *dev);
+static int ar231x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int ar231x_close(struct net_device *dev);
+static int ar231x_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd);
+static void ar231x_init_cleanup(struct net_device *dev);
+static int ar231x_setup_timer(struct net_device *dev);
+static void ar231x_link_timer_fn(unsigned long data);
+static void ar231x_check_link(struct net_device *dev);
+#endif /* _AR2313_H_ */
