target/linux/cns3xxx/files/drivers/net/ethernet/cavium/cns3xxx_eth.c

/*
 * Cavium CNS3xxx Gigabit driver for Linux
 *
 * Copyright 2011 Gateworks Corporation
 * Chris Lang <clang@gateworks.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/delay.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <mach/irqs.h>
#include <mach/platform.h>

#define DRV_NAME "cns3xxx_eth"

#define RX_DESCS 128
#define TX_DESCS 128
#define TX_DESC_RESERVE 20

#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
#define REGS_SIZE 336

#define RX_BUFFER_ALIGN 64
#define RX_BUFFER_ALIGN_MASK (~(RX_BUFFER_ALIGN - 1))

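/*
 * RX buffer geometry: each receive buffer is a plain kzalloc()'d
 * RX_SEGMENT_ALLOC_SIZE chunk that is later handed to build_skb().
 * SKB_HEAD_ALIGN reserves enough headroom that the DMA data pointer
 * lands on a RX_BUFFER_ALIGN (64-byte) boundary while preserving
 * NET_SKB_PAD and NET_IP_ALIGN; RX_SEGMENT_BUFSIZE subtracts the
 * skb_shared_info overhead, and RX_SEGMENT_MRU is the largest frame
 * segment that then fits in one buffer.
 */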
#define SKB_HEAD_ALIGN (((PAGE_SIZE - NET_SKB_PAD) % RX_BUFFER_ALIGN) + NET_SKB_PAD + NET_IP_ALIGN)
#define RX_SEGMENT_ALLOC_SIZE 4096
#define RX_SEGMENT_BUFSIZE (SKB_WITH_OVERHEAD(RX_SEGMENT_ALLOC_SIZE))
#define RX_SEGMENT_MRU (((RX_SEGMENT_BUFSIZE - SKB_HEAD_ALIGN) & RX_BUFFER_ALIGN_MASK) - NET_IP_ALIGN)
#define MAX_MTU 9500

#define NAPI_WEIGHT 64

/* MDIO Defines */
#define MDIO_CMD_COMPLETE 0x00008000
#define MDIO_WRITE_COMMAND 0x00002000
#define MDIO_READ_COMMAND 0x00004000
#define MDIO_REG_OFFSET 8
#define MDIO_VALUE_OFFSET 16

/* Descriptor Defines */
#define END_OF_RING 0x40000000
#define FIRST_SEGMENT 0x20000000
#define LAST_SEGMENT 0x10000000
#define FORCE_ROUTE 0x04000000
#define IP_CHECKSUM 0x00040000
#define UDP_CHECKSUM 0x00020000
#define TCP_CHECKSUM 0x00010000

/* Port Config Defines */
#define PORT_BP_ENABLE 0x00020000
#define PORT_DISABLE 0x00040000
#define PORT_LEARN_DIS 0x00080000
#define PORT_BLOCK_STATE 0x00100000
#define PORT_BLOCK_MODE 0x00200000

#define PROMISC_OFFSET 29

/* Global Config Defines */
#define UNKNOWN_VLAN_TO_CPU 0x02000000
#define ACCEPT_CRC_PACKET 0x00200000
#define CRC_STRIPPING 0x00100000

/* VLAN Config Defines */
#define NIC_MODE 0x00008000
#define VLAN_UNAWARE 0x00000001

/* DMA AUTO Poll Defines */
#define TS_POLL_EN 0x00000020
#define TS_SUSPEND 0x00000010
#define FS_POLL_EN 0x00000002
#define FS_SUSPEND 0x00000001

/* DMA Ring Control Defines */
#define QUEUE_THRESHOLD 0x000000f0
#define CLR_FS_STATE 0x80000000

/* Interrupt Status Defines */
#define MAC0_STATUS_CHANGE 0x00004000
#define MAC1_STATUS_CHANGE 0x00008000
#define MAC2_STATUS_CHANGE 0x00010000
#define MAC0_RX_ERROR 0x00100000
#define MAC1_RX_ERROR 0x00200000
#define MAC2_RX_ERROR 0x00400000

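/*
 * Hardware DMA descriptors. TX ("TS") and RX ("FS") descriptors are
 * both padded to 32 bytes; the bitfield views overlay the three raw
 * config words, and the cown (CPU-ownership) bit is the handshake that
 * hands a descriptor back and forth between driver and switch.
 */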
struct tx_desc
{
    u32 sdp; /* segment data pointer */

    union {
        struct {
            u32 sdl:16; /* segment data length */
            u32 tco:1;
            u32 uco:1;
            u32 ico:1;
            u32 rsv_1:3; /* reserved */
            u32 pri:3;
            u32 fp:1; /* force priority */
            u32 fr:1;
            u32 interrupt:1;
            u32 lsd:1;
            u32 fsd:1;
            u32 eor:1;
            u32 cown:1;
        };
        u32 config0;
    };

    union {
        struct {
            u32 ctv:1;
            u32 stv:1;
            u32 sid:4;
            u32 inss:1;
            u32 dels:1;
            u32 rsv_2:9;
            u32 pmap:5;
            u32 mark:3;
            u32 ewan:1;
            u32 fewan:1;
            u32 rsv_3:5;
        };
        u32 config1;
    };

    union {
        struct {
            u32 c_vid:12;
            u32 c_cfs:1;
            u32 c_pri:3;
            u32 s_vid:12;
            u32 s_dei:1;
            u32 s_pri:3;
        };
        u32 config2;
    };

    u8 alignment[16]; /* for 32 byte alignment */
};

struct rx_desc
{
    u32 sdp; /* segment data pointer */

    union {
        struct {
            u32 sdl:16; /* segment data length */
            u32 l4f:1;
            u32 ipf:1;
            u32 prot:4;
            u32 hr:6;
            u32 lsd:1;
            u32 fsd:1;
            u32 eor:1;
            u32 cown:1;
        };
        u32 config0;
    };

    union {
        struct {
            u32 ctv:1;
            u32 stv:1;
            u32 unv:1;
            u32 iwan:1;
            u32 exdv:1;
            u32 e_wan:1;
            u32 rsv_1:2;
            u32 sp:3;
            u32 crc_err:1;
            u32 un_eth:1;
            u32 tc:2;
            u32 rsv_2:1;
            u32 ip_offset:5;
            u32 rsv_3:11;
        };
        u32 config1;
    };

    union {
        struct {
            u32 c_vid:12;
            u32 c_cfs:1;
            u32 c_pri:3;
            u32 s_vid:12;
            u32 s_dei:1;
            u32 s_pri:3;
        };
        u32 config2;
    };

    u8 alignment[16]; /* for 32 byte alignment */
};

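/*
 * Register map of the built-in Ethernet switch, overlaid 1:1 on the
 * memory-mapped I/O block at CNS3XXX_SWITCH_BASE; the __res* members
 * pad over reserved holes in the hardware layout.
 */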
struct switch_regs {
    u32 phy_control;
    u32 phy_auto_addr;
    u32 mac_glob_cfg;
    u32 mac_cfg[4];
    u32 mac_pri_ctrl[5], __res;
    u32 etype[2];
    u32 udp_range[4];
    u32 prio_etype_udp;
    u32 prio_ipdscp[8];
    u32 tc_ctrl;
    u32 rate_ctrl;
    u32 fc_glob_thrs;
    u32 fc_port_thrs;
    u32 mc_fc_glob_thrs;
    u32 dc_glob_thrs;
    u32 arl_vlan_cmd;
    u32 arl_ctrl[3];
    u32 vlan_cfg;
    u32 pvid[2];
    u32 vlan_ctrl[3];
    u32 session_id[8];
    u32 intr_stat;
    u32 intr_mask;
    u32 sram_test;
    u32 mem_queue;
    u32 farl_ctrl;
    u32 fc_input_thrs, __res1[2];
    u32 clk_skew_ctrl;
    u32 mac_glob_cfg_ext, __res2[2];
    u32 dma_ring_ctrl;
    u32 dma_auto_poll_cfg;
    u32 delay_intr_cfg, __res3;
    u32 ts_dma_ctrl0;
    u32 ts_desc_ptr0;
    u32 ts_desc_base_addr0, __res4;
    u32 fs_dma_ctrl0;
    u32 fs_desc_ptr0;
    u32 fs_desc_base_addr0, __res5;
    u32 ts_dma_ctrl1;
    u32 ts_desc_ptr1;
    u32 ts_desc_base_addr1, __res6;
    u32 fs_dma_ctrl1;
    u32 fs_desc_ptr1;
    u32 fs_desc_base_addr1;
    u32 __res7[109];
    u32 mac_counter0[13];
};

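/*
 * Software ring state. buff_tab/phys_tab shadow the hardware rings so
 * completed buffers can be unmapped and freed. The TX ring tracks a
 * reclaim point (free_index) and an in-use count; the RX ring tracks
 * how many descriptors still need a fresh buffer after an allocation
 * failure (alloc_count/alloc_index).
 */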
struct _tx_ring {
    struct tx_desc *desc;
    dma_addr_t phys_addr;
    struct tx_desc *cur_addr;
    struct sk_buff *buff_tab[TX_DESCS];
    unsigned int phys_tab[TX_DESCS];
    u32 free_index;
    u32 count_index;
    u32 cur_index;
    int num_used;
    int num_count;
    bool stopped;
};

struct _rx_ring {
    struct rx_desc *desc;
    dma_addr_t phys_addr;
    struct rx_desc *cur_addr;
    void *buff_tab[RX_DESCS];
    unsigned int phys_tab[RX_DESCS];
    u32 cur_index;
    u32 alloc_index;
    int alloc_count;
};

struct sw {
    struct resource *mem_res;
    struct switch_regs __iomem *regs;
    struct napi_struct napi;
    struct cns3xxx_plat_info *plat;
    struct _tx_ring *tx_ring;
    struct _rx_ring *rx_ring;
    struct sk_buff *frag_first;
    struct sk_buff *frag_last;
};

struct port {
    struct net_device *netdev;
    struct phy_device *phydev;
    struct sw *sw;
    int id; /* logical port ID */
    int speed, duplex;
};

static spinlock_t mdio_lock;
static DEFINE_SPINLOCK(tx_lock);
static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
struct mii_bus *mdio_bus;
static int ports_open;
static struct port *switch_port_tab[4];
static struct dma_pool *rx_dma_pool;
static struct dma_pool *tx_dma_pool;
struct net_device *napi_dev;

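/*
 * Issue one MDIO read or write through the switch PHY control register:
 * clear the completion flag, start the transaction, then poll (up to
 * 5000 us) for MDIO_CMD_COMPLETE before returning the value read.
 */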
static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
               int write, u16 cmd)
{
    int cycles = 0;
    u32 temp = 0;

    temp = __raw_readl(&mdio_regs->phy_control);
    temp |= MDIO_CMD_COMPLETE;
    __raw_writel(temp, &mdio_regs->phy_control);
    udelay(10);

    if (write) {
        temp = (cmd << MDIO_VALUE_OFFSET);
        temp |= MDIO_WRITE_COMMAND;
    } else {
        temp = MDIO_READ_COMMAND;
    }
    temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
    temp |= (phy_id & 0x1f);

    __raw_writel(temp, &mdio_regs->phy_control);

    while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0)
            && cycles < 5000) {
        udelay(1);
        cycles++;
    }

    if (cycles == 5000) {
        printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
               phy_id);
        return -1;
    }

    temp = __raw_readl(&mdio_regs->phy_control);
    temp |= MDIO_CMD_COMPLETE;
    __raw_writel(temp, &mdio_regs->phy_control);

    if (write)
        return 0;

    return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
}

static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&mdio_lock, flags);
    ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
    spin_unlock_irqrestore(&mdio_lock, flags);
    return ret;
}

static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
                 u16 val)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&mdio_lock, flags);
    ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
    spin_unlock_irqrestore(&mdio_lock, flags);
    return ret;
}

static int cns3xxx_mdio_register(void)
{
    int err;

    if (!(mdio_bus = mdiobus_alloc()))
        return -ENOMEM;

    mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;

    spin_lock_init(&mdio_lock);
    mdio_bus->name = "CNS3xxx MII Bus";
    mdio_bus->read = &cns3xxx_mdio_read;
    mdio_bus->write = &cns3xxx_mdio_write;
    strcpy(mdio_bus->id, "0");

    if ((err = mdiobus_register(mdio_bus)))
        mdiobus_free(mdio_bus);
    return err;
}

static void cns3xxx_mdio_remove(void)
{
    mdiobus_unregister(mdio_bus);
    mdiobus_free(mdio_bus);
}

static void enable_tx_dma(struct sw *sw)
{
    __raw_writel(0x1, &sw->regs->ts_dma_ctrl0);
}

static void enable_rx_dma(struct sw *sw)
{
    __raw_writel(0x1, &sw->regs->fs_dma_ctrl0);
}

static void cns3xxx_adjust_link(struct net_device *dev)
{
    struct port *port = netdev_priv(dev);
    struct phy_device *phydev = port->phydev;

    if (!phydev->link) {
        if (port->speed) {
            port->speed = 0;
            printk(KERN_INFO "%s: link down\n", dev->name);
        }
        return;
    }

    if (port->speed == phydev->speed && port->duplex == phydev->duplex)
        return;

    port->speed = phydev->speed;
    port->duplex = phydev->duplex;

    printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
           dev->name, port->speed, port->duplex ? "full" : "half");
}

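/*
 * RX completion interrupt: mask further ring-0 RX interrupts and hand
 * the work to NAPI; the line is re-enabled from eth_poll() once the
 * ring has been drained below budget.
 */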
irqreturn_t eth_rx_irq(int irq, void *pdev)
{
    struct net_device *dev = pdev;
    struct sw *sw = netdev_priv(dev);
    if (likely(napi_schedule_prep(&sw->napi))) {
        disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
        __napi_schedule(&sw->napi);
    }
    return (IRQ_HANDLED);
}

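/*
 * Status interrupt: count RX-error drops and fold MAC link, duplex and
 * speed changes (decoded from mac_cfg) back into the attached phydev
 * before re-running the link state machine. Note that physical MAC 2
 * maps to logical port 3.
 */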
irqreturn_t eth_stat_irq(int irq, void *pdev)
{
    struct net_device *dev = pdev;
    struct sw *sw = netdev_priv(dev);
    u32 cfg;
    u32 stat = __raw_readl(&sw->regs->intr_stat);
    __raw_writel(0xffffffff, &sw->regs->intr_stat);

    if (stat & MAC2_RX_ERROR)
        switch_port_tab[3]->netdev->stats.rx_dropped++;
    if (stat & MAC1_RX_ERROR)
        switch_port_tab[1]->netdev->stats.rx_dropped++;
    if (stat & MAC0_RX_ERROR)
        switch_port_tab[0]->netdev->stats.rx_dropped++;

    if (stat & MAC0_STATUS_CHANGE) {
        cfg = __raw_readl(&sw->regs->mac_cfg[0]);
        switch_port_tab[0]->phydev->link = (cfg & 0x1);
        switch_port_tab[0]->phydev->duplex = ((cfg >> 4) & 0x1);
        if (((cfg >> 2) & 0x3) == 2)
            switch_port_tab[0]->phydev->speed = 1000;
        else if (((cfg >> 2) & 0x3) == 1)
            switch_port_tab[0]->phydev->speed = 100;
        else
            switch_port_tab[0]->phydev->speed = 10;
        cns3xxx_adjust_link(switch_port_tab[0]->netdev);
    }

    if (stat & MAC1_STATUS_CHANGE) {
        cfg = __raw_readl(&sw->regs->mac_cfg[1]);
        switch_port_tab[1]->phydev->link = (cfg & 0x1);
        switch_port_tab[1]->phydev->duplex = ((cfg >> 4) & 0x1);
        if (((cfg >> 2) & 0x3) == 2)
            switch_port_tab[1]->phydev->speed = 1000;
        else if (((cfg >> 2) & 0x3) == 1)
            switch_port_tab[1]->phydev->speed = 100;
        else
            switch_port_tab[1]->phydev->speed = 10;
        cns3xxx_adjust_link(switch_port_tab[1]->netdev);
    }

    if (stat & MAC2_STATUS_CHANGE) {
        cfg = __raw_readl(&sw->regs->mac_cfg[3]);
        switch_port_tab[3]->phydev->link = (cfg & 0x1);
        switch_port_tab[3]->phydev->duplex = ((cfg >> 4) & 0x1);
        if (((cfg >> 2) & 0x3) == 2)
            switch_port_tab[3]->phydev->speed = 1000;
        else if (((cfg >> 2) & 0x3) == 1)
            switch_port_tab[3]->phydev->speed = 100;
        else
            switch_port_tab[3]->phydev->speed = 10;
        cns3xxx_adjust_link(switch_port_tab[3]->netdev);
    }

    return (IRQ_HANDLED);
}

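/*
 * Refill RX descriptors the CPU has consumed. Allocation is GFP_ATOMIC
 * since this runs from NAPI context; on failure the shortfall is
 * remembered in alloc_count and retried on the next poll.
 */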
static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)
{
    struct _rx_ring *rx_ring = sw->rx_ring;
    unsigned int i = rx_ring->alloc_index;
    struct rx_desc *desc = &(rx_ring)->desc[i];
    void *buf;
    unsigned int phys;

    for (received += rx_ring->alloc_count; received > 0; received--) {
        buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);
        if (!buf)
            goto out;

        phys = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
                      RX_SEGMENT_MRU, DMA_FROM_DEVICE);
        if (dma_mapping_error(NULL, phys)) {
            kfree(buf);
            goto out;
        }

        desc->sdl = RX_SEGMENT_MRU;
        desc->sdp = phys;

        /* put the new buffer on RX-free queue */
        rx_ring->buff_tab[i] = buf;
        rx_ring->phys_tab[i] = phys;
        if (i == RX_DESCS - 1) {
            i = 0;
            desc->config0 = END_OF_RING | FIRST_SEGMENT |
                    LAST_SEGMENT | RX_SEGMENT_MRU;
            desc = &(rx_ring)->desc[i];
        } else {
            desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
                    RX_SEGMENT_MRU;
            i++;
            desc++;
        }
    }
out:
    rx_ring->alloc_count = received;
    rx_ring->alloc_index = i;
}

static void eth_check_num_used(struct _tx_ring *tx_ring)
{
    bool stop = false;
    int i;

    if (tx_ring->num_used >= TX_DESCS - TX_DESC_RESERVE)
        stop = true;

    if (tx_ring->stopped == stop)
        return;

    tx_ring->stopped = stop;
    for (i = 0; i < 4; i++) {
        struct port *port = switch_port_tab[i];
        struct net_device *dev;

        if (!port)
            continue;

        dev = port->netdev;
        if (stop)
            netif_stop_queue(dev);
        else
            netif_wake_queue(dev);
    }
}

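/*
 * Reclaim TX descriptors the switch has handed back (cown set): free
 * the skb attached to a frame's first segment, unmap the buffer, and
 * advance free_index until an in-flight descriptor is reached.
 */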
static void eth_complete_tx(struct sw *sw)
{
    struct _tx_ring *tx_ring = sw->tx_ring;
    struct tx_desc *desc;
    int i;
    int index;
    int num_used = tx_ring->num_used;
    struct sk_buff *skb;

    index = tx_ring->free_index;
    desc = &(tx_ring)->desc[index];
    for (i = 0; i < num_used; i++) {
        if (desc->cown) {
            skb = tx_ring->buff_tab[index];
            tx_ring->buff_tab[index] = 0;
            if (skb)
                dev_kfree_skb_any(skb);
            dma_unmap_single(NULL, tx_ring->phys_tab[index],
                desc->sdl, DMA_TO_DEVICE);
            if (++index == TX_DESCS) {
                index = 0;
                desc = &(tx_ring)->desc[index];
            } else {
                desc++;
            }
        } else {
            break;
        }
    }
    tx_ring->free_index = index;
    tx_ring->num_used -= i;
    eth_check_num_used(tx_ring);
}

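/*
 * NAPI poll: walk CPU-owned RX descriptors, build skbs directly on the
 * preallocated segments, and chain multi-segment frames together via
 * the skb frag list (the first/last segment descriptor flags delimit a
 * frame). Frames whose protocol the switch could checksum (desc->prot)
 * and whose L4 checksum passed (!desc->l4f) are marked
 * CHECKSUM_UNNECESSARY and fed through GRO.
 */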
static int eth_poll(struct napi_struct *napi, int budget)
{
    struct sw *sw = container_of(napi, struct sw, napi);
    struct _rx_ring *rx_ring = sw->rx_ring;
    int received = 0;
    unsigned int length;
    unsigned int i = rx_ring->cur_index;
    struct rx_desc *desc = &(rx_ring)->desc[i];

    while (desc->cown) {
        struct sk_buff *skb;
        int reserve = SKB_HEAD_ALIGN;

        if (received >= budget)
            break;

        /* process received frame */
        dma_unmap_single(NULL, rx_ring->phys_tab[i],
                 RX_SEGMENT_MRU, DMA_FROM_DEVICE);

        skb = build_skb(rx_ring->buff_tab[i]);
        if (!skb)
            break;

        skb->dev = switch_port_tab[desc->sp]->netdev;

        length = desc->sdl;
        if (desc->fsd && !desc->lsd)
            length = RX_SEGMENT_MRU;

        if (!desc->fsd) {
            reserve -= NET_IP_ALIGN;
            if (!desc->lsd)
                length += NET_IP_ALIGN;
        }

        skb_reserve(skb, reserve);
        skb_put(skb, length);

        if (!sw->frag_first)
            sw->frag_first = skb;
        else {
            if (sw->frag_first == sw->frag_last)
                skb_frag_add_head(sw->frag_first, skb);
            else
                sw->frag_last->next = skb;
            sw->frag_first->len += skb->len;
            sw->frag_first->data_len += skb->len;
            sw->frag_first->truesize += skb->truesize;
        }
        sw->frag_last = skb;

        if (desc->lsd) {
            struct net_device *dev;

            skb = sw->frag_first;
            dev = skb->dev;
            skb->protocol = eth_type_trans(skb, dev);

            dev->stats.rx_packets++;
            dev->stats.rx_bytes += skb->len;

            /* RX Hardware checksum offload */
            skb->ip_summed = CHECKSUM_NONE;
            switch (desc->prot) {
                case 1:
                case 2:
                case 5:
                case 6:
                case 13:
                case 14:
                    if (!desc->l4f) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        napi_gro_receive(napi, skb);
                        break;
                    }
                    /* fall through */
                default:
                    netif_receive_skb(skb);
                    break;
            }

            sw->frag_first = NULL;
            sw->frag_last = NULL;
        }

        received++;
        if (++i == RX_DESCS) {
            i = 0;
            desc = &(rx_ring)->desc[i];
        } else {
            desc++;
        }
    }

    cns3xxx_alloc_rx_buf(sw, received);

    rx_ring->cur_index = i;

    if (received != budget) {
        napi_complete(napi);
        enable_irq(IRQ_CNS3XXX_SW_R0RXC);
    }

    enable_rx_dma(sw);

    spin_lock_bh(&tx_lock);
    eth_complete_tx(sw);
    spin_unlock_bh(&tx_lock);

    return received;
}

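/*
 * Fill one TX descriptor. config0 carries the segment length and the
 * segment/ring flags and leaves cown clear, so the final config0 write,
 * ordered after the other fields by the barrier, is what hands the
 * descriptor to the switch.
 */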
static void eth_set_desc(struct _tx_ring *tx_ring, int index, int index_last,
             void *data, int len, u32 config0, u32 pmap)
{
    struct tx_desc *tx_desc = &(tx_ring)->desc[index];
    unsigned int phys;

    phys = dma_map_single(NULL, data, len, DMA_TO_DEVICE);
    tx_desc->sdp = phys;
    tx_desc->pmap = pmap;
    tx_ring->phys_tab[index] = phys;

    config0 |= len;
    if (index == TX_DESCS - 1)
        config0 |= END_OF_RING;
    if (index == index_last)
        config0 |= LAST_SEGMENT;

    mb();
    tx_desc->config0 = config0;
}

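/*
 * Transmit path: reserve a run of descriptors under tx_lock (one per
 * page fragment and frag-listed skb, plus the head), fill the
 * fragments first, and write the head descriptor (FIRST_SEGMENT) last
 * so the switch only sees a fully valid chain. pmap selects the
 * destination port; logical port 3 maps to bit 4 of the port map.
 */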
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct port *port = netdev_priv(dev);
    struct sw *sw = port->sw;
    struct _tx_ring *tx_ring = sw->tx_ring;
    struct sk_buff *skb1;
    char pmap = (1 << port->id);
    int nr_frags = skb_shinfo(skb)->nr_frags;
    int nr_desc = nr_frags;
    int index0, index, index_last;
    int len0;
    unsigned int i;
    u32 config0;

    if (pmap == 8)
        pmap = (1 << 4);

    skb_walk_frags(skb, skb1)
        nr_desc++;

    spin_lock_bh(&tx_lock);

    eth_complete_tx(sw);
    if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
        spin_unlock_bh(&tx_lock);
        return NETDEV_TX_BUSY;
    }

    index = index0 = tx_ring->cur_index;
    index_last = (index0 + nr_desc) % TX_DESCS;
    tx_ring->cur_index = (index_last + 1) % TX_DESCS;

    spin_unlock_bh(&tx_lock);

    config0 = FORCE_ROUTE;
    if (skb->ip_summed == CHECKSUM_PARTIAL)
        config0 |= UDP_CHECKSUM | TCP_CHECKSUM;

    len0 = skb->len;

    /* fragments */
    for (i = 0; i < nr_frags; i++) {
        struct skb_frag_struct *frag;
        void *addr;

        index = (index + 1) % TX_DESCS;

        frag = &skb_shinfo(skb)->frags[i];
        addr = page_address(skb_frag_page(frag)) + frag->page_offset;

        eth_set_desc(tx_ring, index, index_last, addr, frag->size,
                 config0, pmap);
    }

    if (nr_frags)
        len0 = skb->len - skb->data_len;

    skb_walk_frags(skb, skb1) {
        index = (index + 1) % TX_DESCS;
        len0 -= skb1->len;

        eth_set_desc(tx_ring, index, index_last, skb1->data, skb1->len,
                 config0, pmap);
    }

    tx_ring->buff_tab[index0] = skb;
    eth_set_desc(tx_ring, index0, index_last, skb->data, len0,
             config0 | FIRST_SEGMENT, pmap);

    mb();

    spin_lock(&tx_lock);
    tx_ring->num_used += nr_desc + 1;
    spin_unlock(&tx_lock);

    dev->stats.tx_packets++;
    dev->stats.tx_bytes += skb->len;

    enable_tx_dma(sw);

    return NETDEV_TX_OK;
}

static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
    struct port *port = netdev_priv(dev);

    if (!netif_running(dev))
        return -EINVAL;
    return phy_mii_ioctl(port->phydev, req, cmd);
}

/* ethtool support */

static void cns3xxx_get_drvinfo(struct net_device *dev,
                   struct ethtool_drvinfo *info)
{
    strcpy(info->driver, DRV_NAME);
    strcpy(info->bus_info, "internal");
}

static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct port *port = netdev_priv(dev);
    return phy_ethtool_gset(port->phydev, cmd);
}

static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct port *port = netdev_priv(dev);
    return phy_ethtool_sset(port->phydev, cmd);
}

static int cns3xxx_nway_reset(struct net_device *dev)
{
    struct port *port = netdev_priv(dev);
    return phy_start_aneg(port->phydev);
}

static struct ethtool_ops cns3xxx_ethtool_ops = {
    .get_drvinfo = cns3xxx_get_drvinfo,
    .get_settings = cns3xxx_get_settings,
    .set_settings = cns3xxx_set_settings,
    .nway_reset = cns3xxx_nway_reset,
    .get_link = ethtool_op_get_link,
};

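/*
 * Allocate the descriptor rings from coherent DMA pools and prime every
 * RX descriptor with a mapped buffer before pointing the switch's
 * FS/TS descriptor base registers at them. TX descriptors start out
 * CPU-owned (cown = 1) so the switch leaves them alone until queued.
 */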
static int init_rings(struct sw *sw)
{
    int i;
    struct _rx_ring *rx_ring = sw->rx_ring;
    struct _tx_ring *tx_ring = sw->tx_ring;

    __raw_writel(0, &sw->regs->fs_dma_ctrl0);
    __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
    __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
    __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);

    __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);

    if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
                        RX_POOL_ALLOC_SIZE, 32, 0)))
        return -ENOMEM;

    if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
                          &rx_ring->phys_addr)))
        return -ENOMEM;
    memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);

    /* Setup RX buffers */
    for (i = 0; i < RX_DESCS; i++) {
        struct rx_desc *desc = &(rx_ring)->desc[i];
        void *buf;

        buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        desc->sdl = RX_SEGMENT_MRU;
        if (i == (RX_DESCS - 1))
            desc->eor = 1;
        desc->fsd = 1;
        desc->lsd = 1;

        desc->sdp = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
                       RX_SEGMENT_MRU, DMA_FROM_DEVICE);
        if (dma_mapping_error(NULL, desc->sdp))
            return -EIO;

        rx_ring->buff_tab[i] = buf;
        rx_ring->phys_tab[i] = desc->sdp;
        desc->cown = 0;
    }
    __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
    __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);

    if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
                        TX_POOL_ALLOC_SIZE, 32, 0)))
        return -ENOMEM;

    if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
                          &tx_ring->phys_addr)))
        return -ENOMEM;
    memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);

    /* Setup TX buffers */
    for (i = 0; i < TX_DESCS; i++) {
        struct tx_desc *desc = &(tx_ring)->desc[i];
        tx_ring->buff_tab[i] = 0;

        if (i == (TX_DESCS - 1))
            desc->eor = 1;
        desc->cown = 1;
    }
    __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
    __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);

    return 0;
}

static void destroy_rings(struct sw *sw)
{
    int i;
    if (sw->rx_ring->desc) {
        for (i = 0; i < RX_DESCS; i++) {
            struct _rx_ring *rx_ring = sw->rx_ring;
            struct rx_desc *desc = &(rx_ring)->desc[i];
            /* RX buffers are raw kzalloc() segments, not skbs */
            void *buf = sw->rx_ring->buff_tab[i];

            if (!buf)
                continue;

            dma_unmap_single(NULL, desc->sdp, RX_SEGMENT_MRU,
                     DMA_FROM_DEVICE);
            kfree(buf);
        }
        dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr);
        dma_pool_destroy(rx_dma_pool);
        rx_dma_pool = 0;
        sw->rx_ring->desc = 0;
    }
    if (sw->tx_ring->desc) {
        for (i = 0; i < TX_DESCS; i++) {
            struct _tx_ring *tx_ring = sw->tx_ring;
            struct tx_desc *desc = &(tx_ring)->desc[i];
            struct sk_buff *skb = sw->tx_ring->buff_tab[i];
            if (skb) {
                dma_unmap_single(NULL, desc->sdp,
                    skb->len, DMA_TO_DEVICE);
                dev_kfree_skb(skb);
            }
        }
        dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr);
        dma_pool_destroy(tx_dma_pool);
        tx_dma_pool = 0;
        sw->tx_ring->desc = 0;
    }
}

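/*
 * Bring a port up. The first open also grabs the shared RX and status
 * IRQs, unmasks the interesting interrupt sources, enables mac_cfg[2]
 * (presumably the CPU-facing port) and un-suspends the descriptor
 * auto-poll engines.
 */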
static int eth_open(struct net_device *dev)
{
    struct port *port = netdev_priv(dev);
    struct sw *sw = port->sw;
    u32 temp;

    port->speed = 0; /* force "link up" message */
    phy_start(port->phydev);

    netif_start_queue(dev);

    if (!ports_open) {
        request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
        request_irq(IRQ_CNS3XXX_SW_STATUS, eth_stat_irq, IRQF_SHARED, "gig_stat", napi_dev);
        napi_enable(&sw->napi);
        netif_start_queue(napi_dev);

        __raw_writel(~(MAC0_STATUS_CHANGE | MAC1_STATUS_CHANGE | MAC2_STATUS_CHANGE |
                   MAC0_RX_ERROR | MAC1_RX_ERROR | MAC2_RX_ERROR), &sw->regs->intr_mask);

        temp = __raw_readl(&sw->regs->mac_cfg[2]);
        temp &= ~(PORT_DISABLE);
        __raw_writel(temp, &sw->regs->mac_cfg[2]);

        temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
        temp &= ~(TS_SUSPEND | FS_SUSPEND);
        __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);

        enable_rx_dma(sw);
    }
    temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
    temp &= ~(PORT_DISABLE);
    __raw_writel(temp, &sw->regs->mac_cfg[port->id]);

    ports_open++;
    netif_carrier_on(dev);

    return 0;
}

static int eth_close(struct net_device *dev)
{
    struct port *port = netdev_priv(dev);
    struct sw *sw = port->sw;
    u32 temp;

    ports_open--;

    temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
    temp |= (PORT_DISABLE);
    __raw_writel(temp, &sw->regs->mac_cfg[port->id]);

    netif_stop_queue(dev);

    phy_stop(port->phydev);

    if (!ports_open) {
        disable_irq(IRQ_CNS3XXX_SW_R0RXC);
        free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
        disable_irq(IRQ_CNS3XXX_SW_STATUS);
        free_irq(IRQ_CNS3XXX_SW_STATUS, napi_dev);
        napi_disable(&sw->napi);
        netif_stop_queue(napi_dev);
        temp = __raw_readl(&sw->regs->mac_cfg[2]);
        temp |= (PORT_DISABLE);
        __raw_writel(temp, &sw->regs->mac_cfg[2]);

        __raw_writel(TS_SUSPEND | FS_SUSPEND,
                 &sw->regs->dma_auto_poll_cfg);
    }

    netif_carrier_off(dev);
    return 0;
}

static void eth_rx_mode(struct net_device *dev)
{
    struct port *port = netdev_priv(dev);
    struct sw *sw = port->sw;
    u32 temp;

    temp = __raw_readl(&sw->regs->mac_glob_cfg);

    if (dev->flags & IFF_PROMISC) {
        if (port->id == 3)
            temp |= ((1 << 2) << PROMISC_OFFSET);
        else
            temp |= ((1 << port->id) << PROMISC_OFFSET);
    } else {
        if (port->id == 3)
            temp &= ~((1 << 2) << PROMISC_OFFSET);
        else
            temp &= ~((1 << port->id) << PROMISC_OFFSET);
    }
    __raw_writel(temp, &sw->regs->mac_glob_cfg);
}

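/*
 * MAC address changes go through the switch's ARL table: first write an
 * entry invalidating the old address, then install the new address as a
 * static, valid entry, polling arl_vlan_cmd for command completion
 * (up to 5000 us) after each step.
 */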
static int eth_set_mac(struct net_device *netdev, void *p)
{
    struct port *port = netdev_priv(netdev);
    struct sw *sw = port->sw;
    struct sockaddr *addr = p;
    u32 cycles = 0;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    /* Invalidate old ARL Entry */
    if (port->id == 3)
        __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
    else
        __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
    __raw_writel(((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
            (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
            &sw->regs->arl_ctrl[1]);

    __raw_writel(((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
            (1 << 1)),
            &sw->regs->arl_ctrl[2]);
    __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);

    while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
            && cycles < 5000) {
        udelay(1);
        cycles++;
    }

    cycles = 0;
    memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

    if (port->id == 3)
        __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
    else
        __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
    __raw_writel(((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
            (addr->sa_data[2] << 8) | (addr->sa_data[3])),
            &sw->regs->arl_ctrl[1]);

    __raw_writel(((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
            (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
    __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);

    while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
        && cycles < 5000) {
        udelay(1);
        cycles++;
    }
    return 0;
}

static int cns3xxx_change_mtu(struct net_device *dev, int new_mtu)
{
    if (new_mtu > MAX_MTU)
        return -EINVAL;

    dev->mtu = new_mtu;
    return 0;
}

static const struct net_device_ops cns3xxx_netdev_ops = {
    .ndo_open = eth_open,
    .ndo_stop = eth_close,
    .ndo_start_xmit = eth_xmit,
    .ndo_set_rx_mode = eth_rx_mode,
    .ndo_do_ioctl = eth_ioctl,
    .ndo_change_mtu = cns3xxx_change_mtu,
    .ndo_set_mac_address = eth_set_mac,
    .ndo_validate_addr = eth_validate_addr,
};

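/*
 * Probe: one napi_dev aggregates RX for the whole switch, while a child
 * net_device is created per populated physical port (platform port bit
 * i; physical port 2 becomes logical port 3). Each port is attached to
 * its PHY over the MDIO bus registered at module init.
 */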
static int __devinit eth_init_one(struct platform_device *pdev)
{
    int i;
    struct port *port;
    struct sw *sw;
    struct net_device *dev;
    struct cns3xxx_plat_info *plat = pdev->dev.platform_data;
    u32 regs_phys;
    char phy_id[MII_BUS_ID_SIZE + 3];
    int err;
    u32 temp;

    if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))
        return -ENOMEM;
    strcpy(napi_dev->name, "switch%d");
    napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;

    SET_NETDEV_DEV(napi_dev, &pdev->dev);
    sw = netdev_priv(napi_dev);
    memset(sw, 0, sizeof(struct sw));
    sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
    regs_phys = CNS3XXX_SWITCH_BASE;
    sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);
    if (!sw->mem_res) {
        err = -EBUSY;
        goto err_free;
    }

    temp = __raw_readl(&sw->regs->phy_auto_addr);
    temp |= (3 << 30); /* maximum frame length: 9600 bytes */
    __raw_writel(temp, &sw->regs->phy_auto_addr);

    for (i = 0; i < 4; i++) {
        temp = __raw_readl(&sw->regs->mac_cfg[i]);
        temp |= (PORT_DISABLE);
        __raw_writel(temp, &sw->regs->mac_cfg[i]);
    }

    temp = PORT_DISABLE;
    __raw_writel(temp, &sw->regs->mac_cfg[2]);

    temp = __raw_readl(&sw->regs->vlan_cfg);
    temp |= NIC_MODE | VLAN_UNAWARE;
    __raw_writel(temp, &sw->regs->vlan_cfg);

    __raw_writel(UNKNOWN_VLAN_TO_CPU |
             CRC_STRIPPING, &sw->regs->mac_glob_cfg);

    if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) {
        err = -ENOMEM;
        goto err_free;
    }
    memset(sw->rx_ring, 0, sizeof(struct _rx_ring));

    if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) {
        err = -ENOMEM;
        goto err_free_rx;
    }
    memset(sw->tx_ring, 0, sizeof(struct _tx_ring));

    if ((err = init_rings(sw)) != 0) {
        destroy_rings(sw);
        err = -ENOMEM;
        goto err_free_rings;
    }
    platform_set_drvdata(pdev, napi_dev);

    netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);

    for (i = 0; i < 3; i++) {
        if (!(plat->ports & (1 << i))) {
            continue;
        }

        if (!(dev = alloc_etherdev(sizeof(struct port)))) {
            goto free_ports;
        }

        port = netdev_priv(dev);
        port->netdev = dev;
        if (i == 2)
            port->id = 3;
        else
            port->id = i;
        port->sw = sw;

        temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
        temp |= (PORT_DISABLE | PORT_BLOCK_STATE | PORT_LEARN_DIS);
        __raw_writel(temp, &sw->regs->mac_cfg[port->id]);

        dev->netdev_ops = &cns3xxx_netdev_ops;
        dev->ethtool_ops = &cns3xxx_ethtool_ops;
        dev->tx_queue_len = 1000;
        dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;

        switch_port_tab[port->id] = port;
        memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);

        snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
        port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
            PHY_INTERFACE_MODE_RGMII);
        if ((err = IS_ERR(port->phydev))) {
            switch_port_tab[port->id] = 0;
            free_netdev(dev);
            goto free_ports;
        }

        port->phydev->irq = PHY_IGNORE_INTERRUPT;

        if ((err = register_netdev(dev))) {
            phy_disconnect(port->phydev);
            switch_port_tab[port->id] = 0;
            free_netdev(dev);
            goto free_ports;
        }

        printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
        netif_carrier_off(dev);
        dev = 0;
    }

    return 0;

free_ports:
    err = -ENOMEM;
    for (--i; i >= 0; i--) {
        if (switch_port_tab[i]) {
            port = switch_port_tab[i];
            dev = port->netdev;
            unregister_netdev(dev);
            phy_disconnect(port->phydev);
            switch_port_tab[i] = 0;
            free_netdev(dev);
        }
    }
err_free_rings:
    kfree(sw->tx_ring);
err_free_rx:
    kfree(sw->rx_ring);
err_free:
    free_netdev(napi_dev);
    return err;
}

static int __devexit eth_remove_one(struct platform_device *pdev)
{
    struct net_device *dev = platform_get_drvdata(pdev);
    struct sw *sw = netdev_priv(dev);
    int i;
    destroy_rings(sw);

    for (i = 3; i >= 0; i--) {
        if (switch_port_tab[i]) {
            struct port *port = switch_port_tab[i];
            struct net_device *dev = port->netdev;
            unregister_netdev(dev);
            phy_disconnect(port->phydev);
            switch_port_tab[i] = 0;
            free_netdev(dev);
        }
    }

    release_resource(sw->mem_res);
    free_netdev(napi_dev);
    return 0;
}

static struct platform_driver cns3xxx_eth_driver = {
    .driver.name = DRV_NAME,
    .probe = eth_init_one,
    .remove = eth_remove_one,
};

static int __init eth_init_module(void)
{
    int err;
    if ((err = cns3xxx_mdio_register()))
        return err;
    return platform_driver_register(&cns3xxx_eth_driver);
}

static void __exit eth_cleanup_module(void)
{
    platform_driver_unregister(&cns3xxx_eth_driver);
    cns3xxx_mdio_remove();
}

module_init(eth_init_module);
module_exit(eth_cleanup_module);

MODULE_AUTHOR("Chris Lang");
MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cns3xxx_eth");
