target/linux/adm5120/files/drivers/net/adm5120sw.c

/*
 * ADM5120 built-in ethernet switch driver
 *
 * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
 *
 * This code was based on a driver for Linux 2.6.xx by Jeroen Vreeken.
 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 * NAPI extension for Jeroen's driver
 * Copyright Thomas Langer (Thomas.Langer@infineon.com), 2007
 * Copyright Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 * Inspiration for Jeroen's driver came from the ADMtek 2.4 driver.
 * Copyright ADMtek Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/irq.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/mipsregs.h>

#include <asm/mach-adm5120/adm5120_info.h>
#include <asm/mach-adm5120/adm5120_defs.h>
#include <asm/mach-adm5120/adm5120_switch.h>

#include "adm5120sw.h"

#define DRV_NAME "adm5120-switch"
#define DRV_DESC "ADM5120 built-in ethernet switch driver"
#define DRV_VERSION "0.1.1"

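/*
 * Compile-time switches local to this file; they are not Kconfig symbols
 * despite the CONFIG_ prefix. NAPI is on by default and the debug
 * helpers are compiled out.
 */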
#define CONFIG_ADM5120_SWITCH_NAPI 1
#undef CONFIG_ADM5120_SWITCH_DEBUG

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_ADM5120_SWITCH_DEBUG
#define SW_DBG(f, a...) printk(KERN_DEBUG "%s: " f, DRV_NAME , ## a)
#else
#define SW_DBG(f, a...) do {} while (0)
#endif
#define SW_ERR(f, a...) printk(KERN_ERR "%s: " f, DRV_NAME , ## a)
#define SW_INFO(f, a...) printk(KERN_INFO "%s: " f, DRV_NAME , ## a)

#define SWITCH_NUM_PORTS 6
#define ETH_CSUM_LEN 4

#define RX_MAX_PKTLEN 1550
#define RX_RING_SIZE 64

#define TX_RING_SIZE 32
#define TX_QUEUE_LEN 28 /* Limit ring entries actually used. */
#define TX_TIMEOUT (HZ * 400)

#define RX_DESCS_SIZE (RX_RING_SIZE * sizeof(struct dma_desc *))
#define RX_SKBS_SIZE (RX_RING_SIZE * sizeof(struct sk_buff *))
#define TX_DESCS_SIZE (TX_RING_SIZE * sizeof(struct dma_desc *))
#define TX_SKBS_SIZE (TX_RING_SIZE * sizeof(struct sk_buff *))

#define SKB_ALLOC_LEN (RX_MAX_PKTLEN + 32)
#define SKB_RESERVE_LEN (NET_IP_ALIGN + NET_SKB_PAD)

#define SWITCH_INTS_HIGH (SWITCH_INT_SHD | SWITCH_INT_RHD | SWITCH_INT_HDF)
#define SWITCH_INTS_LOW (SWITCH_INT_SLD | SWITCH_INT_RLD | SWITCH_INT_LDF)
#define SWITCH_INTS_ERR (SWITCH_INT_RDE | SWITCH_INT_SDE | SWITCH_INT_CPUH)
#define SWITCH_INTS_Q (SWITCH_INT_P0QF | SWITCH_INT_P1QF | SWITCH_INT_P2QF | \
            SWITCH_INT_P3QF | SWITCH_INT_P4QF | SWITCH_INT_P5QF | \
            SWITCH_INT_CPQF | SWITCH_INT_GQF)

#define SWITCH_INTS_ALL (SWITCH_INTS_HIGH | SWITCH_INTS_LOW | \
            SWITCH_INTS_ERR | SWITCH_INTS_Q | \
            SWITCH_INT_MD | SWITCH_INT_PSC)

#define SWITCH_INTS_USED (SWITCH_INTS_LOW | SWITCH_INT_PSC)
#define SWITCH_INTS_POLL (SWITCH_INT_RLD | SWITCH_INT_LDF | SWITCH_INT_SLD)

/* ------------------------------------------------------------------------ */

struct adm5120_if_priv {
    struct net_device *dev;

    unsigned int vlan_no;
    unsigned int port_mask;

#ifdef CONFIG_ADM5120_SWITCH_NAPI
    struct napi_struct napi;
#endif
};

struct dma_desc {
    __u32 buf1;
#define DESC_OWN (1UL << 31) /* Owned by the switch */
#define DESC_EOR (1UL << 28) /* End of Ring */
#define DESC_ADDR_MASK 0x1FFFFFF
#define DESC_ADDR(x) ((__u32)(x) & DESC_ADDR_MASK)
    __u32 buf2;
#define DESC_BUF2_EN (1UL << 31) /* Buffer 2 enable */
    __u32 buflen;
    __u32 misc;
/* definitions for tx/rx descriptors */
#define DESC_PKTLEN_SHIFT 16
#define DESC_PKTLEN_MASK 0x7FF
/* tx descriptor specific part */
#define DESC_CSUM (1UL << 31) /* Append checksum */
#define DESC_DSTPORT_SHIFT 8
#define DESC_DSTPORT_MASK 0x3F
#define DESC_VLAN_MASK 0x3F
/* rx descriptor specific part */
#define DESC_SRCPORT_SHIFT 12
#define DESC_SRCPORT_MASK 0x7
#define DESC_DA_MASK 0x3
#define DESC_DA_SHIFT 4
#define DESC_IPCSUM_FAIL (1UL << 3) /* IP checksum fail */
#define DESC_VLAN_TAG (1UL << 2) /* VLAN tag present */
#define DESC_TYPE_MASK 0x3 /* mask for Packet type */
#define DESC_TYPE_IP 0x0 /* IP packet */
#define DESC_TYPE_PPPoE 0x1 /* PPPoE packet */
} __attribute__ ((aligned(16)));
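/*
 * Illustrative sketch (not compiled) of how the TX path below fills a
 * descriptor; field names match adm5120_if_hard_start_xmit():
 *
 *    desc->misc   = (len << DESC_PKTLEN_SHIFT) | (1 << vlan_no);
 *    desc->buflen = len;
 *    desc->buf1   = DESC_ADDR(skb->data) | DESC_OWN;  (EOR bit preserved)
 *
 * Setting DESC_OWN hands the descriptor to the switch; the CPU may touch
 * it again only after the hardware has cleared the bit.
 */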

/* ------------------------------------------------------------------------ */

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SWITCH_NUM_PORTS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SWITCH_NUM_PORTS];

static struct dma_desc *txl_descs;
static struct dma_desc *rxl_descs;

static dma_addr_t txl_descs_dma;
static dma_addr_t rxl_descs_dma;

static struct sk_buff **txl_skbuff;
static struct sk_buff **rxl_skbuff;

static unsigned int cur_rxl, dirty_rxl; /* producer/consumer ring indices */
static unsigned int cur_txl, dirty_txl;

static unsigned int sw_used;

static DEFINE_SPINLOCK(tx_lock);

/* ------------------------------------------------------------------------ */

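/*
 * The switch registers live at ADM5120_SWITCH_BASE; KSEG1ADDR() maps the
 * physical base into the uncached MIPS KSEG1 segment, so no explicit
 * ioremap() or cache maintenance is needed for register access.
 */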
static inline u32 sw_read_reg(u32 reg)
{
    return __raw_readl((void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

static inline void sw_write_reg(u32 reg, u32 val)
{
    __raw_writel(val, (void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

static inline void sw_int_mask(u32 mask)
{
    u32 t;

    t = sw_read_reg(SWITCH_REG_INT_MASK);
    t |= mask;
    sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_unmask(u32 mask)
{
    u32 t;

    t = sw_read_reg(SWITCH_REG_INT_MASK);
    t &= ~mask;
    sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_ack(u32 mask)
{
    sw_write_reg(SWITCH_REG_INT_STATUS, mask);
}

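/*
 * In the INT_MASK register a set bit means "masked": sw_int_mask() sets
 * bits, sw_int_unmask() clears them. Pending-but-masked sources are
 * filtered out here, so callers only ever see enabled interrupts.
 */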
static inline u32 sw_int_status(void)
{
    u32 t;

    t = sw_read_reg(SWITCH_REG_INT_STATUS);
    t &= ~sw_read_reg(SWITCH_REG_INT_MASK);
    return t;
}

static inline u32 desc_get_srcport(struct dma_desc *desc)
{
    return (desc->misc >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK;
}

static inline u32 desc_get_pktlen(struct dma_desc *desc)
{
    return (desc->misc >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK;
}

static inline int desc_ipcsum_fail(struct dma_desc *desc)
{
    return ((desc->misc & DESC_IPCSUM_FAIL) != 0);
}

/* ------------------------------------------------------------------------ */

static void sw_dump_desc(char *label, struct dma_desc *desc, int tx)
{
    u32 t;

    SW_DBG("%s %s desc/%p\n", label, tx ? "tx" : "rx", desc);

    t = desc->buf1;
    SW_DBG(" buf1 %08X addr=%08X; len=%08X %s%s\n", t,
        t & DESC_ADDR_MASK,
        desc->buflen,
        (t & DESC_OWN) ? "SWITCH" : "CPU",
        (t & DESC_EOR) ? " RE" : "");

    t = desc->buf2;
    SW_DBG(" buf2 %08X addr=%08X%s\n", desc->buf2,
        t & DESC_ADDR_MASK,
        (t & DESC_BUF2_EN) ? " EN" : "");

    t = desc->misc;
    if (tx)
        SW_DBG(" misc %08X%s pktlen=%04X ports=%02X vlan=%02X\n", t,
            (t & DESC_CSUM) ? " CSUM" : "",
            (t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
            (t >> DESC_DSTPORT_SHIFT) & DESC_DSTPORT_MASK,
            t & DESC_VLAN_MASK);
    else
        SW_DBG(" misc %08X pktlen=%04X port=%d DA=%d%s%s type=%d\n",
            t,
            (t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
            (t >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK,
            (t >> DESC_DA_SHIFT) & DESC_DA_MASK,
            (t & DESC_IPCSUM_FAIL) ? " IPCF" : "",
            (t & DESC_VLAN_TAG) ? " VLAN" : "",
            (t & DESC_TYPE_MASK));
}

static void sw_dump_intr_mask(char *label, u32 mask)
{
    SW_DBG("%s %08X%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
        label, mask,
        (mask & SWITCH_INT_SHD) ? " SHD" : "",
        (mask & SWITCH_INT_SLD) ? " SLD" : "",
        (mask & SWITCH_INT_RHD) ? " RHD" : "",
        (mask & SWITCH_INT_RLD) ? " RLD" : "",
        (mask & SWITCH_INT_HDF) ? " HDF" : "",
        (mask & SWITCH_INT_LDF) ? " LDF" : "",
        (mask & SWITCH_INT_P0QF) ? " P0QF" : "",
        (mask & SWITCH_INT_P1QF) ? " P1QF" : "",
        (mask & SWITCH_INT_P2QF) ? " P2QF" : "",
        (mask & SWITCH_INT_P3QF) ? " P3QF" : "",
        (mask & SWITCH_INT_P4QF) ? " P4QF" : "",
        (mask & SWITCH_INT_CPQF) ? " CPQF" : "",
        (mask & SWITCH_INT_GQF) ? " GQF" : "",
        (mask & SWITCH_INT_MD) ? " MD" : "",
        (mask & SWITCH_INT_BCS) ? " BCS" : "",
        (mask & SWITCH_INT_PSC) ? " PSC" : "",
        (mask & SWITCH_INT_ID) ? " ID" : "",
        (mask & SWITCH_INT_W0TE) ? " W0TE" : "",
        (mask & SWITCH_INT_W1TE) ? " W1TE" : "",
        (mask & SWITCH_INT_RDE) ? " RDE" : "",
        (mask & SWITCH_INT_SDE) ? " SDE" : "",
        (mask & SWITCH_INT_CPUH) ? " CPUH" : "");
}

static void sw_dump_regs(void)
{
    u32 t;

    t = sw_read_reg(SWITCH_REG_PHY_STATUS);
    SW_DBG("phy_status: %08X\n", t);

    t = sw_read_reg(SWITCH_REG_CPUP_CONF);
    SW_DBG("cpup_conf: %08X%s%s%s\n", t,
        (t & CPUP_CONF_DCPUP) ? " DCPUP" : "",
        (t & CPUP_CONF_CRCP) ? " CRCP" : "",
        (t & CPUP_CONF_BTM) ? " BTM" : "");

    t = sw_read_reg(SWITCH_REG_PORT_CONF0);
    SW_DBG("port_conf0: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_PORT_CONF1);
    SW_DBG("port_conf1: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_PORT_CONF2);
    SW_DBG("port_conf2: %08X\n", t);

    t = sw_read_reg(SWITCH_REG_VLAN_G1);
    SW_DBG("vlan g1: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_VLAN_G2);
    SW_DBG("vlan g2: %08X\n", t);

    t = sw_read_reg(SWITCH_REG_BW_CNTL0);
    SW_DBG("bw_cntl0: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_BW_CNTL1);
    SW_DBG("bw_cntl1: %08X\n", t);

    t = sw_read_reg(SWITCH_REG_PHY_CNTL0);
    SW_DBG("phy_cntl0: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_PHY_CNTL1);
    SW_DBG("phy_cntl1: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_PHY_CNTL2);
    SW_DBG("phy_cntl2: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
    SW_DBG("phy_cntl3: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_PHY_CNTL4);
    SW_DBG("phy_cntl4: %08X\n", t);

    t = sw_read_reg(SWITCH_REG_INT_STATUS);
    sw_dump_intr_mask("int_status: ", t);

    t = sw_read_reg(SWITCH_REG_INT_MASK);
    sw_dump_intr_mask("int_mask: ", t);

    t = sw_read_reg(SWITCH_REG_SHDA);
    SW_DBG("shda: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_SLDA);
    SW_DBG("slda: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_RHDA);
    SW_DBG("rhda: %08X\n", t);
    t = sw_read_reg(SWITCH_REG_RLDA);
    SW_DBG("rlda: %08X\n", t);
}

/* ------------------------------------------------------------------------ */

static inline void adm5120_rx_dma_update(struct dma_desc *desc,
    struct sk_buff *skb, int end)
{
    desc->misc = 0;
    desc->buf2 = 0;
    desc->buflen = RX_MAX_PKTLEN;
    desc->buf1 = DESC_ADDR(skb->data) |
        DESC_OWN | (end ? DESC_EOR : 0);
}

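/*
 * cur_rxl/dirty_rxl (and cur_txl/dirty_txl) are free-running counters;
 * a slot index is obtained with "% RING_SIZE" and "cur - dirty" is the
 * number of outstanding entries. E.g. with cur_rxl = 70 and
 * dirty_rxl = 66, entries 66..69 (slots 2..5 of the 64-slot ring) still
 * need fresh buffers.
 */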
static void adm5120_switch_rx_refill(void)
{
    unsigned int entry;

    for (; cur_rxl - dirty_rxl > 0; dirty_rxl++) {
        struct dma_desc *desc;
        struct sk_buff *skb;

        entry = dirty_rxl % RX_RING_SIZE;
        desc = &rxl_descs[entry];

        skb = rxl_skbuff[entry];
        if (skb == NULL) {
            skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
            if (skb) {
                skb_reserve(skb, SKB_RESERVE_LEN);
                rxl_skbuff[entry] = skb;
            } else {
                SW_ERR("no memory for skb\n");
                desc->buflen = 0;
                desc->buf2 = 0;
                desc->misc = 0;
                desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN;
                break;
            }
        }

        desc->buf2 = 0;
        desc->buflen = RX_MAX_PKTLEN;
        desc->misc = 0;
        desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN |
                DESC_ADDR(skb->data);
    }
}

static int adm5120_switch_rx(int limit)
{
    unsigned int done = 0;

    SW_DBG("rx start, limit=%d, cur_rxl=%u, dirty_rxl=%u\n",
                limit, cur_rxl, dirty_rxl);

    while (done < limit) {
        int entry = cur_rxl % RX_RING_SIZE;
        struct dma_desc *desc = &rxl_descs[entry];
        struct net_device *rdev;
        unsigned int port;

        if (desc->buf1 & DESC_OWN)
            break;

        if (dirty_rxl + RX_RING_SIZE == cur_rxl)
            break;

        port = desc_get_srcport(desc);
        rdev = adm5120_port[port];

        SW_DBG("rx descriptor %u, desc=%p, skb=%p\n", entry, desc,
                rxl_skbuff[entry]);

        if ((rdev) && netif_running(rdev)) {
            struct sk_buff *skb = rxl_skbuff[entry];
            int pktlen;

            pktlen = desc_get_pktlen(desc);
            pktlen -= ETH_CSUM_LEN;

            if ((pktlen == 0) || desc_ipcsum_fail(desc)) {
                rdev->stats.rx_errors++;
                if (pktlen == 0)
                    rdev->stats.rx_length_errors++;
                if (desc_ipcsum_fail(desc))
                    rdev->stats.rx_crc_errors++;
                SW_DBG("rx error, recycling skb %u\n", entry);
            } else {
                skb_put(skb, pktlen);

                skb->dev = rdev;
                skb->protocol = eth_type_trans(skb, rdev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                dma_cache_wback_inv((unsigned long)skb->data,
                    skb->len);

#ifdef CONFIG_ADM5120_SWITCH_NAPI
                netif_receive_skb(skb);
#else
                netif_rx(skb);
#endif

                rdev->last_rx = jiffies;
                rdev->stats.rx_packets++;
                rdev->stats.rx_bytes += pktlen;

                rxl_skbuff[entry] = NULL;
                done++;
            }
        } else {
            SW_DBG("no rx device, recycling skb %u\n", entry);
        }

        cur_rxl++;
        if (cur_rxl - dirty_rxl > RX_RING_SIZE / 4)
            adm5120_switch_rx_refill();
    }

    adm5120_switch_rx_refill();

    SW_DBG("rx finished, cur_rxl=%u, dirty_rxl=%u, processed %d\n",
                cur_rxl, dirty_rxl, done);

    return done;
}

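/*
 * Reclaim descriptors the switch has finished with (DESC_OWN cleared)
 * and wake the per-port queues once the ring has drained a few entries
 * below the TX_QUEUE_LEN stop threshold; the hysteresis avoids a
 * wake/stop ping-pong on a nearly full ring.
 */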
static void adm5120_switch_tx(void)
{
    unsigned int entry;

    spin_lock(&tx_lock);
    entry = dirty_txl % TX_RING_SIZE;
    while (dirty_txl != cur_txl) {
        struct dma_desc *desc = &txl_descs[entry];
        struct sk_buff *skb = txl_skbuff[entry];

        if (desc->buf1 & DESC_OWN)
            break;

        if (netif_running(skb->dev)) {
            skb->dev->stats.tx_bytes += skb->len;
            skb->dev->stats.tx_packets++;
        }

        dev_kfree_skb_irq(skb);
        txl_skbuff[entry] = NULL;
        entry = (++dirty_txl) % TX_RING_SIZE;
    }

    if ((cur_txl - dirty_txl) < TX_QUEUE_LEN - 4) {
        int i;
        for (i = 0; i < SWITCH_NUM_PORTS; i++) {
            if (!adm5120_devs[i])
                continue;
            netif_wake_queue(adm5120_devs[i]);
        }
    }
    spin_unlock(&tx_lock);
}

#ifdef CONFIG_ADM5120_SWITCH_NAPI
static int adm5120_if_poll(struct napi_struct *napi, int limit)
{
    struct adm5120_if_priv *priv = container_of(napi,
                struct adm5120_if_priv, napi);
    struct net_device *dev = priv->dev;
    int done;
    u32 status;

    sw_int_ack(SWITCH_INTS_POLL);

    SW_DBG("%s: processing TX ring\n", dev->name);
    adm5120_switch_tx();

    SW_DBG("%s: processing RX ring\n", dev->name);
    done = adm5120_switch_rx(limit);

    status = sw_int_status() & SWITCH_INTS_POLL;
    if ((done < limit) && (!status)) {
        SW_DBG("disable polling mode for %s\n", dev->name);
        napi_complete(napi);
        sw_int_unmask(SWITCH_INTS_POLL);
        return 0;
    }

    SW_DBG("%s still in polling mode, done=%d, status=%x\n",
            dev->name, done, status);
    return 1;
}
#endif /* CONFIG_ADM5120_SWITCH_NAPI */


static irqreturn_t adm5120_switch_irq(int irq, void *dev_id)
{
    u32 status;

    status = sw_int_status();
    status &= SWITCH_INTS_ALL;
    if (!status)
        return IRQ_NONE;

#ifdef CONFIG_ADM5120_SWITCH_NAPI
    sw_int_ack(status & ~SWITCH_INTS_POLL);

    if (status & SWITCH_INTS_POLL) {
        struct net_device *dev = dev_id;
        struct adm5120_if_priv *priv = netdev_priv(dev);

        sw_dump_intr_mask("poll ints", status);
        SW_DBG("enable polling mode for %s\n", dev->name);
        sw_int_mask(SWITCH_INTS_POLL);
        napi_schedule(&priv->napi);
    }
#else
    sw_int_ack(status);

    if (status & (SWITCH_INT_RLD | SWITCH_INT_LDF))
        adm5120_switch_rx(RX_RING_SIZE);

    if (status & SWITCH_INT_SLD)
        adm5120_switch_tx();
#endif

    return IRQ_HANDLED;
}

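/*
 * The bandwidth matrix is one byte per switch port: matrix[0..3] go into
 * BW_CNTL0, matrix[4] into BW_CNTL1, and matrix[5] selects the high bit
 * of BW_CNTL1 (presumably a rate-limit enable; the datasheet naming is
 * not visible from this file).
 */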
static void adm5120_set_bw(char *matrix)
{
    unsigned long val;

    /* Ports 0 to 3 are set using the bandwidth control 0 register */
    val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
    sw_write_reg(SWITCH_REG_BW_CNTL0, val);

    /* Ports 4 and 5 are set using the bandwidth control 1 register */
    val = matrix[4];
    if (matrix[5] == 1)
        sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000);
    else
        sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x80000000);

    SW_DBG("D: ctl0 0x%08X, ctl1 0x%08X\n", sw_read_reg(SWITCH_REG_BW_CNTL0),
        sw_read_reg(SWITCH_REG_BW_CNTL1));
}

static void adm5120_switch_tx_ring_reset(struct dma_desc *desc,
        struct sk_buff **skbl, int num)
{
    memset(desc, 0, num * sizeof(*desc));
    desc[num-1].buf1 |= DESC_EOR;
    memset(skbl, 0, sizeof(struct sk_buff *) * num);

    cur_txl = 0;
    dirty_txl = 0;
}

static void adm5120_switch_rx_ring_reset(struct dma_desc *desc,
        struct sk_buff **skbl, int num)
{
    int i;

    memset(desc, 0, num * sizeof(*desc));
    for (i = 0; i < num; i++) {
        skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
        if (!skbl[i]) {
            i = num;
            break;
        }
        skb_reserve(skbl[i], SKB_RESERVE_LEN);
        adm5120_rx_dma_update(&desc[i], skbl[i], (num - 1 == i));
    }

    cur_rxl = 0;
    dirty_rxl = 0;
}

static int adm5120_switch_tx_ring_alloc(void)
{
    int err;

    txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma,
                    GFP_ATOMIC);
    if (!txl_descs) {
        err = -ENOMEM;
        goto err;
    }

    txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL);
    if (!txl_skbuff) {
        err = -ENOMEM;
        goto err;
    }

    return 0;

err:
    return err;
}

static void adm5120_switch_tx_ring_free(void)
{
    int i;

    if (txl_skbuff) {
        for (i = 0; i < TX_RING_SIZE; i++)
            if (txl_skbuff[i])
                kfree_skb(txl_skbuff[i]);
        kfree(txl_skbuff);
    }

    if (txl_descs)
        dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs,
            txl_descs_dma);
}

static int adm5120_switch_rx_ring_alloc(void)
{
    int err;
    int i;

    /* init RX ring */
    rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,
                    GFP_ATOMIC);
    if (!rxl_descs) {
        err = -ENOMEM;
        goto err;
    }

    rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);
    if (!rxl_skbuff) {
        err = -ENOMEM;
        goto err;
    }

    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb;
        skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
        if (!skb) {
            err = -ENOMEM;
            goto err;
        }
        rxl_skbuff[i] = skb;
        skb_reserve(skb, SKB_RESERVE_LEN);
    }

    return 0;

err:
    return err;
}

static void adm5120_switch_rx_ring_free(void)
{
    int i;

    if (rxl_skbuff) {
        for (i = 0; i < RX_RING_SIZE; i++)
            if (rxl_skbuff[i])
                kfree_skb(rxl_skbuff[i]);
        kfree(rxl_skbuff);
    }

    if (rxl_descs)
        dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs,
            rxl_descs_dma);
}

static void adm5120_write_mac(struct net_device *dev)
{
    struct adm5120_if_priv *priv = netdev_priv(dev);
    unsigned char *mac = dev->dev_addr;
    u32 t;

    t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
        (mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC5_SHIFT);
    sw_write_reg(SWITCH_REG_MAC_WT1, t);

    t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
        MAC_WT0_MAWC | MAC_WT0_WVE | (priv->vlan_no<<3);

    sw_write_reg(SWITCH_REG_MAC_WT0, t);

    while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD))
        ;
}

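/*
 * matrix[i] is the member-port bitmap of VLAN i; one byte per VLAN goes
 * into VLAN_G1 (VLANs 0-3) and VLAN_G2 (VLANs 4-5). As an illustration
 * only (the real table comes from adm5120_eth_vlans), a typical layout
 * pairs each VLAN with one PHY port plus the CPU port, e.g.
 * matrix[0] = 0x41 for port 0 plus the CPU port, assuming bit 6 is the
 * CPU port on this SoC.
 */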
static void adm5120_set_vlan(char *matrix)
{
    unsigned long val;
    int vlan_port, port;

    val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
    sw_write_reg(SWITCH_REG_VLAN_G1, val);
    val = matrix[4] + (matrix[5]<<8);
    sw_write_reg(SWITCH_REG_VLAN_G2, val);

    /* Now set/update the port vs. device lookup table */
    for (port = 0; port < SWITCH_NUM_PORTS; port++) {
        for (vlan_port = 0; vlan_port < SWITCH_NUM_PORTS &&
            !(matrix[vlan_port] & (0x00000001 << port)); vlan_port++)
            ;
        if (vlan_port < SWITCH_NUM_PORTS)
            adm5120_port[port] = adm5120_devs[vlan_port];
        else
            adm5120_port[port] = NULL;
    }
}

static void adm5120_switch_set_vlan_mac(unsigned int vlan, unsigned char *mac)
{
    u32 t;

    t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT)
        | (mac[4] << MAC_WT1_MAC4_SHIFT)
        | (mac[5] << MAC_WT1_MAC5_SHIFT);
    sw_write_reg(SWITCH_REG_MAC_WT1, t);

    t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
        MAC_WT0_MAWC | MAC_WT0_WVE | (vlan << MAC_WT0_WVN_SHIFT) |
        (MAC_WT0_WAF_STATIC << MAC_WT0_WAF_SHIFT);
    sw_write_reg(SWITCH_REG_MAC_WT0, t);

    do {
        t = sw_read_reg(SWITCH_REG_MAC_WT0);
    } while ((t & MAC_WT0_MWD) == 0);
}

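/*
 * Each VLAN occupies one byte in the VLAN group registers: VLANs 0-3 in
 * VLAN_G1, VLANs 4-5 in VLAN_G2. The helper below rewrites just the
 * byte that belongs to the given VLAN.
 */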
static void adm5120_switch_set_vlan_ports(unsigned int vlan, u32 ports)
{
    unsigned int reg;
    u32 t;

    if (vlan < 4)
        reg = SWITCH_REG_VLAN_G1;
    else {
        vlan -= 4;
        reg = SWITCH_REG_VLAN_G2;
    }

    t = sw_read_reg(reg);
    t &= ~(0xFF << (vlan*8));
    t |= (ports << (vlan*8));
    sw_write_reg(reg, t);
}

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_ADM5120_SWITCH_NAPI
static inline void adm5120_if_napi_enable(struct net_device *dev)
{
    struct adm5120_if_priv *priv = netdev_priv(dev);
    napi_enable(&priv->napi);
}

static inline void adm5120_if_napi_disable(struct net_device *dev)
{
    struct adm5120_if_priv *priv = netdev_priv(dev);
    napi_disable(&priv->napi);
}
#else
static inline void adm5120_if_napi_enable(struct net_device *dev) {}
static inline void adm5120_if_napi_disable(struct net_device *dev) {}
#endif /* CONFIG_ADM5120_SWITCH_NAPI */

static int adm5120_if_open(struct net_device *dev)
{
    u32 t;
    int err;
    int i;

    adm5120_if_napi_enable(dev);

    err = request_irq(dev->irq, adm5120_switch_irq, IRQF_SHARED,
              dev->name, dev);
    if (err) {
        SW_ERR("unable to get irq for %s\n", dev->name);
        goto err;
    }

    if (!sw_used++)
        /* enable interrupts on first open */
        sw_int_unmask(SWITCH_INTS_USED);

    /* enable (additional) port */
    t = sw_read_reg(SWITCH_REG_PORT_CONF0);
    for (i = 0; i < SWITCH_NUM_PORTS; i++) {
        if (dev == adm5120_devs[i])
            t &= ~adm5120_eth_vlans[i];
    }
    sw_write_reg(SWITCH_REG_PORT_CONF0, t);

    netif_start_queue(dev);

    return 0;

err:
    adm5120_if_napi_disable(dev);
    return err;
}

static int adm5120_if_stop(struct net_device *dev)
{
    u32 t;
    int i;

    netif_stop_queue(dev);
    adm5120_if_napi_disable(dev);

    /* disable port if not assigned to other devices */
    t = sw_read_reg(SWITCH_REG_PORT_CONF0);
    t |= SWITCH_PORTS_NOCPU;
    for (i = 0; i < SWITCH_NUM_PORTS; i++) {
        if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
            t &= ~adm5120_eth_vlans[i];
    }
    sw_write_reg(SWITCH_REG_PORT_CONF0, t);

    if (!--sw_used)
        sw_int_mask(SWITCH_INTS_USED);

    free_irq(dev->irq, dev);

    return 0;
}

static int adm5120_if_hard_start_xmit(struct sk_buff *skb,
        struct net_device *dev)
{
    struct dma_desc *desc;
    struct adm5120_if_priv *priv = netdev_priv(dev);
    unsigned int entry;
    unsigned long data;
    int i;

    /* lock switch irq */
    spin_lock_irq(&tx_lock);

    /* calculate the next TX descriptor entry. */
    entry = cur_txl % TX_RING_SIZE;

    desc = &txl_descs[entry];
    if (desc->buf1 & DESC_OWN) {
        /* We want to write a packet but the TX queue is still
         * occupied by the DMA. We are faster than the DMA... */
        SW_DBG("%s unable to transmit, packet dropped\n", dev->name);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        spin_unlock_irq(&tx_lock);
        return 0;
    }

    txl_skbuff[entry] = skb;
    data = (desc->buf1 & DESC_EOR);
    data |= DESC_ADDR(skb->data);

    desc->misc =
        ((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << DESC_PKTLEN_SHIFT) |
        (0x1 << priv->vlan_no);

    desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

    desc->buf1 = data | DESC_OWN;
    sw_write_reg(SWITCH_REG_SEND_TRIG, SEND_TRIG_STL);

    cur_txl++;
    if (cur_txl == dirty_txl + TX_QUEUE_LEN) {
        for (i = 0; i < SWITCH_NUM_PORTS; i++) {
            if (!adm5120_devs[i])
                continue;
            netif_stop_queue(adm5120_devs[i]);
        }
    }

    dev->trans_start = jiffies;

    spin_unlock_irq(&tx_lock);

    return 0;
}

static void adm5120_if_tx_timeout(struct net_device *dev)
{
    SW_INFO("TX timeout on %s\n", dev->name);
}

static void adm5120_if_set_multicast_list(struct net_device *dev)
{
    struct adm5120_if_priv *priv = netdev_priv(dev);
    u32 ports;
    u32 t;

    ports = adm5120_eth_vlans[priv->vlan_no] & SWITCH_PORTS_NOCPU;

    t = sw_read_reg(SWITCH_REG_CPUP_CONF);
    if (dev->flags & IFF_PROMISC)
        /* enable unknown packets */
        t &= ~(ports << CPUP_CONF_DUNP_SHIFT);
    else
        /* disable unknown packets */
        t |= (ports << CPUP_CONF_DUNP_SHIFT);

    if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
                    dev->mc_count)
        /* enable multicast packets */
        t &= ~(ports << CPUP_CONF_DMCP_SHIFT);
    else
        /* disable multicast packets */
        t |= (ports << CPUP_CONF_DMCP_SHIFT);

    /*
     * If any port is configured to be in promiscuous mode, Bridge Test
     * Mode has to be activated, so that packets learned in another VLAN
     * are still forwarded to the CPU.
     *
     * The difficult scenario is building a bridge on the CPU. Assume
     * port 0 and the CPU port are in VLAN0, port 1 and the CPU port are
     * in VLAN1, and the CPU bridges VLAN0 and VLAN1 with both VLAN
     * interfaces in promiscuous mode. When a packet with Ethernet
     * source address 99 enters port 0, it is forwarded to the CPU
     * because it is unknown; the CPU bridge then sends it out via
     * VLAN1, port 1. When a packet with Ethernet destination address 99
     * later arrives at port 1 in VLAN1, the switch has learned that
     * this address lives at port 0 in VLAN0 and would drop the packet.
     * Bridge Test Mode keeps such packets going to the CPU.
     */

    /* Check if there is any vlan in promisc mode. */
    if (~t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
        t |= CPUP_CONF_BTM; /* Enable Bridge Testing Mode */
    else
        t &= ~CPUP_CONF_BTM; /* Disable Bridge Testing Mode */

    sw_write_reg(SWITCH_REG_CPUP_CONF, t);
}

static int adm5120_if_set_mac_address(struct net_device *dev, void *p)
{
    int ret;

    ret = eth_mac_addr(dev, p);
    if (ret)
        return ret;

    adm5120_write_mac(dev);
    return 0;
}

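/*
 * Illustrative (untested) userspace sketch for the private ioctls below,
 * assuming adm5120sw.h exports SIOCGADMINFO/SIOCGMATRIX/SIOCSMATRIX and
 * that the VLAN matrix is six bytes, one port bitmap per VLAN:
 *
 *    int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *    char matrix[6];
 *    struct ifreq ifr;
 *
 *    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *    ifr.ifr_data = (void *) matrix;
 *    ioctl(fd, SIOCGMATRIX, &ifr);    // read the current matrix
 *    matrix[0] |= (1 << 1);           // add port 1 to VLAN0
 *    ioctl(fd, SIOCSMATRIX, &ifr);    // write back (needs CAP_NET_ADMIN)
 */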
static int adm5120_if_do_ioctl(struct net_device *dev, struct ifreq *rq,
        int cmd)
{
    int err;
    struct adm5120_sw_info info;
    struct adm5120_if_priv *priv = netdev_priv(dev);

    switch (cmd) {
    case SIOCGADMINFO:
        info.magic = 0x5120;
        info.ports = adm5120_nrdevs;
        info.vlan = priv->vlan_no;
        err = copy_to_user(rq->ifr_data, &info, sizeof(info));
        if (err)
            return -EFAULT;
        break;
    case SIOCSMATRIX:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        err = copy_from_user(adm5120_eth_vlans, rq->ifr_data,
                    sizeof(adm5120_eth_vlans));
        if (err)
            return -EFAULT;
        adm5120_set_vlan(adm5120_eth_vlans);
        break;
    case SIOCGMATRIX:
        err = copy_to_user(rq->ifr_data, adm5120_eth_vlans,
                    sizeof(adm5120_eth_vlans));
        if (err)
            return -EFAULT;
        break;
    default:
        return -EOPNOTSUPP;
    }
    return 0;
}

static const struct net_device_ops adm5120sw_netdev_ops = {
    .ndo_open = adm5120_if_open,
    .ndo_stop = adm5120_if_stop,
    .ndo_start_xmit = adm5120_if_hard_start_xmit,
    .ndo_set_multicast_list = adm5120_if_set_multicast_list,
    .ndo_do_ioctl = adm5120_if_do_ioctl,
    .ndo_tx_timeout = adm5120_if_tx_timeout,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_change_mtu = eth_change_mtu,
    .ndo_set_mac_address = adm5120_if_set_mac_address,
};

static struct net_device *adm5120_if_alloc(void)
{
    struct net_device *dev;
    struct adm5120_if_priv *priv;

    dev = alloc_etherdev(sizeof(*priv));
    if (!dev)
        return NULL;

    priv = netdev_priv(dev);
    priv->dev = dev;

    dev->irq = ADM5120_IRQ_SWITCH;
    dev->netdev_ops = &adm5120sw_netdev_ops;
    dev->watchdog_timeo = TX_TIMEOUT;

#ifdef CONFIG_ADM5120_SWITCH_NAPI
    netif_napi_add(dev, &priv->napi, adm5120_if_poll, 64);
#endif

    return dev;
}

/* ------------------------------------------------------------------------ */

static void adm5120_switch_cleanup(void)
{
    int i;

    /* disable interrupts */
    sw_int_mask(SWITCH_INTS_ALL);

    for (i = 0; i < SWITCH_NUM_PORTS; i++) {
        struct net_device *dev = adm5120_devs[i];
        if (dev) {
            unregister_netdev(dev);
            free_netdev(dev);
        }
    }

    adm5120_switch_tx_ring_free();
    adm5120_switch_rx_ring_free();
}

static int __init adm5120_switch_probe(struct platform_device *pdev)
{
    u32 t;
    int i, err;

    adm5120_nrdevs = adm5120_eth_num_ports;

    t = CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
        SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT |
        SWITCH_PORTS_NOCPU << CPUP_CONF_DMCP_SHIFT;
    sw_write_reg(SWITCH_REG_CPUP_CONF, t);

    t = (SWITCH_PORTS_NOCPU << PORT_CONF0_EMCP_SHIFT) |
        (SWITCH_PORTS_NOCPU << PORT_CONF0_BP_SHIFT) |
        (SWITCH_PORTS_NOCPU);
    sw_write_reg(SWITCH_REG_PORT_CONF0, t);

    /* setup ports to Autoneg/100M/Full duplex/Auto MDIX */
    t = SWITCH_PORTS_PHY |
        (SWITCH_PORTS_PHY << PHY_CNTL2_SC_SHIFT) |
        (SWITCH_PORTS_PHY << PHY_CNTL2_DC_SHIFT) |
        (SWITCH_PORTS_PHY << PHY_CNTL2_PHYR_SHIFT) |
        (SWITCH_PORTS_PHY << PHY_CNTL2_AMDIX_SHIFT) |
        PHY_CNTL2_RMAE;
    sw_write_reg(SWITCH_REG_PHY_CNTL2, t);

    t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
    t |= PHY_CNTL3_RNT;
    sw_write_reg(SWITCH_REG_PHY_CNTL3, t);
    /* force all packets from all ports into the low-priority queue */
    sw_write_reg(SWITCH_REG_PRI_CNTL, 0);

    sw_int_mask(SWITCH_INTS_ALL);
    sw_int_ack(SWITCH_INTS_ALL);

    err = adm5120_switch_rx_ring_alloc();
    if (err)
        goto err;

    err = adm5120_switch_tx_ring_alloc();
    if (err)
        goto err;

    adm5120_switch_tx_ring_reset(txl_descs, txl_skbuff, TX_RING_SIZE);
    adm5120_switch_rx_ring_reset(rxl_descs, rxl_skbuff, RX_RING_SIZE);

    sw_write_reg(SWITCH_REG_SHDA, 0);
    sw_write_reg(SWITCH_REG_SLDA, KSEG1ADDR(txl_descs));
    sw_write_reg(SWITCH_REG_RHDA, 0);
    sw_write_reg(SWITCH_REG_RLDA, KSEG1ADDR(rxl_descs));

    for (i = 0; i < SWITCH_NUM_PORTS; i++) {
        struct net_device *dev;
        struct adm5120_if_priv *priv;

        dev = adm5120_if_alloc();
        if (!dev) {
            err = -ENOMEM;
            goto err;
        }

        adm5120_devs[i] = dev;
        priv = netdev_priv(dev);

        priv->vlan_no = i;
        priv->port_mask = adm5120_eth_vlans[i];

        memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
        adm5120_write_mac(dev);

        err = register_netdev(dev);
        if (err) {
            SW_INFO("%s register failed, error=%d\n",
                    dev->name, err);
            goto err;
        }
    }

    /* setup vlan/port mapping after devs are filled up */
    adm5120_set_vlan(adm5120_eth_vlans);

    /* enable CPU port */
    t = sw_read_reg(SWITCH_REG_CPUP_CONF);
    t &= ~CPUP_CONF_DCPUP;
    sw_write_reg(SWITCH_REG_CPUP_CONF, t);

    return 0;

err:
    adm5120_switch_cleanup();

    SW_ERR("init failed\n");
    return err;
}

static int adm5120_switch_remove(struct platform_device *dev)
{
    adm5120_switch_cleanup();
    return 0;
}

static struct platform_driver adm5120_switch_driver = {
    .probe = adm5120_switch_probe,
    .remove = adm5120_switch_remove,
    .driver = {
        .name = DRV_NAME,
    },
};

/* ------------------------------------------------------------------------ */

static int __init adm5120_switch_mod_init(void)
{
    int err;

    pr_info(DRV_DESC " version " DRV_VERSION "\n");
    err = platform_driver_register(&adm5120_switch_driver);

    return err;
}

static void __exit adm5120_switch_mod_exit(void)
{
    platform_driver_unregister(&adm5120_switch_driver);
}

module_init(adm5120_switch_mod_init);
module_exit(adm5120_switch_mod_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);

