/******************************************************************************
**
** FILE NAME : ifxmips_ptm_adsl.c
** PROJECT : UEIP
** MODULES : PTM
**
** DATE : 7 Jul 2009
** AUTHOR : Xu Liang
** DESCRIPTION : PTM driver common source file (core functions for Danube/
** Amazon-SE/AR9)
** COPYRIGHT : Copyright (c) 2006
** Infineon Technologies AG
** Am Campeon 1-12, 85579 Neubiberg, Germany
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** HISTORY
** $Date $Author $Comment
** 07 JUL 2009 Xu Liang Init Version
*******************************************************************************/



/*
 * ####################################
 * Header Files
 * ####################################
 */

/*
 * Common Header Files
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <asm/io.h>

/*
 * Chip Specific Header File
 */
#include "ifxmips_ptm_adsl.h"


#include <lantiq_soc.h>

/*
 * ####################################
 * Kernel Version Adaptation
 * ####################################
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
  #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
  #define MODULE_PARM(a, b) module_param(a, int, 0)
#else
  #define MODULE_PARM_ARRAY(a, b) MODULE_PARM(a, b)
#endif



/*
 * ####################################
 * Parameters to Configure PPE
 * ####################################
 */

static int write_desc_delay = 0x20; /* Write descriptor delay */

static int rx_max_packet_size = ETH_MAX_FRAME_LENGTH;
                                                /* Max packet size for RX */

static int dma_rx_descriptor_length = 24; /* Number of descriptors per DMA RX channel */
static int dma_tx_descriptor_length = 24; /* Number of descriptors per DMA TX channel */

static int eth_efmtc_crc_cfg = 0x03100710; /* default: tx_eth_crc_check: 1, tx_tc_crc_check: 1, tx_tc_crc_len = 16 */
                                                /* rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */
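/* The packed default above is written verbatim to the CFG_ETH_EFMTC_CRC
 * register in init_tables(); the individual fields can be changed at runtime
 * through the IFX_PTM_CFG_SET ioctl (see ptm_ioctl() below). */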

MODULE_PARM(write_desc_delay, "i");
MODULE_PARM_DESC(write_desc_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM");

MODULE_PARM(rx_max_packet_size, "i");
MODULE_PARM_DESC(rx_max_packet_size, "Max packet size in bytes for downstream ethernet frames");

MODULE_PARM(dma_rx_descriptor_length, "i");
MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptors assigned to DMA RX channel (>16)");
MODULE_PARM(dma_tx_descriptor_length, "i");
MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptors assigned to DMA TX channel (>16)");

MODULE_PARM(eth_efmtc_crc_cfg, "i");
MODULE_PARM_DESC(eth_efmtc_crc_cfg, "Configuration for PTM TX/RX ethernet/efm-tc CRC");



/*
 * ####################################
 * Definition
 * ####################################
 */


#define DUMP_SKB_LEN ~0



/*
 * ####################################
 * Declaration
 * ####################################
 */

/*
 * Network Operations
 */
static void ptm_setup(struct net_device *, int);
static struct net_device_stats *ptm_get_stats(struct net_device *);
static int ptm_open(struct net_device *);
static int ptm_stop(struct net_device *);
  static unsigned int ptm_poll(int, unsigned int);
  static int ptm_napi_poll(struct napi_struct *, int);
static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
static int ptm_ioctl(struct net_device *, struct ifreq *, int);
static void ptm_tx_timeout(struct net_device *);

/*
 * DSL Data LED
 */
static INLINE void adsl_led_flash(void);

/*
 * Buffer management functions
 */
static INLINE struct sk_buff* alloc_skb_rx(void);
//static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
static INLINE int get_tx_desc(unsigned int, unsigned int *);

/*
 * Mailbox handler and signal function
 */
static INLINE int mailbox_rx_irq_handler(unsigned int);
static irqreturn_t mailbox_irq_handler(int, void *);
static INLINE void mailbox_signal(unsigned int, int);
#ifdef CONFIG_IFX_PTM_RX_TASKLET
  static void do_ptm_tasklet(unsigned long);
#endif

/*
 * Debug Functions
 */
#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
  static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
#else
  #define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
#endif
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
  static void skb_swap(struct sk_buff *);
#else
  #define skb_swap(skb) do {} while (0)
#endif

/*
 * Proc File Functions
 */
static INLINE void proc_file_create(void);
static INLINE void proc_file_delete(void);
static int proc_read_version(char *, char **, off_t, int, int *, void *);
static int proc_read_wanmib(char *, char **, off_t, int, int *, void *);
static int proc_write_wanmib(struct file *, const char *, unsigned long, void *);
#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
  static int proc_read_genconf(char *, char **, off_t, int, int *, void *);
#endif
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
  static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
  static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
#endif

/*
 * Proc Help Functions
 */
static INLINE int stricmp(const char *, const char *);
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
  static INLINE int strincmp(const char *, const char *, int);
#endif
static INLINE int ifx_ptm_version(char *);

/*
 * Init & clean-up functions
 */
static INLINE void check_parameters(void);
static INLINE int init_priv_data(void);
static INLINE void clear_priv_data(void);
static INLINE void init_tables(void);

/*
 * External Functions
 */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
  extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
#else
  static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
  {
    if ( is_showtime != NULL )
        *is_showtime = 0;
    return 0;
  }
#endif

/*
 * External variable
 */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
  extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
  extern int (*ifx_mei_atm_showtime_exit)(void);
#else
  int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
  EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
  int (*ifx_mei_atm_showtime_exit)(void) = NULL;
  EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
#endif



/*
 * ####################################
 * Local Variable
 * ####################################
 */

static struct ptm_priv_data g_ptm_priv_data;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
static struct net_device_ops g_ptm_netdev_ops = {
    .ndo_get_stats = ptm_get_stats,
    .ndo_open = ptm_open,
    .ndo_stop = ptm_stop,
    .ndo_start_xmit = ptm_hard_start_xmit,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_change_mtu = eth_change_mtu,
    .ndo_do_ioctl = ptm_ioctl,
    .ndo_tx_timeout = ptm_tx_timeout,
};
#endif

static struct net_device *g_net_dev[2] = {0};
static char *g_net_dev_name[2] = {"ptm0", "ptmfast0"};

#ifdef CONFIG_IFX_PTM_RX_TASKLET
  static struct tasklet_struct g_ptm_tasklet[] = {
    {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 0},
    {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 1},
  };
#endif

unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;

static struct proc_dir_entry* g_ptm_dir = NULL;

static int g_showtime = 0;



/*
 * ####################################
 * Local Function
 * ####################################
 */

static void ptm_setup(struct net_device *dev, int ndev)
{
    /* hook network operations */
    dev->netdev_ops = &g_ptm_netdev_ops;
    netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 25);
    dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;

    dev->dev_addr[0] = 0x00;
    dev->dev_addr[1] = 0x20;
    dev->dev_addr[2] = 0xda;
    dev->dev_addr[3] = 0x86;
    dev->dev_addr[4] = 0x23;
    dev->dev_addr[5] = 0x75 + ndev;
}

static struct net_device_stats *ptm_get_stats(struct net_device *dev)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    g_ptm_priv_data.itf[ndev].stats.rx_errors = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu + WAN_MIB_TABLE[ndev].wrx_ethcrc_err_pdu;
    g_ptm_priv_data.itf[ndev].stats.rx_dropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu + (WAN_MIB_TABLE[ndev].wrx_correct_pdu - g_ptm_priv_data.itf[ndev].stats.rx_packets);

    return &g_ptm_priv_data.itf[ndev].stats;
}

static int ptm_open(struct net_device *dev)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    napi_enable(&g_ptm_priv_data.itf[ndev].napi);

    IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);

    netif_start_queue(dev);

    return 0;
}

static int ptm_stop(struct net_device *dev)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    IFX_REG_W32_MASK((1 << ndev) | (1 << (ndev + 16)), 0, MBOX_IGU1_IER);

    napi_disable(&g_ptm_priv_data.itf[ndev].napi);

    netif_stop_queue(dev);

    return 0;
}

static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
{
    unsigned int work_done = 0;

    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(ndev) < 0 )
            break;

        work_done++;
    }

    return work_done;
}

static int ptm_napi_poll(struct napi_struct *napi, int budget)
{
    int ndev;
    unsigned int work_done;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != napi->dev; ndev++ );

    work_done = ptm_poll(ndev, budget);

    // interface down
    if ( !netif_running(napi->dev) ) {
        napi_complete(napi);
        return work_done;
    }

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_ISRC);
        // double check
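        // Between clearing MBOX_IGU1_ISRC above and re-reading vlddes a new
        // packet may have arrived; only complete NAPI and unmask the RX
        // interrupt if the channel is still empty, otherwise keep polling.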
        if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
            napi_complete(napi);
            IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
            return work_done;
        }
    }

    // next round
    return work_done;
}

static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int ndev;
    unsigned int f_full;
    int desc_base;
    register struct tx_descriptor reg_desc = {0};

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    if ( !g_showtime ) {
        err("not in showtime");
        goto PTM_HARD_START_XMIT_FAIL;
    }

    /* allocate descriptor */
    desc_base = get_tx_desc(ndev, &f_full);
    if ( f_full ) {
        dev->trans_start = jiffies;
        netif_stop_queue(dev);

        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_ISRC);
        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_IER);
    }
    if ( desc_base < 0 )
        goto PTM_HARD_START_XMIT_FAIL;

    if ( g_ptm_priv_data.itf[ndev].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_ptm_priv_data.itf[ndev].tx_skb[desc_base]);
    g_ptm_priv_data.itf[ndev].tx_skb[desc_base] = skb;

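    /* Fill the TX descriptor: dataptr holds the word (4-byte) address of the
     * payload, byteoff the sub-word offset of skb->data, and sop/eop mark a
     * single-descriptor frame; setting own hands the descriptor to the PPE. */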
    reg_desc.dataptr = (unsigned int)skb->data >> 2;
    reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
    reg_desc.byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
    reg_desc.own = 1;
    reg_desc.c = 1;
    reg_desc.sop = reg_desc.eop = 1;

    /* write descriptor to memory and write back cache */
    g_ptm_priv_data.itf[ndev].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);
    wmb();

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 1);

    if ( (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ) {
        skb_swap(skb);
    }

    g_ptm_priv_data.itf[ndev].stats.tx_packets++;
    g_ptm_priv_data.itf[ndev].stats.tx_bytes += reg_desc.datalen;

    dev->trans_start = jiffies;
    mailbox_signal(ndev, 1);

    adsl_led_flash();

    return NETDEV_TX_OK;

PTM_HARD_START_XMIT_FAIL:
    dev_kfree_skb_any(skb);
    g_ptm_priv_data.itf[ndev].stats.tx_dropped++;
    return NETDEV_TX_OK;
}

static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    switch ( cmd )
    {
    case IFX_PTM_MIB_CW_GET:
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = WAN_MIB_TABLE[ndev].wrx_nonidle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = WAN_MIB_TABLE[ndev].wrx_idle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = WAN_MIB_TABLE[ndev].wrx_err_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = 0;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = 0;
        break;
    case IFX_PTM_MIB_FRAME_GET:
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxCorrect = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TC_CrcError = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxDropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TxSend = WAN_MIB_TABLE[ndev].wtx_total_pdu;
        break;
    case IFX_PTM_CFG_GET:
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = CFG_ETH_EFMTC_CRC->rx_eth_crc_present;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = CFG_ETH_EFMTC_CRC->rx_eth_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = CFG_ETH_EFMTC_CRC->rx_tc_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = CFG_ETH_EFMTC_CRC->rx_tc_crc_len;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = CFG_ETH_EFMTC_CRC->tx_eth_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = CFG_ETH_EFMTC_CRC->tx_tc_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = CFG_ETH_EFMTC_CRC->tx_tc_crc_len;
        break;
    case IFX_PTM_CFG_SET:
        CFG_ETH_EFMTC_CRC->rx_eth_crc_present = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent ? 1 : 0;
        CFG_ETH_EFMTC_CRC->rx_eth_crc_check = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck && (((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 32) )
        {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 1;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen;
        }
        else
        {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 0;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len = 0;
        }
        CFG_ETH_EFMTC_CRC->tx_eth_crc_gen = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen && (((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 32) )
        {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 1;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen;
        }
        else
        {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 0;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = 0;
        }
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}

static void ptm_tx_timeout(struct net_device *dev)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    /* disable TX irq, release skb when sending new packet */
    IFX_REG_W32_MASK(1 << (ndev + 16), 0, MBOX_IGU1_IER);

    /* wake up TX queue */
    netif_wake_queue(dev);

    return;
}

static INLINE void adsl_led_flash(void)
{
}

static INLINE struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be burst length alignment and reserve two more bytes for MAC address alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
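        /* the expression above evaluates to the number of bytes needed to
         * round skb->data up to the next DATA_BUFFER_ALIGNMENT boundary */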
        /* put the skb pointer in the reserved area just below skb->data ("skb->data - 4") */
        *((struct sk_buff **)skb->data - 1) = skb;
        wmb();
        /* write back and invalidate cache */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}

#if 0
static INLINE struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding */
    size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be burst length alignment */
    if ( skb != NULL )
        skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
    return skb;
}
#endif

static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

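    /* the descriptor's dataptr is the word (4-byte) address of skb->data;
     * alloc_skb_rx() stored the skb pointer in the 4 bytes right before the
     * data buffer, so (dataptr - 1) << 2 points at that stored pointer
     * (accessed through KSEG1, i.e. uncached) */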
    skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}

static INLINE int get_tx_desc(unsigned int itf, unsigned int *f_full)
{
    int desc_base = -1;
    struct ptm_itf *p_itf = &g_ptm_priv_data.itf[itf];

    // assume TX is serial operation
    // no protection provided
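    // If the PPE has released the descriptor at the current position (own == 0),
    // hand that slot out and advance the ring index; report "full" unless the
    // next descriptor has been released as well.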

    *f_full = 1;

    if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 ) {
        desc_base = p_itf->tx_desc_pos;
        if ( ++(p_itf->tx_desc_pos) == dma_tx_descriptor_length )
            p_itf->tx_desc_pos = 0;
        if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 )
            *f_full = 0;
    }

    return desc_base;
}

static INLINE int mailbox_rx_irq_handler(unsigned int ch) // return: < 0 - descriptor not available, 0 - received one packet
{
    unsigned int ndev = ch;
    struct sk_buff *skb;
    struct sk_buff *new_skb;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    int netif_rx_ret;

    desc = &g_ptm_priv_data.itf[ndev].rx_desc[g_ptm_priv_data.itf[ndev].rx_desc_pos];
    if ( desc->own || !desc->c ) // if PP32 holds the descriptor or the descriptor is not completed
        return -EAGAIN;
    if ( ++g_ptm_priv_data.itf[ndev].rx_desc_pos == dma_rx_descriptor_length )
        g_ptm_priv_data.itf[ndev].rx_desc_pos = 0;

    reg_desc = *desc;
    skb = get_skb_rx_pointer(reg_desc.dataptr);

    if ( !reg_desc.err ) {
        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);

            dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 0);

            // parse protocol header
            skb->dev = g_net_dev[ndev];
            skb->protocol = eth_type_trans(skb, skb->dev);

            g_net_dev[ndev]->last_rx = jiffies;

            netif_rx_ret = netif_receive_skb(skb);

            if ( netif_rx_ret != NET_RX_DROP ) {
                g_ptm_priv_data.itf[ndev].stats.rx_packets++;
                g_ptm_priv_data.itf[ndev].stats.rx_bytes += reg_desc.datalen;
            }

            reg_desc.dataptr = ((unsigned int)new_skb->data >> 2) & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }
    }
    else
        reg_desc.err = 0;

    reg_desc.datalen = rx_max_packet_size;
    reg_desc.own = 1;
    reg_desc.c = 0;

    // update descriptor
    *desc = reg_desc;
    wmb();

    mailbox_signal(ndev, 0);

    adsl_led_flash();

    return 0;
}

static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    unsigned int isr;
    int i;

    isr = IFX_REG_R32(MBOX_IGU1_ISR);
    IFX_REG_W32(isr, MBOX_IGU1_ISRC);
    isr &= IFX_REG_R32(MBOX_IGU1_IER);

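    // MBOX_IGU1 bit layout as used below: bits 0..15 signal RX on the
    // corresponding channel, bits 16..31 signal TX completion for channel (i - 16).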
    while ( (i = __fls(isr)) >= 0 ) {
        isr ^= 1 << i;

        if ( i >= 16 ) {
            // TX
            IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
            i -= 16;
            if ( i < MAX_ITF_NUMBER )
                netif_wake_queue(g_net_dev[i]);
        }
        else {
            // RX
#ifdef CONFIG_IFX_PTM_RX_INTERRUPT
            while ( WRX_DMA_CHANNEL_CONFIG(i)->vlddes > 0 )
                mailbox_rx_irq_handler(i);
#else
            IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
            napi_schedule(&g_ptm_priv_data.itf[i].napi);
#endif
        }
    }

    return IRQ_HANDLED;
}

static INLINE void mailbox_signal(unsigned int itf, int is_tx)
{
    int count = 1000;

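    // Busy-wait (bounded) until the PPE has consumed the previous signal for
    // this channel before raising the corresponding MBOX_IGU3 ISR set bit.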
    if ( is_tx ) {
        while ( MBOX_IGU3_ISR_ISR(itf + 16) && count > 0 )
            count--;
        IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf + 16), MBOX_IGU3_ISRS);
    }
    else {
        while ( MBOX_IGU3_ISR_ISR(itf) && count > 0 )
            count--;
        IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf), MBOX_IGU3_ISRS);
    }

    ASSERT(count != 0, "MBOX_IGU3_ISR = 0x%08x", IFX_REG_R32(MBOX_IGU3_ISR));
}

#ifdef CONFIG_IFX_PTM_RX_TASKLET
static void do_ptm_tasklet(unsigned long arg)
{
    unsigned int work_to_do = 25;
    unsigned int work_done = 0;

    ASSERT(arg >= 0 && arg < ARRAY_SIZE(g_net_dev), "arg = %lu (wrong value)", arg);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(arg)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(arg) < 0 )
            break;

        work_done++;
    }

    // interface down
    if ( !netif_running(g_net_dev[arg]) )
        return;

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_ISRC);
        // double check
        if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
            IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_IER);
            return;
        }
    }

    // next round
    tasklet_schedule(&g_ptm_tasklet[arg]);
}
#endif

#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
{
    int i;

    if ( !(ifx_ptm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
        return;

    if ( skb->len < len )
        len = skb->len;

    if ( len > rx_max_packet_size ) {
        printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
        return;
    }

    if ( ch >= 0 )
        printk("%s (port %d, ch %d)\n", title, port, ch);
    else
        printk("%s\n", title);
    printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
    for ( i = 1; i <= len; i++ ) {
        if ( i % 16 == 1 )
            printk(" %4d:", i - 1);
        printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF));
        if ( i % 16 == 0 )
            printk("\n");
    }
    if ( (i - 1) % 16 != 0 )
        printk("\n");
}
#endif

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static void skb_swap(struct sk_buff *skb)
{
    unsigned char tmp[8];
    unsigned char *p = skb->data;

    if ( !(p[0] & 0x01) ) { // bypass broadcast/multicast
        // swap MAC
        memcpy(tmp, p, 6);
        memcpy(p, p + 6, 6);
        memcpy(p + 6, tmp, 6);
        p += 12;

        // bypass VLAN
        while ( p[0] == 0x81 && p[1] == 0x00 )
            p += 4;

        // IP
        if ( p[0] == 0x08 && p[1] == 0x00 ) {
            p += 14;
            memcpy(tmp, p, 4);
            memcpy(p, p + 4, 4);
            memcpy(p + 4, tmp, 4);
            p += 8;
        }

        dma_cache_wback((unsigned long)skb->data, (unsigned long)p - (unsigned long)skb->data);
    }
}
#endif

static INLINE void proc_file_create(void)
{
    struct proc_dir_entry *res;

    g_ptm_dir = proc_mkdir("driver/ifx_ptm", NULL);

    create_proc_read_entry("version",
                            0,
                            g_ptm_dir,
                            proc_read_version,
                            NULL);

    res = create_proc_entry("wanmib",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc = proc_read_wanmib;
        res->write_proc = proc_write_wanmib;
    }

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
    create_proc_read_entry("genconf",
                            0,
                            g_ptm_dir,
                            proc_read_genconf,
                            NULL);

  #ifdef CONFIG_AR9
    create_proc_read_entry("regs",
                            0,
                            g_ptm_dir,
                            ifx_ptm_proc_read_regs,
                            NULL);
  #endif
#endif

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    res = create_proc_entry("dbg",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc = proc_read_dbg;
        res->write_proc = proc_write_dbg;
    }
#endif
}

static INLINE void proc_file_delete(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    remove_proc_entry("dbg", g_ptm_dir);
#endif

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
  #ifdef CONFIG_AR9
    remove_proc_entry("regs", g_ptm_dir);
  #endif

    remove_proc_entry("genconf", g_ptm_dir);
#endif

    remove_proc_entry("wanmib", g_ptm_dir);

    remove_proc_entry("version", g_ptm_dir);

    remove_proc_entry("driver/ifx_ptm", NULL);
}

static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
{
    int len = 0;

    len += ifx_ptm_version(buf + len);

    if ( offset >= len ) {
        *start = buf;
        *eof = 1;
        return 0;
    }
    *start = buf + offset;
    if ( (len -= offset) > count )
        return count;
    *eof = 1;
    return len;
}

static int proc_read_wanmib(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;
    int i;
    char *title[] = {
        "ptm0\n",
        "ptmfast0\n"
    };

    for ( i = 0; i < ARRAY_SIZE(title); i++ ) {
        len += sprintf(page + off + len, title[i]);
        len += sprintf(page + off + len, " wrx_correct_pdu = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu);
        len += sprintf(page + off + len, " wrx_correct_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu_bytes);
        len += sprintf(page + off + len, " wrx_tccrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu);
        len += sprintf(page + off + len, " wrx_tccrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu_bytes);
        len += sprintf(page + off + len, " wrx_ethcrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu);
        len += sprintf(page + off + len, " wrx_ethcrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu_bytes);
        len += sprintf(page + off + len, " wrx_nodesc_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_nodesc_drop_pdu);
        len += sprintf(page + off + len, " wrx_len_violation_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_len_violation_drop_pdu);
        len += sprintf(page + off + len, " wrx_idle_bytes = %d\n", WAN_MIB_TABLE[i].wrx_idle_bytes);
        len += sprintf(page + off + len, " wrx_nonidle_cw = %d\n", WAN_MIB_TABLE[i].wrx_nonidle_cw);
        len += sprintf(page + off + len, " wrx_idle_cw = %d\n", WAN_MIB_TABLE[i].wrx_idle_cw);
        len += sprintf(page + off + len, " wrx_err_cw = %d\n", WAN_MIB_TABLE[i].wrx_err_cw);
        len += sprintf(page + off + len, " wtx_total_pdu = %d\n", WAN_MIB_TABLE[i].wtx_total_pdu);
        len += sprintf(page + off + len, " wtx_total_bytes = %d\n", WAN_MIB_TABLE[i].wtx_total_bytes);
    }

    *eof = 1;

    return len;
}

static int proc_write_wanmib(struct file *file, const char *buf, unsigned long count, void *data)
{
    char str[2048];
    char *p;
    int len, rlen;

    int i;

    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return count;

    if ( stricmp(p, "clear") == 0 || stricmp(p, "clean") == 0 ) {
        for ( i = 0; i < 2; i++ )
            memset((void*)&WAN_MIB_TABLE[i], 0, sizeof(WAN_MIB_TABLE[i]));
    }

    return count;
}

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC

static int proc_read_genconf(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;
    int len_max = off + count;
    char *pstr;
    char str[2048];
    int llen = 0;
    int i;
    unsigned long bit;

    pstr = *start = page;

    __sync();

    llen += sprintf(str + llen, "CFG_WAN_WRDES_DELAY (0x%08X): %d\n", (unsigned int)CFG_WAN_WRDES_DELAY, IFX_REG_R32(CFG_WAN_WRDES_DELAY));
    llen += sprintf(str + llen, "CFG_WRX_DMACH_ON (0x%08X):", (unsigned int)CFG_WRX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_RX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WRX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WTX_DMACH_ON (0x%08X):", (unsigned int)CFG_WTX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_TX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WTX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WRX_LOOK_BITTH (0x%08X): %d\n", (unsigned int)CFG_WRX_LOOK_BITTH, IFX_REG_R32(CFG_WRX_LOOK_BITTH));
    llen += sprintf(str + llen, "CFG_ETH_EFMTC_CRC (0x%08X): rx_tc_crc_len - %2d, rx_tc_crc_check - %s\n", (unsigned int)CFG_ETH_EFMTC_CRC, CFG_ETH_EFMTC_CRC->rx_tc_crc_len, CFG_ETH_EFMTC_CRC->rx_tc_crc_check ? " on" : "off");
    llen += sprintf(str + llen, " rx_eth_crc_check - %s, rx_eth_crc_present - %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_check ? " on" : "off", CFG_ETH_EFMTC_CRC->rx_eth_crc_present ? " on" : "off");
    llen += sprintf(str + llen, " tx_tc_crc_len - %2d, tx_tc_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_len, CFG_ETH_EFMTC_CRC->tx_tc_crc_gen ? " on" : "off");
    llen += sprintf(str + llen, " tx_eth_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_eth_crc_gen ? " on" : "off");

    llen += sprintf(str + llen, "RX Port:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). mfs - %5d, dmach - %d, local_state - %d, partner_state - %d\n", i, (unsigned int)WRX_PORT_CONFIG(i), WRX_PORT_CONFIG(i)->mfs, WRX_PORT_CONFIG(i)->dmach, WRX_PORT_CONFIG(i)->local_state, WRX_PORT_CONFIG(i)->partner_state);
    llen += sprintf(str + llen, "RX DMA Channel:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WRX_DMA_CHANNEL_CONFIG(i), WRX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WRX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WRX_DMA_CHANNEL_CONFIG(i)->deslen, WRX_DMA_CHANNEL_CONFIG(i)->vlddes);

    llen += sprintf(str + llen, "TX Port:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). tx_cwth2 - %d, tx_cwth1 - %d\n", i, (unsigned int)WTX_PORT_CONFIG(i), WTX_PORT_CONFIG(i)->tx_cwth2, WTX_PORT_CONFIG(i)->tx_cwth1);
    llen += sprintf(str + llen, "TX DMA Channel:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WTX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);

    if ( len <= off && len + llen > off )
    {
        memcpy(pstr, str + off - len, len + llen - off);
        pstr += len + llen - off;
    }
    else if ( len > off )
    {
        memcpy(pstr, str, llen);
        pstr += llen;
    }
    len += llen;
    if ( len >= len_max )
        goto PROC_READ_GENCONF_OVERRUN_END;

    *eof = 1;

    return len - off;

PROC_READ_GENCONF_OVERRUN_END:
    return len - llen - off;
}

#endif // defined(ENABLE_FW_PROC) && ENABLE_FW_PROC

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC

static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "error print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "debug print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "assert - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "mac swap - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ? "enabled" : "disabled");

    *eof = 1;

    return len;
}

static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
{
    static const char *dbg_enable_mask_str[] = {
        " error print",
        " err",
        " debug print",
        " dbg",
        " assert",
        " assert",
        " dump rx skb",
        " rx",
        " dump tx skb",
        " tx",
        " dump init",
        " init",
        " dump qos",
        " qos",
        " mac swap",
        " swap",
        " all"
    };
    static const int dbg_enable_mask_str_len[] = {
        12, 4,
        12, 4,
        7, 7,
        12, 3,
        12, 3,
        10, 5,
        9, 4,
        9, 5,
        4
    };
    unsigned int dbg_enable_mask[] = {
        DBG_ENABLE_MASK_ERR,
        DBG_ENABLE_MASK_DEBUG_PRINT,
        DBG_ENABLE_MASK_ASSERT,
        DBG_ENABLE_MASK_DUMP_SKB_RX,
        DBG_ENABLE_MASK_DUMP_SKB_TX,
        DBG_ENABLE_MASK_DUMP_INIT,
        DBG_ENABLE_MASK_DUMP_QOS,
        DBG_ENABLE_MASK_MAC_SWAP,
        DBG_ENABLE_MASK_ALL
    };

    char str[2048];
    char *p;

    int len, rlen;

    int f_enable = 0;
    int i;

    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return 0;

    // debugging feature for enter/leave showtime
    if ( strincmp(p, "enter", 5) == 0 && ifx_mei_atm_showtime_enter != NULL )
        ifx_mei_atm_showtime_enter(NULL, NULL);
    else if ( strincmp(p, "leave", 5) == 0 && ifx_mei_atm_showtime_exit != NULL )
        ifx_mei_atm_showtime_exit();

    if ( strincmp(p, "enable", 6) == 0 ) {
        p += 6;
        f_enable = 1;
    }
    else if ( strincmp(p, "disable", 7) == 0 ) {
        p += 7;
        f_enable = -1;
    }
    else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
        printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/qos/swap/all] > /proc/driver/ifx_ptm/dbg\n");
    }

    if ( f_enable ) {
        if ( *p == 0 ) {
            if ( f_enable > 0 )
                ifx_ptm_dbg_enable |= DBG_ENABLE_MASK_ALL & ~DBG_ENABLE_MASK_MAC_SWAP;
            else
                ifx_ptm_dbg_enable &= ~DBG_ENABLE_MASK_ALL | DBG_ENABLE_MASK_MAC_SWAP;
        }
        else {
            do {
                for ( i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++ )
                    if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
                        if ( f_enable > 0 )
                            ifx_ptm_dbg_enable |= dbg_enable_mask[i >> 1];
                        else
                            ifx_ptm_dbg_enable &= ~dbg_enable_mask[i >> 1];
                        p += dbg_enable_mask_str_len[i];
                        break;
                    }
            } while ( i < ARRAY_SIZE(dbg_enable_mask_str) );
        }
    }

    return count;
}

#endif // defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC

static INLINE int stricmp(const char *p1, const char *p2)
{
    int c1, c2;

    while ( *p1 && *p2 )
    {
        c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
        c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
        if ( (c1 -= c2) )
            return c1;
        p1++;
        p2++;
    }

    return *p1 - *p2;
}

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static INLINE int strincmp(const char *p1, const char *p2, int n)
{
    int c1 = 0, c2;

    while ( n && *p1 && *p2 )
    {
        c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
        c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
        if ( (c1 -= c2) )
            return c1;
        p1++;
        p2++;
        n--;
    }

    return n ? *p1 - *p2 : c1;
}
#endif

static INLINE int ifx_ptm_version(char *buf)
{
    int len = 0;
    unsigned int major, minor;

    ifx_ptm_get_fw_ver(&major, &minor);

    len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
    len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);

    return len;
}

static INLINE void check_parameters(void)
{
    /* There is a delay between the PPE writing a descriptor and the descriptor */
    /* actually being stored in memory. The host has the same delay when        */
    /* writing a descriptor. The PPE uses this value to decide whether a write  */
    /* operation has taken effect.                                              */
    if ( write_desc_delay < 0 )
        write_desc_delay = 0;

    /* Because of the limitation of the length field in the descriptors, the */
    /* packet size cannot be larger than 64K minus the overhead size.        */
    if ( rx_max_packet_size < ETH_MIN_FRAME_LENGTH )
        rx_max_packet_size = ETH_MIN_FRAME_LENGTH;
    else if ( rx_max_packet_size > 65536 - 1 )
        rx_max_packet_size = 65536 - 1;

    if ( dma_rx_descriptor_length < 2 )
        dma_rx_descriptor_length = 2;
    if ( dma_tx_descriptor_length < 2 )
        dma_tx_descriptor_length = 2;
}

static INLINE int init_priv_data(void)
{
    void *p;
    int i;
    struct rx_descriptor rx_desc = {0};
    struct sk_buff *skb;
    volatile struct rx_descriptor *p_rx_desc;
    volatile struct tx_descriptor *p_tx_desc;
    struct sk_buff **ppskb;

    // clear ptm private data structure
    memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));

    // allocate memory for RX descriptors
    p = kzalloc(MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.rx_desc_base = p;
    //p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);

    // allocate memory for TX descriptors
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.tx_desc_base = p;

    // allocate memory for TX skb pointers
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_wback_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
    g_ptm_priv_data.tx_skb_base = p;

    p_rx_desc = (volatile struct rx_descriptor *)((((unsigned int)g_ptm_priv_data.rx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_ptm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    ppskb = (struct sk_buff **)(((unsigned int)g_ptm_priv_data.tx_skb_base + 3) & ~3);
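    /* the descriptor rings are accessed through DESC_ALIGNMENT-aligned KSEG1
     * (uncached) addresses; the raw *_base pointers are only kept so that
     * clear_priv_data() can kfree() them later */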
    for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
        g_ptm_priv_data.itf[i].rx_desc = &p_rx_desc[i * dma_rx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_skb = &ppskb[i * dma_tx_descriptor_length];
    }

    rx_desc.own = 1;
    rx_desc.c = 0;
    rx_desc.sop = 1;
    rx_desc.eop = 1;
    rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
    rx_desc.id = 0;
    rx_desc.err = 0;
    rx_desc.datalen = rx_max_packet_size;
    for ( i = 0; i < MAX_ITF_NUMBER * dma_rx_descriptor_length; i++ ) {
        skb = alloc_skb_rx();
        if ( skb == NULL )
            return -1;
        rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
        p_rx_desc[i] = rx_desc;
    }

    return 0;
}

static INLINE void clear_priv_data(void)
{
    int i, j;
    struct sk_buff *skb;

    for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
        if ( g_ptm_priv_data.itf[i].tx_skb != NULL ) {
            for ( j = 0; j < dma_tx_descriptor_length; j++ )
                if ( g_ptm_priv_data.itf[i].tx_skb[j] != NULL )
                    dev_kfree_skb_any(g_ptm_priv_data.itf[i].tx_skb[j]);
        }
        if ( g_ptm_priv_data.itf[i].rx_desc != NULL ) {
            for ( j = 0; j < dma_rx_descriptor_length; j++ ) {
                if ( g_ptm_priv_data.itf[i].rx_desc[j].sop || g_ptm_priv_data.itf[i].rx_desc[j].eop ) { // descriptor initialized
                    skb = get_skb_rx_pointer(g_ptm_priv_data.itf[i].rx_desc[j].dataptr);
                    dev_kfree_skb_any(skb);
                }
            }
        }
    }

    if ( g_ptm_priv_data.rx_desc_base != NULL )
        kfree(g_ptm_priv_data.rx_desc_base);

    if ( g_ptm_priv_data.tx_desc_base != NULL )
        kfree(g_ptm_priv_data.tx_desc_base);

    if ( g_ptm_priv_data.tx_skb_base != NULL )
        kfree(g_ptm_priv_data.tx_skb_base);
}

static INLINE void init_tables(void)
{
    int i;
    volatile unsigned int *p;
    struct wrx_dma_channel_config rx_config = {0};
    struct wtx_dma_channel_config tx_config = {0};
    struct wrx_port_cfg_status rx_port_cfg = { 0 };
    struct wtx_port_cfg tx_port_cfg = { 0 };

    /*
     * CDM Block 1
     */
    IFX_REG_W32(CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00), CDM_CFG); // CDM block 1 must be data memory and mapped to 0x5000 (dword addr)
    p = CDM_DATA_MEMORY(0, 0); // Clear CDM block 1
    for ( i = 0; i < CDM_DATA_MEMORY_DWLEN; i++, p++ )
        IFX_REG_W32(0, p);

    /*
     * General Registers
     */
    IFX_REG_W32(write_desc_delay, CFG_WAN_WRDES_DELAY);
    IFX_REG_W32((1 << MAX_RX_DMA_CHANNEL_NUMBER) - 1, CFG_WRX_DMACH_ON);
    IFX_REG_W32((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1, CFG_WTX_DMACH_ON);

    IFX_REG_W32(8, CFG_WRX_LOOK_BITTH); // WAN RX EFM-TC Looking Threshold

    IFX_REG_W32(eth_efmtc_crc_cfg, CFG_ETH_EFMTC_CRC);

    /*
     * WRX DMA Channel Configuration Table
     */
    rx_config.deslen = dma_rx_descriptor_length;
    rx_port_cfg.mfs = ETH_MAX_FRAME_LENGTH;
    rx_port_cfg.local_state = 0; // looking for sync
    rx_port_cfg.partner_state = 0; // partner receiver is out of sync

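    /* desba takes the word (4-byte) address of each per-channel descriptor
     * ring set up in init_priv_data(); the TX channels below use the same
     * encoding */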
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ ) {
        rx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].rx_desc >> 2) & 0x0FFFFFFF;
        *WRX_DMA_CHANNEL_CONFIG(i) = rx_config;

        rx_port_cfg.dmach = i;
        *WRX_PORT_CONFIG(i) = rx_port_cfg;
    }

    /*
     * WTX DMA Channel Configuration Table
     */
    tx_config.deslen = dma_tx_descriptor_length;
    tx_port_cfg.tx_cwth1 = 5;
    tx_port_cfg.tx_cwth2 = 4;

    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) {
        tx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].tx_desc >> 2) & 0x0FFFFFFF;
        *WTX_DMA_CHANNEL_CONFIG(i) = tx_config;

        *WTX_PORT_CONFIG(i) = tx_port_cfg;
    }
}



/*
 * ####################################
 * Global Function
 * ####################################
 */

static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
{

    g_showtime = 1;

    printk("enter showtime\n");

    return 0;
}

static int ptm_showtime_exit(void)
{
    if ( !g_showtime )
        return -1;

    g_showtime = 0;

    printk("leave showtime\n");

    return 0;
}



/*
 * ####################################
 * Init/Cleanup API
 * ####################################
 */

/*
 * Description:
 * Initialize global variables, PP32, communication structures, register IRQ
 * and register device.
 * Input:
 * none
 * Output:
 * 0 --- successful
 * else --- failure, usually a negative error code
 */
static int __devinit ifx_ptm_init(void)
{
    int ret;
    struct port_cell_info port_cell = {0};
    void *xdata_addr = NULL;
    int i;
    char ver_str[256];

    check_parameters();

    ret = init_priv_data();
    if ( ret != 0 ) {
        err("INIT_PRIV_DATA_FAIL");
        goto INIT_PRIV_DATA_FAIL;
    }

    ifx_ptm_init_chip();
    init_tables();

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], ether_setup);
        if ( g_net_dev[i] == NULL )
            goto ALLOC_NETDEV_FAIL;
        ptm_setup(g_net_dev[i], i);
    }

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        ret = register_netdev(g_net_dev[i]);
        if ( ret != 0 )
            goto REGISTER_NETDEV_FAIL;
    }

    /* register interrupt handler */
    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
    if ( ret ) {
        if ( ret == -EBUSY ) {
            err("IRQ may be occupied by other driver, please reconfig to disable it.");
        }
        else {
            err("request_irq fail");
        }
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
    }
    disable_irq(PPE_MAILBOX_IGU1_INT);

    ret = ifx_pp32_start(0);
    if ( ret ) {
        err("ifx_pp32_start fail!");
        goto PP32_START_FAIL;
    }
    IFX_REG_W32(0, MBOX_IGU1_IER);
    IFX_REG_W32(~0, MBOX_IGU1_ISRC);

    enable_irq(PPE_MAILBOX_IGU1_INT);


    proc_file_create();

    port_cell.port_num = 1;
    ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &xdata_addr);

    ifx_mei_atm_showtime_enter = ptm_showtime_enter;
    ifx_mei_atm_showtime_exit = ptm_showtime_exit;

    ifx_ptm_version(ver_str);
    printk(KERN_INFO "%s", ver_str);

    printk("ifxmips_ptm: PTM init succeed\n");

    return 0;

PP32_START_FAIL:
    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
    i = ARRAY_SIZE(g_net_dev);
REGISTER_NETDEV_FAIL:
    while ( i-- )
        unregister_netdev(g_net_dev[i]);
    i = ARRAY_SIZE(g_net_dev);
ALLOC_NETDEV_FAIL:
    while ( i-- ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }
INIT_PRIV_DATA_FAIL:
    clear_priv_data();
    printk("ifxmips_ptm: PTM init failed\n");
    return ret;
}

/*
 * Description:
 * Release memory, free IRQ, and deregister device.
 * Input:
 * none
 * Output:
 * none
 */
static void __exit ifx_ptm_exit(void)
{
    int i;

    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit = NULL;

    proc_file_delete();


    ifx_pp32_stop(0);

    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        unregister_netdev(g_net_dev[i]);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }

    ifx_ptm_uninit_chip();

    clear_priv_data();
}

module_init(ifx_ptm_init);
module_exit(ifx_ptm_exit);
