Root/package/ltq-dsl/src/ifxmips_atm_core.c

1/******************************************************************************
2**
3** FILE NAME : ifxmips_atm_core.c
4** PROJECT : UEIP
5** MODULES : ATM
6**
7** DATE : 7 Jul 2009
8** AUTHOR : Xu Liang
9** DESCRIPTION : ATM driver common source file (core functions)
10** COPYRIGHT : Copyright (c) 2006
11** Infineon Technologies AG
12** Am Campeon 1-12, 85579 Neubiberg, Germany
13**
14** This program is free software; you can redistribute it and/or modify
15** it under the terms of the GNU General Public License as published by
16** the Free Software Foundation; either version 2 of the License, or
17** (at your option) any later version.
18**
19** HISTORY
20** $Date $Author $Comment
21** 07 JUL 2009 Xu Liang Init Version
22*******************************************************************************/
23
24
25
26/*
27 * ####################################
28 * Version No.
29 * ####################################
30 */
31
32#define IFX_ATM_VER_MAJOR 1
33#define IFX_ATM_VER_MID 0
34#define IFX_ATM_VER_MINOR 8
35
36
37
38/*
39 * ####################################
40 * Head File
41 * ####################################
42 */
43
44/*
45 * Common Head File
46 */
47#include <linux/kernel.h>
48#include <linux/module.h>
49#include <linux/version.h>
50#include <linux/types.h>
51#include <linux/errno.h>
52#include <linux/proc_fs.h>
53#include <linux/init.h>
54#include <linux/ioctl.h>
55#include <linux/atmdev.h>
56#include <linux/atm.h>
57#include <linux/clk.h>
58
59/*
60 * Chip Specific Head File
61 */
62#include <lantiq_soc.h>
63#include "ifxmips_atm_core.h"
64
65
66
67/*
68 * ####################################
69 * Kernel Version Adaption
70 * ####################################
71 */
72#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
73  #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
74  #define MODULE_PARM(a, b) module_param(a, int, 0)
75#else
76  #define MODULE_PARM_ARRAY(a, b) MODULE_PARM(a, b)
77#endif
78
79
80
81/*!
82  \addtogroup IFXMIPS_ATM_MODULE_PARAMS
83 */
84/*@{*/
85/*
86 * ####################################
87 * Parameters to Configure PPE
88 * ####################################
89 */
90/*!
91  \brief QSB cell delay variation due to concurrency
92 */
93static int qsb_tau = 1; /* QSB cell delay variation due to concurrency */
94/*!
95  \brief QSB scheduler burst length
96 */
97static int qsb_srvm = 0x0F; /* QSB scheduler burst length */
98/*!
99  \brief QSB time step, all legal values are 1, 2, 4
100 */
101static int qsb_tstep = 4 ; /* QSB time step, all legal values are 1, 2, 4 */
102
103/*!
104  \brief Write descriptor delay
105 */
106static int write_descriptor_delay = 0x20; /* Write descriptor delay */
107
108/*!
109  \brief AAL5 padding byte ('~')
110 */
111static int aal5_fill_pattern = 0x007E; /* AAL5 padding byte ('~') */
112/*!
113  \brief Max frame size for RX
114 */
115static int aal5r_max_packet_size = 0x0700; /* Max frame size for RX */
116/*!
117  \brief Min frame size for RX
118 */
119static int aal5r_min_packet_size = 0x0000; /* Min frame size for RX */
120/*!
121  \brief Max frame size for TX
122 */
123static int aal5s_max_packet_size = 0x0700; /* Max frame size for TX */
124/*!
125  \brief Min frame size for TX
126 */
127static int aal5s_min_packet_size = 0x0000; /* Min frame size for TX */
128/*!
129  \brief Drop error packet in RX path
130 */
131static int aal5r_drop_error_packet = 1; /* Drop error packet in RX path */
132
133/*!
134  \brief Number of descriptors per DMA RX channel
135 */
136static int dma_rx_descriptor_length = 128; /* Number of descriptors per DMA RX channel */
137/*!
138  \brief Number of descriptors per DMA TX channel
139 */
140static int dma_tx_descriptor_length = 64; /* Number of descriptors per DMA TX channel */
141/*!
142  \brief PPE core clock cycles between descriptor write and effectiveness in external RAM
143 */
144static int dma_rx_clp1_descriptor_threshold = 38;
145/*@}*/
146
147MODULE_PARM(qsb_tau, "i");
148MODULE_PARM_DESC(qsb_tau, "Cell delay variation. Value must be > 0");
149MODULE_PARM(qsb_srvm, "i");
150MODULE_PARM_DESC(qsb_srvm, "Maximum burst size");
151MODULE_PARM(qsb_tstep, "i");
152MODULE_PARM_DESC(qsb_tstep, "n*32 cycles per sbs cycles n=1,2,4");
153
154MODULE_PARM(write_descriptor_delay, "i");
155MODULE_PARM_DESC(write_descriptor_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM");
156
157MODULE_PARM(aal5_fill_pattern, "i");
158MODULE_PARM_DESC(aal5_fill_pattern, "Filling pattern (PAD) for AAL5 frames");
159MODULE_PARM(aal5r_max_packet_size, "i");
160MODULE_PARM_DESC(aal5r_max_packet_size, "Max packet size in byte for downstream AAL5 frames");
161MODULE_PARM(aal5r_min_packet_size, "i");
162MODULE_PARM_DESC(aal5r_min_packet_size, "Min packet size in byte for downstream AAL5 frames");
163MODULE_PARM(aal5s_max_packet_size, "i");
164MODULE_PARM_DESC(aal5s_max_packet_size, "Max packet size in byte for upstream AAL5 frames");
165MODULE_PARM(aal5s_min_packet_size, "i");
166MODULE_PARM_DESC(aal5s_min_packet_size, "Min packet size in byte for upstream AAL5 frames");
167MODULE_PARM(aal5r_drop_error_packet, "i");
168MODULE_PARM_DESC(aal5r_drop_error_packet, "Non-zero value to drop error packet for downstream");
169
170MODULE_PARM(dma_rx_descriptor_length, "i");
171MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptor assigned to DMA RX channel (>16)");
172MODULE_PARM(dma_tx_descriptor_length, "i");
173MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptor assigned to DMA TX channel (>16)");
174MODULE_PARM(dma_rx_clp1_descriptor_threshold, "i");
175MODULE_PARM_DESC(dma_rx_clp1_descriptor_threshold, "Descriptor threshold for cells with cell loss priority 1");
176
177
178
179/*
180 * ####################################
181 * Definition
182 * ####################################
183 */
184
185#define DUMP_SKB_LEN ~0
186
187
188
189/*
190 * ####################################
191 * Declaration
192 * ####################################
193 */
194
195/*
196 * Network Operations
197 */
198static int ppe_ioctl(struct atm_dev *, unsigned int, void *);
199static int ppe_open(struct atm_vcc *);
200static void ppe_close(struct atm_vcc *);
201static int ppe_send(struct atm_vcc *, struct sk_buff *);
202static int ppe_send_oam(struct atm_vcc *, void *, int);
203static int ppe_change_qos(struct atm_vcc *, struct atm_qos *, int);
204
205/*
206 * ADSL LED
207 */
208static INLINE int adsl_led_flash(void);
209
210/*
211 * 64-bit operation used by MIB calculation
212 */
213static INLINE void u64_add_u32(ppe_u64_t, unsigned int, ppe_u64_t *);
214
215/*
216 * buffer manage functions
217 */
218static INLINE struct sk_buff* alloc_skb_rx(void);
219static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
220struct sk_buff* atm_alloc_tx(struct atm_vcc *, unsigned int);
221static INLINE void atm_free_tx_skb_vcc(struct sk_buff *, struct atm_vcc *);
222static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
223static INLINE int get_tx_desc(unsigned int);
224
225/*
226 * mailbox handler and signal function
227 */
228static INLINE void mailbox_oam_rx_handler(void);
229static INLINE void mailbox_aal_rx_handler(void);
230#if defined(ENABLE_TASKLET) && ENABLE_TASKLET
231  static void do_ppe_tasklet(unsigned long);
232#endif
233static irqreturn_t mailbox_irq_handler(int, void *);
234static INLINE void mailbox_signal(unsigned int, int);
235
236/*
237 * QSB & HTU setting functions
238 */
239static void set_qsb(struct atm_vcc *, struct atm_qos *, unsigned int);
240static void qsb_global_set(void);
241static INLINE void set_htu_entry(unsigned int, unsigned int, unsigned int, int, int);
242static INLINE void clear_htu_entry(unsigned int);
243static void validate_oam_htu_entry(void);
244static void invalidate_oam_htu_entry(void);
245
246/*
247 * look up for connection ID
248 */
249static INLINE int find_vpi(unsigned int);
250static INLINE int find_vpivci(unsigned int, unsigned int);
251static INLINE int find_vcc(struct atm_vcc *);
252
253/*
254 * Debug Functions
255 */
256#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
257  static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
258#else
259  #define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
260#endif
261
262/*
263 * Proc File Functions
264 */
265static INLINE void proc_file_create(void);
266static INLINE void proc_file_delete(void);
267static int proc_read_version(char *, char **, off_t, int, int *, void *);
268static int proc_read_mib(char *, char **, off_t, int, int *, void *);
269static int proc_write_mib(struct file *, const char *, unsigned long, void *);
270#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
271  static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
272  static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
273#endif
274#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
275  static int proc_read_htu(char *, char **, off_t, int, int *, void *);
276  static int proc_read_txq(char *, char **, off_t, int, int *, void *);
277#endif
278
279/*
280 * Proc Help Functions
281 */
282static int stricmp(const char *, const char *);
283#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
284  static int strincmp(const char *, const char *, int);
285#endif
286static INLINE int ifx_atm_version(char *);
287//static INLINE int print_reset_domain(char *, int);
288//static INLINE int print_reset_handler(char *, int, ifx_rcu_handler_t *);
289
290/*
291 * Init & clean-up functions
292 */
293#ifdef MODULE
294  static INLINE void reset_ppe(void);
295#endif
296static INLINE void check_parameters(void);
297static INLINE int init_priv_data(void);
298static INLINE void clear_priv_data(void);
299static INLINE void init_rx_tables(void);
300static INLINE void init_tx_tables(void);
301
302/*
303 * External Functions
304 */
305#if defined(CONFIG_IFX_OAM) || defined(CONFIG_IFX_OAM_MODULE)
306  extern void ifx_push_oam(unsigned char *);
307#else
308  static inline void ifx_push_oam(unsigned char *dummy) {}
309#endif
310#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
311  extern int ifx_mei_atm_led_blink(void);
312  extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
313#else
314  static inline int ifx_mei_atm_led_blink(void) { return IFX_SUCCESS; }
315  static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
316  {
317    if ( is_showtime != NULL )
318        *is_showtime = 0;
319    return IFX_SUCCESS;
320  }
321#endif
322
323/*
324 * External variable
325 */
326extern struct sk_buff* (*ifx_atm_alloc_tx)(struct atm_vcc *, unsigned int);
327#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
328  extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
329  extern int (*ifx_mei_atm_showtime_exit)(void);
330#else
331  int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
332  EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
333  int (*ifx_mei_atm_showtime_exit)(void) = NULL;
334  EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
335#endif
336
337
338
339/*
340 * ####################################
341 * Local Variable
342 * ####################################
343 */
344
345static struct atm_priv_data g_atm_priv_data;
346
347static struct atmdev_ops g_ifx_atm_ops = {
348    .open = ppe_open,
349    .close = ppe_close,
350    .ioctl = ppe_ioctl,
351    .send = ppe_send,
352    .send_oam = ppe_send_oam,
353    .change_qos = ppe_change_qos,
354    .owner = THIS_MODULE,
355};
356
357#if defined(ENABLE_TASKLET) && ENABLE_TASKLET
358  DECLARE_TASKLET(g_dma_tasklet, do_ppe_tasklet, 0);
359#endif
360
361static int g_showtime = 0;
362static void *g_xdata_addr = NULL;
363
364unsigned int ifx_atm_dbg_enable = 0;
365
366static struct proc_dir_entry* g_atm_dir = NULL;
367
368
369
370/*
371 * ####################################
372 * Local Function
373 * ####################################
374 */
375
376static int ppe_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
377{
378    int ret = 0;
379    atm_cell_ifEntry_t mib_cell;
380    atm_aal5_ifEntry_t mib_aal5;
381    atm_aal5_vcc_x_t mib_vcc;
382    unsigned int value;
383    int conn;
384
385    if ( _IOC_TYPE(cmd) != PPE_ATM_IOC_MAGIC
386        || _IOC_NR(cmd) >= PPE_ATM_IOC_MAXNR )
387        return -ENOTTY;
388
389    if ( _IOC_DIR(cmd) & _IOC_READ )
390        ret = !access_ok(VERIFY_WRITE, arg, _IOC_SIZE(cmd));
391    else if ( _IOC_DIR(cmd) & _IOC_WRITE )
392        ret = !access_ok(VERIFY_READ, arg, _IOC_SIZE(cmd));
393    if ( ret )
394        return -EFAULT;
395
396    switch ( cmd )
397    {
398    case PPE_ATM_MIB_CELL: /* cell level MIB */
399        /* These MIB should be read at ARC side, now put zero only. */
400        mib_cell.ifHCInOctets_h = 0;
401        mib_cell.ifHCInOctets_l = 0;
402        mib_cell.ifHCOutOctets_h = 0;
403        mib_cell.ifHCOutOctets_l = 0;
404        mib_cell.ifInErrors = 0;
405        mib_cell.ifInUnknownProtos = WAN_MIB_TABLE->wrx_drophtu_cell;
406        mib_cell.ifOutErrors = 0;
407
408        ret = sizeof(mib_cell) - copy_to_user(arg, &mib_cell, sizeof(mib_cell));
409        break;
410
411    case PPE_ATM_MIB_AAL5: /* AAL5 MIB */
412        value = WAN_MIB_TABLE->wrx_total_byte;
413        u64_add_u32(g_atm_priv_data.wrx_total_byte, value - g_atm_priv_data.prev_wrx_total_byte, &g_atm_priv_data.wrx_total_byte);
414        g_atm_priv_data.prev_wrx_total_byte = value;
415        mib_aal5.ifHCInOctets_h = g_atm_priv_data.wrx_total_byte.h;
416        mib_aal5.ifHCInOctets_l = g_atm_priv_data.wrx_total_byte.l;
417
418        value = WAN_MIB_TABLE->wtx_total_byte;
419        u64_add_u32(g_atm_priv_data.wtx_total_byte, value - g_atm_priv_data.prev_wtx_total_byte, &g_atm_priv_data.wtx_total_byte);
420        g_atm_priv_data.prev_wtx_total_byte = value;
421        mib_aal5.ifHCOutOctets_h = g_atm_priv_data.wtx_total_byte.h;
422        mib_aal5.ifHCOutOctets_l = g_atm_priv_data.wtx_total_byte.l;
423
424        mib_aal5.ifInUcastPkts = g_atm_priv_data.wrx_pdu;
425        mib_aal5.ifOutUcastPkts = WAN_MIB_TABLE->wtx_total_pdu;
426        mib_aal5.ifInErrors = WAN_MIB_TABLE->wrx_err_pdu;
427        mib_aal5.ifInDiscards = WAN_MIB_TABLE->wrx_dropdes_pdu + g_atm_priv_data.wrx_drop_pdu;
428        mib_aal5.ifOutErros = g_atm_priv_data.wtx_err_pdu;
429        mib_aal5.ifOutDiscards = g_atm_priv_data.wtx_drop_pdu;
430
431        ret = sizeof(mib_aal5) - copy_to_user(arg, &mib_aal5, sizeof(mib_aal5));
432        break;
433
434    case PPE_ATM_MIB_VCC: /* VCC related MIB */
435        copy_from_user(&mib_vcc, arg, sizeof(mib_vcc));
436        conn = find_vpivci(mib_vcc.vpi, mib_vcc.vci);
437        if ( conn >= 0 )
438        {
439            mib_vcc.mib_vcc.aal5VccCrcErrors = g_atm_priv_data.conn[conn].aal5_vcc_crc_err;
440            mib_vcc.mib_vcc.aal5VccOverSizedSDUs = g_atm_priv_data.conn[conn].aal5_vcc_oversize_sdu;
441            mib_vcc.mib_vcc.aal5VccSarTimeOuts = 0; /* no timer support */
442            ret = sizeof(mib_vcc) - copy_to_user(arg, &mib_vcc, sizeof(mib_vcc));
443        }
444        else
445            ret = -EINVAL;
446        break;
447
448    default:
449        ret = -ENOIOCTLCMD;
450    }
451
452    return ret;
453}
454
/*
 * ATM device "open" hook: activate a PVC on the PPE.
 *
 * Validates the requested QoS against the remaining TX cell rate of the
 * port, allocates a free connection slot (QID), reserves bandwidth,
 * programs the QSB shaper and the HTU lookup entry, and enables the
 * mailbox interrupt when this is the first active connection.
 *
 * Returns 0 on success or a negative errno.
 */
static int ppe_open(struct atm_vcc *vcc)
{
    int ret;
    short vpi = vcc->vpi;
    int vci = vcc->vci;
    struct port *port = &g_atm_priv_data.port[(int)vcc->dev->dev_data];  /* dev_data holds the port index */
    int conn;
    int f_enable_irq = 0;   /* set when this open activates the very first PVC */
#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    int sys_flag;
#endif

    /* only AAL5 and AAL0 are supported by this driver */
    if ( vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0 )
        return -EPROTONOSUPPORT;

    /* check bandwidth: the guaranteed rate of the new PVC must fit into
       what is left of the port's TX cell rate */
    if ( (vcc->qos.txtp.traffic_class == ATM_CBR && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
      || (vcc->qos.txtp.traffic_class == ATM_VBR_RT && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
      || (vcc->qos.txtp.traffic_class == ATM_VBR_NRT && vcc->qos.txtp.scr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
      || (vcc->qos.txtp.traffic_class == ATM_UBR_PLUS && vcc->qos.txtp.min_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate)) )
    {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }

    /* refuse a VPI/VCI pair that is already in use */
    conn = find_vpivci(vpi, vci);
    if ( conn >= 0 ) {
        ret = -EADDRINUSE;
        goto PPE_OPEN_EXIT;
    }

    /* no connection active yet -> mailbox IRQ must be enabled below */
    if ( g_atm_priv_data.conn_table == 0 )
        f_enable_irq = 1;

    /* allocate connection: grab the first free bit in the connection bitmap */
    for ( conn = 0; conn < MAX_PVC_NUMBER; conn++ ) {
        if ( test_and_set_bit(conn, &g_atm_priv_data.conn_table) == 0 ) {
            g_atm_priv_data.conn[conn].vcc = vcc;
            break;
        }
    }
    if ( conn == MAX_PVC_NUMBER )
    {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }

    /* reserve bandwidth according to the traffic class
       (released again in ppe_close) */
    switch ( vcc->qos.txtp.traffic_class ) {
    case ATM_CBR:
    case ATM_VBR_RT:
        port->tx_current_cell_rate += vcc->qos.txtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        port->tx_current_cell_rate += vcc->qos.txtp.scr;
        break;
    case ATM_UBR_PLUS:
        port->tx_current_cell_rate += vcc->qos.txtp.min_pcr;
        break;
    }

    /* program the QSB shaper for this queue */
    set_qsb(vcc, &vcc->qos, conn);

    /* update atm_vcc structure */
    vcc->itf = (int)vcc->dev->dev_data;
    vcc->vpi = vpi;
    vcc->vci = vci;
    set_bit(ATM_VF_READY, &vcc->flags);

    /* first PVC: publish the TX allocator, clear and unmask the RX
       mailbox interrupt sources, then enable the IRQ line */
    if (f_enable_irq ) {
        ifx_atm_alloc_tx = atm_alloc_tx;

        *MBOX_IGU1_ISRC = (1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM);
        *MBOX_IGU1_IER = (1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM);

        enable_irq(PPE_MAILBOX_IGU1_INT);
    }

    /* bind the TX queue to the port */
    WTX_QUEUE_CONFIG(conn)->sbid = (int)vcc->dev->dev_data;

    /* set htu entry so incoming cells are steered to this connection */
    set_htu_entry(vpi, vci, conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0, 0);

#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    // ReTX: occupy second QID
    local_irq_save(sys_flag);
    if ( g_retx_htu && vcc->qos.aal == ATM_AAL5 )
    {
        int retx_conn = (conn + 8) % 16; // ReTX queue

        if ( retx_conn < MAX_PVC_NUMBER && test_and_set_bit(retx_conn, &g_atm_priv_data.conn_table) == 0 ) {
            g_atm_priv_data.conn[retx_conn].vcc = vcc;
            set_htu_entry(vpi, vci, retx_conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0, 1);
        }
    }
    local_irq_restore(sys_flag);
#endif

    ret = 0;

PPE_OPEN_EXIT:
    return ret;
}
563
/*
 * ATM device "close" hook: tear down a PVC.
 *
 * Removes the HTU entry (stopping cell steering first), frees the
 * connection slot, returns the reserved cell rate to the port, and
 * disables the mailbox IRQ when the last connection goes away.
 */
static void ppe_close(struct atm_vcc *vcc)
{
    int conn;
    struct port *port;
    struct connection *connection;

    if ( vcc == NULL )
        return;

    /* get connection id */
    conn = find_vcc(vcc);
    if ( conn < 0 ) {
        err("can't find vcc");
        goto PPE_CLOSE_EXIT;
    }
    connection = &g_atm_priv_data.conn[conn];
    port = &g_atm_priv_data.port[connection->port];

    /* clear htu entry first so no further cells are steered to this queue */
    clear_htu_entry(conn);

    /* release connection slot and reset its per-VCC MIB counters */
    clear_bit(conn, &g_atm_priv_data.conn_table);
    connection->vcc = NULL;
    connection->aal5_vcc_crc_err = 0;
    connection->aal5_vcc_oversize_sdu = 0;

    /* disable irq and unpublish the TX allocator when the last PVC is gone */
    if ( g_atm_priv_data.conn_table == 0 ) {
        disable_irq(PPE_MAILBOX_IGU1_INT);
        ifx_atm_alloc_tx = NULL;
    }

    /* release the bandwidth reserved at open time */
    switch ( vcc->qos.txtp.traffic_class )
    {
    case ATM_CBR:
    case ATM_VBR_RT:
        port->tx_current_cell_rate -= vcc->qos.txtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        port->tx_current_cell_rate -= vcc->qos.txtp.scr;
        break;
    case ATM_UBR_PLUS:
        port->tx_current_cell_rate -= vcc->qos.txtp.min_pcr;
        break;
    }

PPE_CLOSE_EXIT:
    return;
}
615
/*
 * ATM device "send" hook: transmit one AAL5 frame or one AAL0 cell.
 *
 * For AAL5 an inband header (cell header + trailer fields consumed by
 * the PPE firmware) is prepended in front of the payload; for AAL0 the
 * raw cell is sent as-is.  In both cases the data pointer handed to the
 * DMA engine must be burst-aligned; a new skb is allocated and the
 * payload copied when the original does not satisfy the constraints.
 *
 * Returns 0 on success or a negative errno; the skb is always consumed.
 */
static int ppe_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
    int ret;
    int conn;
    int desc_base;
    struct tx_descriptor reg_desc = {0};

    if ( vcc == NULL || skb == NULL )
        return -EINVAL;

    /* take ownership of the skb: hold an extra reference, then let the
       ATM layer drop its own via pop/free, leaving this driver as the
       sole owner of the buffer */
    skb_get(skb);
    atm_free_tx_skb_vcc(skb, vcc);

    conn = find_vcc(vcc);
    if ( conn < 0 ) {
        ret = -EINVAL;
        goto FIND_VCC_FAIL;
    }

    /* the DSL line must be in showtime before anything can be sent */
    if ( !g_showtime ) {
        err("not in showtime");
        ret = -EIO;
        goto PPE_SEND_FAIL;
    }

    if ( vcc->qos.aal == ATM_AAL5 ) {
        int byteoff;
        int datalen;
        struct tx_inband_header *header;

        datalen = skb->len;
        byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);

        /* reallocate when there is no headroom for the inband header in
           front of the (aligned) payload */
        if ( skb_headroom(skb) < byteoff + TX_INBAND_HEADER_LENGTH ) {
            struct sk_buff *new_skb;

            new_skb = alloc_skb_tx(datalen);
            if ( new_skb == NULL ) {
                err("ALLOC_SKB_TX_FAIL");
                ret = -ENOMEM;
                goto PPE_SEND_FAIL;
            }
            skb_put(new_skb, datalen);
            memcpy(new_skb->data, skb->data, datalen);
            dev_kfree_skb_any(skb);
            skb = new_skb;
            byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
        }

        /* expose the header area in front of the payload */
        skb_push(skb, byteoff + TX_INBAND_HEADER_LENGTH);

        header = (struct tx_inband_header *)skb->data;

        /* setup inband trailer */
        header->uu = 0;
        header->cpi = 0;
        header->pad = aal5_fill_pattern;
        header->res1 = 0;

        /* setup cell header */
        header->clp = (vcc->atm_options & ATM_ATMOPT_CLP) ? 1 : 0;
        header->pti = ATM_PTI_US0;
        header->vci = vcc->vci;
        header->vpi = vcc->vpi;
        header->gfc = 0;

        /* setup descriptor: word-aligned data pointer plus byte offset */
        reg_desc.dataptr = (unsigned int)skb->data >> 2;
        reg_desc.datalen = datalen;
        reg_desc.byteoff = byteoff;
        reg_desc.iscell = 0;
    }
    else {
        /* AAL0: if data pointer is not aligned, allocate new sk_buff */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 ) {
            struct sk_buff *new_skb;

            err("skb->data not aligned");

            new_skb = alloc_skb_tx(skb->len);
            if ( new_skb == NULL ) {
                err("ALLOC_SKB_TX_FAIL");
                ret = -ENOMEM;
                goto PPE_SEND_FAIL;
            }
            skb_put(new_skb, skb->len);
            memcpy(new_skb->data, skb->data, skb->len);
            dev_kfree_skb_any(skb);
            skb = new_skb;
        }

        reg_desc.dataptr = (unsigned int)skb->data >> 2;
        reg_desc.datalen = skb->len;
        reg_desc.byteoff = 0;
        reg_desc.iscell = 1;
    }

    reg_desc.own = 1;                   /* hand the descriptor to hardware */
    reg_desc.c = 1;
    reg_desc.sop = reg_desc.eop = 1;    /* single-descriptor frame */

    desc_base = get_tx_desc(conn);
    if ( desc_base < 0 ) {
        err("ALLOC_TX_CONNECTION_FAIL");
        ret = -EIO;
        goto PPE_SEND_FAIL;
    }

    if ( vcc->stats )
        atomic_inc(&vcc->stats->tx);
    if ( vcc->qos.aal == ATM_AAL5 )
        g_atm_priv_data.wtx_pdu++;

    /* update descriptor send pointer: free the skb of an earlier
       transmission that used this ring slot */
    if ( g_atm_priv_data.conn[conn].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_atm_priv_data.conn[conn].tx_skb[desc_base]);
    g_atm_priv_data.conn[conn].tx_skb[desc_base] = skb;

    /* write descriptor to memory and write back cache so the DMA engine
       sees the payload */
    g_atm_priv_data.conn[conn].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 1);

    /* kick the TX channel */
    mailbox_signal(conn, 1);

    adsl_led_flash();

    return 0;

FIND_VCC_FAIL:
    err("FIND_VCC_FAIL");
    g_atm_priv_data.wtx_err_pdu++;
    dev_kfree_skb_any(skb);
    return ret;

PPE_SEND_FAIL:
    if ( vcc->qos.aal == ATM_AAL5 )
        g_atm_priv_data.wtx_drop_pdu++;
    if ( vcc->stats )
        atomic_inc(&vcc->stats->tx_err);
    dev_kfree_skb_any(skb);
    return ret;
}
760
/*
 * ATM device "send_oam" hook: transmit one raw OAM cell.
 *
 * The cell must address either a known F5 flow (open VPI/VCI) or a
 * known F4 flow (VCI 3/4 on an open VPI).  The cell is copied into a
 * freshly allocated DMA buffer and queued on the connection's TX ring.
 *
 * Returns 0 on success or a negative errno.
 */
static int ppe_send_oam(struct atm_vcc *vcc, void *cell, int flags)
{
    int conn;
    struct uni_cell_header *uni_cell_header = (struct uni_cell_header *)cell;
    int desc_base;
    struct sk_buff *skb;
    struct tx_descriptor reg_desc = {0};

    /* validate the target flow of the cell */
    if ( ((uni_cell_header->pti == ATM_PTI_SEGF5 || uni_cell_header->pti == ATM_PTI_E2EF5)
        && find_vpivci(uni_cell_header->vpi, uni_cell_header->vci) < 0)
        || ((uni_cell_header->vci == 0x03 || uni_cell_header->vci == 0x04)
        && find_vpi(uni_cell_header->vpi) < 0) )
        return -EINVAL;

    /* the DSL line must be up */
    if ( !g_showtime ) {
        err("not in showtime");
        return -EIO;
    }

    conn = find_vcc(vcc);
    if ( conn < 0 ) {
        err("FIND_VCC_FAIL");
        return -EINVAL;
    }

    /* copy the cell into a burst-aligned, DMA-able buffer */
    skb = alloc_skb_tx(CELL_SIZE);
    if ( skb == NULL ) {
        err("ALLOC_SKB_TX_FAIL");
        return -ENOMEM;
    }
    memcpy(skb->data, cell, CELL_SIZE);

    /* setup descriptor: raw cell, no byte offset */
    reg_desc.dataptr = (unsigned int)skb->data >> 2;
    reg_desc.datalen = CELL_SIZE;
    reg_desc.byteoff = 0;
    reg_desc.iscell = 1;

    reg_desc.own = 1;                   /* hand the descriptor to hardware */
    reg_desc.c = 1;
    reg_desc.sop = reg_desc.eop = 1;    /* single-descriptor frame */

    desc_base = get_tx_desc(conn);
    if ( desc_base < 0 ) {
        dev_kfree_skb_any(skb);
        err("ALLOC_TX_CONNECTION_FAIL");
        return -EIO;
    }

    if ( vcc->stats )
        atomic_inc(&vcc->stats->tx);

    /* update descriptor send pointer: free the skb of an earlier
       transmission that used this ring slot */
    if ( g_atm_priv_data.conn[conn].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_atm_priv_data.conn[conn].tx_skb[desc_base]);
    g_atm_priv_data.conn[conn].tx_skb[desc_base] = skb;

    /* write descriptor to memory and write back cache so the DMA engine
       sees the cell payload */
    g_atm_priv_data.conn[conn].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, CELL_SIZE);

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 1);

    /* kick the TX channel */
    mailbox_signal(conn, 1);

    adsl_led_flash();

    return 0;
}
829
830static int ppe_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
831{
832    int conn;
833
834    if ( vcc == NULL || qos == NULL )
835        return -EINVAL;
836
837    conn = find_vcc(vcc);
838    if ( conn < 0 )
839        return -EINVAL;
840
841    set_qsb(vcc, qos, conn);
842
843    return 0;
844}
845
/*
 * Blink the ADSL data LED via the MEI driver; resolves to a no-op stub
 * returning IFX_SUCCESS when the MEI driver is not built (see the
 * fallback definition in the declaration section above).
 */
static INLINE int adsl_led_flash(void)
{
    return ifx_mei_atm_led_blink();
}
850
851/*
852 * Description:
853 * Add a 32-bit value to 64-bit value, and put result in a 64-bit variable.
854 * Input:
855 * opt1 --- ppe_u64_t, first operand, a 64-bit unsigned integer value
856 * opt2 --- unsigned int, second operand, a 32-bit unsigned integer value
857 * ret --- ppe_u64_t, pointer to a variable to hold result
858 * Output:
859 * none
860 */
861static INLINE void u64_add_u32(ppe_u64_t opt1, unsigned int opt2, ppe_u64_t *ret)
862{
863    ret->l = opt1.l + opt2;
864    if ( ret->l < opt1.l || ret->l < opt2 )
865        ret->h++;
866}
867
/*
 * Allocate an RX buffer for the AAL DMA channel.
 *
 * The payload start is aligned to the DMA burst length, a back-pointer
 * to the sk_buff is stored in the word directly below skb->data (read
 * back later by get_skb_rx_pointer()), and the cache is prepared for
 * the hardware write.
 */
static INLINE struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* over-allocate so the payload start can be burst-aligned */
    skb = dev_alloc_skb(RX_DMA_CH_AAL_BUF_SIZE + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be burst length alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /* put skb pointer in reserved area "skb->data - 4" */
        *((struct sk_buff **)skb->data - 1) = skb;
        /* write back and invalidate cache for the stored back-pointer
           (sizeof(skb) is the pointer size, i.e. the 4 bytes just written) */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache over the payload area the DMA engine will fill */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}
887
/*
 * Allocate a TX buffer of at least `size` payload bytes whose data
 * pointer is burst-aligned and leaves TX_INBAND_HEADER_LENGTH bytes of
 * headroom for the inband header prepended by ppe_send().
 */
static INLINE struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including header and padding */
    size += TX_INBAND_HEADER_LENGTH + MAX_TX_PACKET_ALIGN_BYTES + MAX_TX_PACKET_PADDING_BYTES;
    /* round down to a whole number of burst units -- NOTE(review): this
       relies on the MAX_TX_* slack above covering the round-down so the
       result still holds `size` payload bytes; confirm against the
       constants in the header */
    size &= ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be burst length alignment, then skip past the header room */
    if ( skb != NULL )
        skb_reserve(skb, (~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1)) + TX_INBAND_HEADER_LENGTH);
    return skb;
}
901
902struct sk_buff* atm_alloc_tx(struct atm_vcc *vcc, unsigned int size)
903{
904    int conn;
905    struct sk_buff *skb;
906
907    /* oversize packet */
908    if ( size > aal5s_max_packet_size ) {
909        err("atm_alloc_tx: oversize packet");
910        return NULL;
911    }
912    /* send buffer overflow */
913    if ( atomic_read(&sk_atm(vcc)->sk_wmem_alloc) && !atm_may_send(vcc, size) ) {
914        err("atm_alloc_tx: send buffer overflow");
915        return NULL;
916    }
917    conn = find_vcc(vcc);
918    if ( conn < 0 ) {
919        err("atm_alloc_tx: unknown VCC");
920        return NULL;
921    }
922
923    skb = dev_alloc_skb(size);
924    if ( skb == NULL ) {
925        err("atm_alloc_tx: sk buffer is used up");
926        return NULL;
927    }
928
929    atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
930
931    return skb;
932}
933
934static INLINE void atm_free_tx_skb_vcc(struct sk_buff *skb, struct atm_vcc *vcc)
935{
936    if ( vcc->pop != NULL )
937        vcc->pop(vcc, skb);
938    else
939        dev_kfree_skb_any(skb);
940}
941
/*
 * Map a DMA data pointer (a word address, i.e. skb->data >> 2) back to
 * the sk_buff that owns the buffer.  alloc_skb_rx() stores the sk_buff
 * pointer in the word directly below skb->data; step back one word and
 * read it through an uncached (KSEG1) address.
 */
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    /* one word below the data pointer, forced into uncached KSEG1 */
    skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    /* sanity: the recovered pointer must be a kernel address and its
       data pointer must match the descriptor's data pointer */
    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}
955
956static INLINE int get_tx_desc(unsigned int conn)
957{
958    int desc_base = -1;
959    struct connection *p_conn = &g_atm_priv_data.conn[conn];
960
961    if ( p_conn->tx_desc[p_conn->tx_desc_pos].own == 0 ) {
962        desc_base = p_conn->tx_desc_pos;
963        if ( ++(p_conn->tx_desc_pos) == dma_tx_descriptor_length )
964            p_conn->tx_desc_pos = 0;
965    }
966
967    return desc_base;
968}
969
/*
 * Drain the OAM RX DMA channel.
 *
 * For each descriptor the firmware marked valid: map the cell to a
 * connection (F5 flows by VPI/VCI, F4 flows by VPI), deliver it to the
 * VCC's push_oam callback or the global OAM dispatcher, then recycle
 * the descriptor back to the hardware and signal the mailbox.
 */
static INLINE void mailbox_oam_rx_handler(void)
{
    unsigned int vlddes = WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_OAM)->vlddes;    /* count of valid descriptors */
    struct rx_descriptor reg_desc;
    struct uni_cell_header *header;
    int conn;
    struct atm_vcc *vcc;
    unsigned int i;

    for ( i = 0; i < vlddes; i++ ) {
        do {
            reg_desc = g_atm_priv_data.oam_desc[g_atm_priv_data.oam_desc_pos];
        } while ( reg_desc.own || !reg_desc.c ); // keep testing OWN and C bit until data is ready

        /* OAM buffers are fixed-size slots indexed by descriptor position */
        header = (struct uni_cell_header *)&g_atm_priv_data.oam_buf[g_atm_priv_data.oam_desc_pos * RX_DMA_CH_OAM_BUF_SIZE];

        /* map the cell to a connection: F5 by VPI/VCI, F4 (VCI 3/4) by VPI */
        if ( header->pti == ATM_PTI_SEGF5 || header->pti == ATM_PTI_E2EF5 )
            conn = find_vpivci(header->vpi, header->vci);
        else if ( header->vci == 0x03 || header->vci == 0x04 )
            conn = find_vpi(header->vpi);
        else
            conn = -1;

        if ( conn >= 0 && g_atm_priv_data.conn[conn].vcc != NULL ) {
            vcc = g_atm_priv_data.conn[conn].vcc;

            /* prefer the VCC's own OAM handler, fall back to the
               global dispatcher */
            if ( vcc->push_oam != NULL )
                vcc->push_oam(vcc, header);
            else
                ifx_push_oam((unsigned char *)header);

            adsl_led_flash();
        }

        /* recycle the descriptor: full buffer size, owned by hardware again */
        reg_desc.byteoff = 0;
        reg_desc.datalen = RX_DMA_CH_OAM_BUF_SIZE;
        reg_desc.own = 1;
        reg_desc.c = 0;

        g_atm_priv_data.oam_desc[g_atm_priv_data.oam_desc_pos] = reg_desc;
        if ( ++g_atm_priv_data.oam_desc_pos == RX_DMA_CH_OAM_DESC_LEN )
            g_atm_priv_data.oam_desc_pos = 0;

        /* tell the firmware the descriptor is available again */
        mailbox_signal(RX_DMA_CH_OAM, 0);
    }
}
1016
/*
 * Drain the AAL RX descriptor ring.
 * For each valid descriptor: look up the connection from the descriptor's
 * id field, account errors from the in-band trailer, or (on success and if
 * the socket accepts the charge) push the filled skb up the ATM stack and
 * replace the ring buffer with a freshly allocated skb. The descriptor is
 * then recycled and the firmware is signalled.
 */
static INLINE void mailbox_aal_rx_handler(void)
{
    unsigned int vlddes = WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_AAL)->vlddes;
    struct rx_descriptor reg_desc;
    int conn;
    struct atm_vcc *vcc;
    struct sk_buff *skb, *new_skb;
    struct rx_inband_trailer *trailer;
    unsigned int i;

    for ( i = 0; i < vlddes; i++ ) {
        do {
            reg_desc = g_atm_priv_data.aal_desc[g_atm_priv_data.aal_desc_pos];
        } while ( reg_desc.own || !reg_desc.c ); // keep test OWN and C bit until data is ready

        conn = reg_desc.id;

        if ( g_atm_priv_data.conn[conn].vcc != NULL ) {
            vcc = g_atm_priv_data.conn[conn].vcc;

            skb = get_skb_rx_pointer(reg_desc.dataptr);

            if ( reg_desc.err ) {
                if ( vcc->qos.aal == ATM_AAL5 ) {
                    /* the trailer follows the padded payload; NOTE(review):
                     * the rounding below is only correct if
                     * MAX_RX_PACKET_PADDING_BYTES is (2^k - 1) — confirm */
                    trailer = (struct rx_inband_trailer *)((unsigned int)skb->data + ((reg_desc.byteoff + reg_desc.datalen + MAX_RX_PACKET_PADDING_BYTES) & ~MAX_RX_PACKET_PADDING_BYTES));
                    if ( trailer->stw_crc )
                        g_atm_priv_data.conn[conn].aal5_vcc_crc_err++;
                    if ( trailer->stw_ovz )
                        g_atm_priv_data.conn[conn].aal5_vcc_oversize_sdu++;
                    g_atm_priv_data.wrx_drop_pdu++;
                }
                if ( vcc->stats ) {
                    atomic_inc(&vcc->stats->rx_drop);
                    atomic_inc(&vcc->stats->rx_err);
                }
            }
            else if ( atm_charge(vcc, skb->truesize) ) {
                /* need a replacement buffer before giving this skb away */
                new_skb = alloc_skb_rx();
                if ( new_skb != NULL ) {
                    skb_reserve(skb, reg_desc.byteoff);
                    skb_put(skb, reg_desc.datalen);
                    ATM_SKB(skb)->vcc = vcc;

                    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, 0, conn, 0);

                    vcc->push(vcc, skb);

                    if ( vcc->qos.aal == ATM_AAL5 )
                        g_atm_priv_data.wrx_pdu++;
                    if ( vcc->stats )
                        atomic_inc(&vcc->stats->rx);
                    adsl_led_flash();

                    /* point the descriptor at the new buffer (word address) */
                    reg_desc.dataptr = (unsigned int)new_skb->data >> 2;
                }
                else {
                    /* no replacement buffer: undo the charge, keep old skb in ring */
                    atm_return(vcc, skb->truesize);
                    if ( vcc->qos.aal == ATM_AAL5 )
                        g_atm_priv_data.wrx_drop_pdu++;
                    if ( vcc->stats )
                        atomic_inc(&vcc->stats->rx_drop);
                }
            }
            else {
                /* socket receive budget exceeded: drop the PDU */
                if ( vcc->qos.aal == ATM_AAL5 )
                    g_atm_priv_data.wrx_drop_pdu++;
                if ( vcc->stats )
                    atomic_inc(&vcc->stats->rx_drop);
            }
        }
        else {
            g_atm_priv_data.wrx_drop_pdu++;
        }

        /* recycle the descriptor and return ownership to the firmware */
        reg_desc.byteoff = 0;
        reg_desc.datalen = RX_DMA_CH_AAL_BUF_SIZE;
        reg_desc.own = 1;
        reg_desc.c = 0;

        g_atm_priv_data.aal_desc[g_atm_priv_data.aal_desc_pos] = reg_desc;
        if ( ++g_atm_priv_data.aal_desc_pos == dma_rx_descriptor_length )
            g_atm_priv_data.aal_desc_pos = 0;

        mailbox_signal(RX_DMA_CH_AAL, 0);
    }
}
1103
#if defined(ENABLE_TASKLET) && ENABLE_TASKLET
/*
 * Deferred mailbox processing (tasklet context).
 * Acknowledges the pending IGU1 interrupt bits, drains both RX rings,
 * then either re-schedules itself if more RX work arrived meanwhile or
 * re-enables the mailbox IRQ that mailbox_irq_handler masked.
 */
static void do_ppe_tasklet(unsigned long arg)
{
    *MBOX_IGU1_ISRC = *MBOX_IGU1_ISR;   /* ack all currently pending bits */
    mailbox_oam_rx_handler();
    mailbox_aal_rx_handler();
    if ( (*MBOX_IGU1_ISR & ((1 << RX_DMA_CH_AAL) | (1 << RX_DMA_CH_OAM))) != 0 )
        tasklet_schedule(&g_dma_tasklet);
    else
        enable_irq(PPE_MAILBOX_IGU1_INT);
}
#endif
1116
/*
 * PPE mailbox (IGU1) interrupt handler.
 * In tasklet mode the IRQ line is masked and the RX work is deferred to
 * do_ppe_tasklet; otherwise the interrupt is acknowledged and both RX
 * rings are drained directly in interrupt context.
 */
static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    /* spurious interrupt: nothing pending */
    if ( !*MBOX_IGU1_ISR )
        return IRQ_HANDLED;

#if defined(ENABLE_TASKLET) && ENABLE_TASKLET
    disable_irq(PPE_MAILBOX_IGU1_INT);  /* re-enabled by the tasklet when done */
    tasklet_schedule(&g_dma_tasklet);
#else
    *MBOX_IGU1_ISRC = *MBOX_IGU1_ISR;   /* ack pending bits */
    mailbox_oam_rx_handler();
    mailbox_aal_rx_handler();
#endif

    return IRQ_HANDLED;
}
1133
1134static INLINE void mailbox_signal(unsigned int queue, int is_tx)
1135{
1136    if ( is_tx ) {
1137        while ( MBOX_IGU3_ISR_ISR(queue + FIRST_QSB_QID + 16) );
1138        *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(queue + FIRST_QSB_QID + 16);
1139    }
1140    else {
1141        while ( MBOX_IGU3_ISR_ISR(queue) );
1142        *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(queue);
1143    }
1144}
1145
/*
 * Program the QSB (Queue Scheduling Block) shaping parameters of one TX
 * queue from the ATM traffic contract in "qos":
 *  - PCR limiter (tp), WFQ factor (wfqf) and the VBR.0/VBR.1 leaky-bucket
 *    shaper (ts/taus) are derived from the requested cell rates and the
 *    QSB clock (FPI clock).
 *  - The resulting values are written to the Queue Parameter Table (QPT)
 *    and Queue VBR Parameter Table (QVPT) through the indirect
 *    RTM/RTD/RAMAC register interface.
 */
static void set_qsb(struct atm_vcc *vcc, struct atm_qos *qos, unsigned int queue)
{
    struct clk *clk = clk_get(0, "fpi");
    unsigned int qsb_clk = clk_get_rate(clk);
    unsigned int qsb_qid = queue + FIRST_QSB_QID;
    union qsb_queue_parameter_table qsb_queue_parameter_table = {{0}};
    union qsb_queue_vbr_parameter_table qsb_queue_vbr_parameter_table = {{0}};
    unsigned int tmp;

#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) {
        static char *str_traffic_class[9] = {
            "ATM_NONE",
            "ATM_UBR",
            "ATM_CBR",
            "ATM_VBR",
            "ATM_ABR",
            "ATM_ANYCLASS",
            "ATM_VBR_RT",
            "ATM_UBR_PLUS",
            "ATM_MAX_PCR"
        };
        printk(KERN_INFO "QoS Parameters:\n");
        printk(KERN_INFO "\tAAL : %d\n", qos->aal);
        printk(KERN_INFO "\tTX Traffic Class: %s\n", str_traffic_class[qos->txtp.traffic_class]);
        printk(KERN_INFO "\tTX Max PCR : %d\n", qos->txtp.max_pcr);
        printk(KERN_INFO "\tTX Min PCR : %d\n", qos->txtp.min_pcr);
        printk(KERN_INFO "\tTX PCR : %d\n", qos->txtp.pcr);
        printk(KERN_INFO "\tTX Max CDV : %d\n", qos->txtp.max_cdv);
        printk(KERN_INFO "\tTX Max SDU : %d\n", qos->txtp.max_sdu);
        printk(KERN_INFO "\tTX SCR : %d\n", qos->txtp.scr);
        printk(KERN_INFO "\tTX MBS : %d\n", qos->txtp.mbs);
        printk(KERN_INFO "\tTX CDV : %d\n", qos->txtp.cdv);
        printk(KERN_INFO "\tRX Traffic Class: %s\n", str_traffic_class[qos->rxtp.traffic_class]);
        printk(KERN_INFO "\tRX Max PCR : %d\n", qos->rxtp.max_pcr);
        printk(KERN_INFO "\tRX Min PCR : %d\n", qos->rxtp.min_pcr);
        printk(KERN_INFO "\tRX PCR : %d\n", qos->rxtp.pcr);
        printk(KERN_INFO "\tRX Max CDV : %d\n", qos->rxtp.max_cdv);
        printk(KERN_INFO "\tRX Max SDU : %d\n", qos->rxtp.max_sdu);
        printk(KERN_INFO "\tRX SCR : %d\n", qos->rxtp.scr);
        printk(KERN_INFO "\tRX MBS : %d\n", qos->rxtp.mbs);
        printk(KERN_INFO "\tRX CDV : %d\n", qos->rxtp.cdv);
    }
#endif // defined(DEBUG_QOS) && DEBUG_QOS

    /*
     * Peak Cell Rate (PCR) Limiter
     */
    if ( qos->txtp.max_pcr == 0 )
        qsb_queue_parameter_table.bit.tp = 0; /* disable PCR limiter */
    else {
        /* peak cell rate would be slightly lower than requested [maximum_rate / pcr = (qsb_clock / 8) * (time_step / 4) / pcr] */
        tmp = ((qsb_clk * qsb_tstep) >> 5) / qos->txtp.max_pcr + 1;
        /* check if overflow takes place */
        qsb_queue_parameter_table.bit.tp = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp;
    }

    // A funny issue. Create two PVCs, one UBR and one UBR with max_pcr.
    // Send packets to these two PVCs at same time, it trigger strange behavior.
    // In A1, RAM from 0x80000000 to 0x0x8007FFFF was corrupted with fixed pattern 0x00000000 0x40000000.
    // In A4, PPE firmware keep emiting unknown cell and do not respond to driver.
    // To work around, create UBR always with max_pcr.
    // If user want to create UBR without max_pcr, we give a default one larger than line-rate.
    if ( qos->txtp.traffic_class == ATM_UBR && qsb_queue_parameter_table.bit.tp == 0 ) {
        int port = g_atm_priv_data.conn[queue].port;
        unsigned int max_pcr = g_atm_priv_data.port[port].tx_max_cell_rate + 1000;

        tmp = ((qsb_clk * qsb_tstep) >> 5) / max_pcr + 1;
        if ( tmp > QSB_TP_TS_MAX )
            tmp = QSB_TP_TS_MAX;
        else if ( tmp < 1 )
            tmp = 1;
        qsb_queue_parameter_table.bit.tp = tmp;
    }

    /*
     * Weighted Fair Queueing Factor (WFQF)
     */
    switch ( qos->txtp.traffic_class ) {
    case ATM_CBR:
    case ATM_VBR_RT:
        /* real time queue gets weighted fair queueing bypass */
        qsb_queue_parameter_table.bit.wfqf = 0;
        break;
    case ATM_VBR_NRT:
    case ATM_UBR_PLUS:
        /* WFQF calculation here is based on virtual cell rates, to reduce granularity for high rates */
        /* WFQF is maximum cell rate / garenteed cell rate */
        /* wfqf = qsb_minimum_cell_rate * QSB_WFQ_NONUBR_MAX / requested_minimum_peak_cell_rate */
        if ( qos->txtp.min_pcr == 0 )
            qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX;
        else
        {
            tmp = QSB_GCR_MIN * QSB_WFQ_NONUBR_MAX / qos->txtp.min_pcr;
            if ( tmp == 0 )
                qsb_queue_parameter_table.bit.wfqf = 1;
            else if ( tmp > QSB_WFQ_NONUBR_MAX )
                qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX;
            else
                qsb_queue_parameter_table.bit.wfqf = tmp;
        }
        break;
    default:
    case ATM_UBR:
        qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_UBR_BYPASS;
    }

    /*
     * Sustained Cell Rate (SCR) Leaky Bucket Shaper VBR.0/VBR.1
     */
    if ( qos->txtp.traffic_class == ATM_VBR_RT || qos->txtp.traffic_class == ATM_VBR_NRT ) {
        if ( qos->txtp.scr == 0 ) {
            /* disable shaper */
            qsb_queue_vbr_parameter_table.bit.taus = 0;
            qsb_queue_vbr_parameter_table.bit.ts = 0;
        }
        else {
            /* Cell Loss Priority (CLP) */
            if ( (vcc->atm_options & ATM_ATMOPT_CLP) )
                /* CLP1 */
                qsb_queue_parameter_table.bit.vbr = 1;
            else
                /* CLP0 */
                qsb_queue_parameter_table.bit.vbr = 0;
            /* Rate Shaper Parameter (TS) and Burst Tolerance Parameter for SCR (tauS) */
            tmp = ((qsb_clk * qsb_tstep) >> 5) / qos->txtp.scr + 1;
            qsb_queue_vbr_parameter_table.bit.ts = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp;
            tmp = (qos->txtp.mbs - 1) * (qsb_queue_vbr_parameter_table.bit.ts - qsb_queue_parameter_table.bit.tp) / 64;
            if ( tmp == 0 )
                qsb_queue_vbr_parameter_table.bit.taus = 1;
            else if ( tmp > QSB_TAUS_MAX )
                qsb_queue_vbr_parameter_table.bit.taus = QSB_TAUS_MAX;
            else
                qsb_queue_vbr_parameter_table.bit.taus = tmp;
        }
    }
    else {
        qsb_queue_vbr_parameter_table.bit.taus = 0;
        qsb_queue_vbr_parameter_table.bit.ts = 0;
    }

    /* Queue Parameter Table (QPT): set mask, load data, start transfer */
    *QSB_RTM = QSB_RTM_DM_SET(QSB_QPT_SET_MASK);
    *QSB_RTD = QSB_RTD_TTV_SET(qsb_queue_parameter_table.dword);
    *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_QPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(qsb_qid);
#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) )
        printk("QPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC);
#endif
    /* Queue VBR Paramter Table (QVPT) */
    *QSB_RTM = QSB_RTM_DM_SET(QSB_QVPT_SET_MASK);
    *QSB_RTD = QSB_RTD_TTV_SET(qsb_queue_vbr_parameter_table.dword);
    *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_VBR) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(qsb_qid);
#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) )
        printk("QVPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC);
#endif

#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) {
        printk("set_qsb\n");
        printk(" qsb_clk = %lu\n", (unsigned long)qsb_clk);
        printk(" qsb_queue_parameter_table.bit.tp = %d\n", (int)qsb_queue_parameter_table.bit.tp);
        printk(" qsb_queue_parameter_table.bit.wfqf = %d (0x%08X)\n", (int)qsb_queue_parameter_table.bit.wfqf, (int)qsb_queue_parameter_table.bit.wfqf);
        printk(" qsb_queue_parameter_table.bit.vbr = %d\n", (int)qsb_queue_parameter_table.bit.vbr);
        printk(" qsb_queue_parameter_table.dword = 0x%08X\n", (int)qsb_queue_parameter_table.dword);
        printk(" qsb_queue_vbr_parameter_table.bit.ts = %d\n", (int)qsb_queue_vbr_parameter_table.bit.ts);
        printk(" qsb_queue_vbr_parameter_table.bit.taus = %d\n", (int)qsb_queue_vbr_parameter_table.bit.taus);
        printk(" qsb_queue_vbr_parameter_table.dword = 0x%08X\n", (int)qsb_queue_vbr_parameter_table.dword);
    }
#endif
}
1318
/*
 * One-time global QSB setup.
 * Programs the cell delay variation tolerance (ICDV), service burst
 * length (SBL) and time-step configuration registers, then derives the
 * per-port schedule-cell (SCT) and schedule-period (SPT) tables from
 * each port's maximum cell rate and the QSB clock.
 */
static void qsb_global_set(void)
{
    struct clk *clk = clk_get(0, "fpi");
    unsigned int qsb_clk = clk_get_rate(clk);
    int i;
    unsigned int tmp1, tmp2, tmp3;

    *QSB_ICDV = QSB_ICDV_TAU_SET(qsb_tau);
    *QSB_SBL = QSB_SBL_SBL_SET(qsb_srvm);
    *QSB_CFG = QSB_CFG_TSTEPC_SET(qsb_tstep >> 1);
#if defined(DEBUG_QOS) && DEBUG_QOS
    if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ) {
        printk("qsb_clk = %u\n", qsb_clk);
        printk("QSB_ICDV (%08X) = %d (%d), QSB_SBL (%08X) = %d (%d), QSB_CFG (%08X) = %d (%d)\n", (unsigned int)QSB_ICDV, *QSB_ICDV, QSB_ICDV_TAU_SET(qsb_tau), (unsigned int)QSB_SBL, *QSB_SBL, QSB_SBL_SBL_SET(qsb_srvm), (unsigned int)QSB_CFG, *QSB_CFG, QSB_CFG_TSTEPC_SET(qsb_tstep >> 1));
    }
#endif

    /*
     * set SCT and SPT per port
     */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ ) {
        if ( g_atm_priv_data.port[i].tx_max_cell_rate != 0 ) {
            /* Tsb in 6.6 fixed point: tmp2 = integer part, tmp3 = fraction */
            tmp1 = ((qsb_clk * qsb_tstep) >> 1) / g_atm_priv_data.port[i].tx_max_cell_rate;
            tmp2 = tmp1 >> 6; /* integer value of Tsb */
            tmp3 = (tmp1 & ((1 << 6) - 1)) + 1; /* fractional part of Tsb */
            /* carry over to integer part (?) */
            if ( tmp3 == (1 << 6) )
            {
                tmp3 = 0;
                tmp2++;
            }
            if ( tmp2 == 0 )
                tmp2 = tmp3 = 1;
            /* 1. set mask */
            /* 2. write value to data transfer register */
            /* 3. start the tranfer */
            /* SCT (FracRate) */
            *QSB_RTM = QSB_RTM_DM_SET(QSB_SET_SCT_MASK);
            *QSB_RTD = QSB_RTD_TTV_SET(tmp3);
            *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SCT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01);
#if defined(DEBUG_QOS) && DEBUG_QOS
            if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) )
                printk("SCT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC);
#endif
            /* SPT (SBV + PN + IntRage) */
            *QSB_RTM = QSB_RTM_DM_SET(QSB_SET_SPT_MASK);
            *QSB_RTD = QSB_RTD_TTV_SET(QSB_SPT_SBV_VALID | QSB_SPT_PN_SET(i & 0x01) | QSB_SPT_INTRATE_SET(tmp2));
            *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_SPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(i & 0x01);
#if defined(DEBUG_QOS) && DEBUG_QOS
            if ( (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) )
                printk("SPT: QSB_RTM (%08X) = 0x%08X, QSB_RTD (%08X) = 0x%08X, QSB_RAMAC (%08X) = 0x%08X\n", (unsigned int)QSB_RTM, *QSB_RTM, (unsigned int)QSB_RTD, *QSB_RTD, (unsigned int)QSB_RAMAC, *QSB_RAMAC);
#endif
        }
    }
}
1374
1375static INLINE void set_htu_entry(unsigned int vpi, unsigned int vci, unsigned int queue, int aal5, int is_retx)
1376{
1377    struct htu_entry htu_entry = { res1: 0x00,
1378                                    clp: is_retx ? 0x01 : 0x00,
1379                                    pid: g_atm_priv_data.conn[queue].port & 0x01,
1380                                    vpi: vpi,
1381                                    vci: vci,
1382                                    pti: 0x00,
1383                                    vld: 0x01};
1384
1385    struct htu_mask htu_mask = { set: 0x01,
1386#if !defined(ENABLE_ATM_RETX) || !ENABLE_ATM_RETX
1387                                    clp: 0x01,
1388                                    pid_mask: 0x02,
1389#else
1390                                    clp: g_retx_htu ? 0x00 : 0x01,
1391                                    pid_mask: RETX_MODE_CFG->retx_en ? 0x03 : 0x02,
1392#endif
1393                                    vpi_mask: 0x00,
1394#if !defined(ENABLE_ATM_RETX) || !ENABLE_ATM_RETX
1395                                    vci_mask: 0x0000,
1396#else
1397                                    vci_mask: RETX_MODE_CFG->retx_en ? 0xFF00 : 0x0000,
1398#endif
1399                                    pti_mask: 0x03, // 0xx, user data
1400                                    clear: 0x00};
1401
1402    struct htu_result htu_result = {res1: 0x00,
1403                                    cellid: queue,
1404                                    res2: 0x00,
1405                                    type: aal5 ? 0x00 : 0x01,
1406                                    ven: 0x01,
1407                                    res3: 0x00,
1408                                    qid: queue};
1409
1410    *HTU_RESULT(queue + OAM_HTU_ENTRY_NUMBER) = htu_result;
1411    *HTU_MASK(queue + OAM_HTU_ENTRY_NUMBER) = htu_mask;
1412    *HTU_ENTRY(queue + OAM_HTU_ENTRY_NUMBER) = htu_entry;
1413}
1414
/* Invalidate the HTU entry of a queue so its VPI/VCI no longer matches. */
static INLINE void clear_htu_entry(unsigned int queue)
{
    HTU_ENTRY(queue + OAM_HTU_ENTRY_NUMBER)->vld = 0;
}
1419
/* Mark the pre-configured OAM HTU entries (F4 segment/end-to-end, F5,
 * and — with retransmission support — ARQ) as valid. */
static void validate_oam_htu_entry(void)
{
    HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)->vld = 1;
    HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)->vld = 1;
    HTU_ENTRY(OAM_F5_HTU_ENTRY)->vld = 1;
#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    HTU_ENTRY(OAM_ARQ_HTU_ENTRY)->vld = 1;
#endif
}
1429
/* Mark all OAM HTU entries invalid (counterpart of validate_oam_htu_entry). */
static void invalidate_oam_htu_entry(void)
{
    HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY)->vld = 0;
    HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY)->vld = 0;
    HTU_ENTRY(OAM_F5_HTU_ENTRY)->vld = 0;
#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    HTU_ENTRY(OAM_ARQ_HTU_ENTRY)->vld = 0;
#endif
}
1439
1440static INLINE int find_vpi(unsigned int vpi)
1441{
1442    int i;
1443    unsigned int bit;
1444
1445    for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) {
1446        if ( (g_atm_priv_data.conn_table & bit) != 0
1447            && g_atm_priv_data.conn[i].vcc != NULL
1448            && vpi == g_atm_priv_data.conn[i].vcc->vpi )
1449            return i;
1450    }
1451
1452    return -1;
1453}
1454
1455static INLINE int find_vpivci(unsigned int vpi, unsigned int vci)
1456{
1457    int i;
1458    unsigned int bit;
1459
1460    for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) {
1461        if ( (g_atm_priv_data.conn_table & bit) != 0
1462            && g_atm_priv_data.conn[i].vcc != NULL
1463            && vpi == g_atm_priv_data.conn[i].vcc->vpi
1464            && vci == g_atm_priv_data.conn[i].vcc->vci )
1465            return i;
1466    }
1467
1468    return -1;
1469}
1470
1471static INLINE int find_vcc(struct atm_vcc *vcc)
1472{
1473    int i;
1474    unsigned int bit;
1475
1476    for ( i = 0, bit = 1; i < MAX_PVC_NUMBER; i++, bit <<= 1 ) {
1477        if ( (g_atm_priv_data.conn_table & bit) != 0
1478            && g_atm_priv_data.conn[i].vcc == vcc )
1479            return i;
1480    }
1481
1482    return -1;
1483}
1484
1485#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
/*
 * Debug helper: hex-dump up to "len" bytes of an skb to the kernel log,
 * 16 bytes per line, prefixed with a title and (if ch >= 0) port/channel.
 * Active only when the matching RX/TX dump flag is set in
 * ifx_atm_dbg_enable.
 */
static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
{
    int i;

    /* bail out unless dumping is enabled for this direction */
    if ( !(ifx_atm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
        return;

    if ( skb->len < len )
        len = skb->len;

    /* sanity check: refuse lengths beyond one RX buffer */
    if ( len > RX_DMA_CH_AAL_BUF_SIZE ) {
        printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
        return;
    }

    if ( ch >= 0 )
        printk("%s (port %d, ch %d)\n", title, port, ch);
    else
        printk("%s\n", title);
    printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
    /* 1-based loop so "% 16" marks line starts/ends */
    for ( i = 1; i <= len; i++ ) {
        if ( i % 16 == 1 )
            printk(" %4d:", i - 1);
        printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF));
        if ( i % 16 == 0 )
            printk("\n");
    }
    if ( (i - 1) % 16 != 0 )
        printk("\n");
}
1516#endif
1517
1518static INLINE void proc_file_create(void)
1519{
1520#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1521    struct proc_dir_entry *res;
1522#endif
1523
1524    g_atm_dir = proc_mkdir("driver/ifx_atm", NULL);
1525
1526    create_proc_read_entry("version",
1527                            0,
1528                            g_atm_dir,
1529                            proc_read_version,
1530                            NULL);
1531
1532    res = create_proc_entry("mib",
1533                            0,
1534                            g_atm_dir);
1535    if ( res != NULL ) {
1536        res->read_proc = proc_read_mib;
1537        res->write_proc = proc_write_mib;
1538    }
1539
1540#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1541    res = create_proc_entry("dbg",
1542                            0,
1543                            g_atm_dir);
1544    if ( res != NULL ) {
1545        res->read_proc = proc_read_dbg;
1546        res->write_proc = proc_write_dbg;
1547    }
1548#endif
1549
1550#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
1551    create_proc_read_entry("htu",
1552                            0,
1553                            g_atm_dir,
1554                            proc_read_htu,
1555                            NULL);
1556
1557    create_proc_read_entry("txq",
1558                            0,
1559                            g_atm_dir,
1560                            proc_read_txq,
1561                            NULL);
1562#endif
1563}
1564
1565static INLINE void proc_file_delete(void)
1566{
1567#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
1568    remove_proc_entry("txq", g_atm_dir);
1569
1570    remove_proc_entry("htu", g_atm_dir);
1571#endif
1572
1573#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1574    remove_proc_entry("dbg", g_atm_dir);
1575#endif
1576
1577    remove_proc_entry("version", g_atm_dir);
1578
1579    remove_proc_entry("driver/ifx_atm", NULL);
1580}
1581
1582static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
1583{
1584    int len = 0;
1585
1586    len += ifx_atm_version(buf + len);
1587
1588    if ( offset >= len ) {
1589        *start = buf;
1590        *eof = 1;
1591        return 0;
1592    }
1593    *start = buf + offset;
1594    if ( (len -= offset) > count )
1595        return count;
1596    *eof = 1;
1597    return len;
1598}
1599
/*
 * read_proc handler for /proc/driver/ifx_atm/mib: prints the firmware
 * WAN MIB counters followed by the driver-side PDU counters.
 * NOTE(review): output is written at page + off without setting *start,
 * which only works while the whole text fits into a single read — the
 * other handlers in this file follow the same legacy pattern.
 */
static int proc_read_mib(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "Firmware\n");
    len += sprintf(page + off + len, " wrx_drophtu_cell = %u\n", WAN_MIB_TABLE->wrx_drophtu_cell);
    len += sprintf(page + off + len, " wrx_dropdes_pdu = %u\n", WAN_MIB_TABLE->wrx_dropdes_pdu);
    len += sprintf(page + off + len, " wrx_correct_pdu = %u\n", WAN_MIB_TABLE->wrx_correct_pdu);
    len += sprintf(page + off + len, " wrx_err_pdu = %u\n", WAN_MIB_TABLE->wrx_err_pdu);
    len += sprintf(page + off + len, " wrx_dropdes_cell = %u\n", WAN_MIB_TABLE->wrx_dropdes_cell);
    len += sprintf(page + off + len, " wrx_correct_cell = %u\n", WAN_MIB_TABLE->wrx_correct_cell);
    len += sprintf(page + off + len, " wrx_err_cell = %u\n", WAN_MIB_TABLE->wrx_err_cell);
    len += sprintf(page + off + len, " wrx_total_byte = %u\n", WAN_MIB_TABLE->wrx_total_byte);
    len += sprintf(page + off + len, " wtx_total_pdu = %u\n", WAN_MIB_TABLE->wtx_total_pdu);
    len += sprintf(page + off + len, " wtx_total_cell = %u\n", WAN_MIB_TABLE->wtx_total_cell);
    len += sprintf(page + off + len, " wtx_total_byte = %u\n", WAN_MIB_TABLE->wtx_total_byte);
    len += sprintf(page + off + len, "Driver\n");
    len += sprintf(page + off + len, " wrx_pdu = %u\n", g_atm_priv_data.wrx_pdu);
    len += sprintf(page + off + len, " wrx_drop_pdu = %u\n", g_atm_priv_data.wrx_drop_pdu);
    len += sprintf(page + off + len, " wtx_pdu = %u\n", g_atm_priv_data.wtx_pdu);
    len += sprintf(page + off + len, " wtx_err_pdu = %u\n", g_atm_priv_data.wtx_err_pdu);
    len += sprintf(page + off + len, " wtx_drop_pdu = %u\n", g_atm_priv_data.wtx_drop_pdu);

    *eof = 1;

    return len;
}
1627
/*
 * write_proc handler for /proc/driver/ifx_atm/mib.
 * Accepts "clear"/"clean" (optionally with "all") and zeroes both the
 * firmware WAN MIB table and the driver-side counters. Any other input
 * is silently ignored; always reports the full count as consumed.
 */
static int proc_write_mib(struct file *file, const char *buf, unsigned long count, void *data)
{
    char str[2048];
    char *p;
    int len, rlen;

    /* copy user input; rlen is the number of bytes actually copied */
    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    /* strip trailing and leading whitespace/control characters */
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return 0;

    if ( stricmp(p, "clear") == 0 || stricmp(p, "clear all") == 0
        || stricmp(p, "clean") == 0 || stricmp(p, "clean all") == 0 ) {
        memset(WAN_MIB_TABLE, 0, sizeof(*WAN_MIB_TABLE));
        g_atm_priv_data.wrx_pdu = 0;
        g_atm_priv_data.wrx_drop_pdu = 0;
        g_atm_priv_data.wtx_pdu = 0;
        g_atm_priv_data.wtx_err_pdu = 0;
        g_atm_priv_data.wtx_drop_pdu = 0;
    }

    return count;
}
1655
1656#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1657
1658static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
1659{
1660    int len = 0;
1661
1662    len += sprintf(page + off + len, "error print - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled");
1663    len += sprintf(page + off + len, "debug print - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
1664    len += sprintf(page + off + len, "assert - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled");
1665    len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
1666    len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
1667    len += sprintf(page + off + len, "qos - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_QOS) ? "enabled" : "disabled");
1668    len += sprintf(page + off + len, "dump init - %s\n", (ifx_atm_dbg_enable & DBG_ENABLE_MASK_DUMP_INIT) ? "enabled" : "disabled");
1669
1670    *eof = 1;
1671
1672    return len;
1673}
1674
/*
 * write_proc handler for the "dbg" entry.
 * Syntax: "enable|disable [flag...]" where each flag has a long and a
 * short spelling (e.g. "dump rx skb" / "rx"); with no flag, all bits
 * are toggled. The string table holds two spellings per mask, hence the
 * "i >> 1" when indexing dbg_enable_mask.
 */
static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
{
    /* long/short spelling pairs, same order as dbg_enable_mask */
    static const char *dbg_enable_mask_str[] = {
        " error print",
        " err",
        " debug print",
        " dbg",
        " assert",
        " assert",
        " dump rx skb",
        " rx",
        " dump tx skb",
        " tx",
        " dump qos",
        " qos",
        " dump init",
        " init",
        " all"
    };
    /* length of each spelling above (compare prefix without strlen) */
    static const int dbg_enable_mask_str_len[] = {
        12, 4,
        12, 4,
        7, 7,
        12, 3,
        12, 3,
        9, 4,
        10, 5,
        4
    };
    /* one mask per spelling pair; indexed with i >> 1 */
    u32 dbg_enable_mask[] = {
        DBG_ENABLE_MASK_ERR,
        DBG_ENABLE_MASK_DEBUG_PRINT,
        DBG_ENABLE_MASK_ASSERT,
        DBG_ENABLE_MASK_DUMP_SKB_RX,
        DBG_ENABLE_MASK_DUMP_SKB_TX,
        DBG_ENABLE_MASK_DUMP_QOS,
        DBG_ENABLE_MASK_DUMP_INIT,
        DBG_ENABLE_MASK_ALL
    };

    char str[2048];
    char *p;

    int len, rlen;

    int f_enable = 0;   /* +1 = enable, -1 = disable, 0 = no command */
    int i;

    /* copy and trim the user input (same pattern as proc_write_mib) */
    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return 0;

    if ( strincmp(p, "enable", 6) == 0 ) {
        p += 6;
        f_enable = 1;
    }
    else if ( strincmp(p, "disable", 7) == 0 ) {
        p += 7;
        f_enable = -1;
    }
    else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
        printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/all] > /proc/eth/dbg\n");
    }

    if ( f_enable ) {
        if ( *p == 0 ) {
            /* no flag given: apply to all debug bits */
            if ( f_enable > 0 )
                ifx_atm_dbg_enable |= DBG_ENABLE_MASK_ALL;
            else
                ifx_atm_dbg_enable &= ~DBG_ENABLE_MASK_ALL;
        }
        else {
            /* consume flag tokens until one fails to match */
            do {
                for ( i = 0; i < NUM_ENTITY(dbg_enable_mask_str); i++ )
                    if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
                        if ( f_enable > 0 )
                            ifx_atm_dbg_enable |= dbg_enable_mask[i >> 1];
                        else
                            ifx_atm_dbg_enable &= ~dbg_enable_mask[i >> 1];
                        p += dbg_enable_mask_str_len[i];
                        break;
                    }
            } while ( i < NUM_ENTITY(dbg_enable_mask_str) );
        }
    }

    return count;
}
1768
1769#endif
1770
1771#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
1772
/*
 * Format one HTU (header translation unit) table entry into buf.
 * Dumps the raw entry/mask/result words together with their decoded
 * pid/vpi/vci/pti fields; a cell-type result additionally shows the
 * cell id and the verification flag.
 * Returns the number of characters written into buf.
 */
static INLINE int print_htu(char *buf, int i)
{
    int len = 0;

    if ( HTU_ENTRY(i)->vld ) {
        len += sprintf(buf + len, "%2d. valid\n", i);
        len += sprintf(buf + len, " entry 0x%08x - pid %01x vpi %02x vci %04x pti %01x\n", *(u32*)HTU_ENTRY(i), HTU_ENTRY(i)->pid, HTU_ENTRY(i)->vpi, HTU_ENTRY(i)->vci, HTU_ENTRY(i)->pti);
        len += sprintf(buf + len, " mask 0x%08x - pid %01x vpi %02x vci %04x pti %01x\n", *(u32*)HTU_MASK(i), HTU_MASK(i)->pid_mask, HTU_MASK(i)->vpi_mask, HTU_MASK(i)->vci_mask, HTU_MASK(i)->pti_mask);
        len += sprintf(buf + len, " result 0x%08x - type: %s, qid: %d", *(u32*)HTU_RESULT(i), HTU_RESULT(i)->type ? "cell" : "AAL5", HTU_RESULT(i)->qid);
        if ( HTU_RESULT(i)->type )
            len += sprintf(buf + len, ", cell id: %d, verification: %s", HTU_RESULT(i)->cellid, HTU_RESULT(i)->ven ? "on" : "off");
        len += sprintf(buf + len, "\n");
    }
    else
        len += sprintf(buf + len, "%2d. invalid\n", i);

    return len;
}
1791
/*
 * /proc read handler dumping the HTU table.
 * Uses the classic read_proc windowing scheme: every entry is formatted
 * into a scratch buffer and only the part overlapping the requested
 * window [off, off+count) is copied to the output page. If the window
 * fills before the table ends, the partially copied last entry is
 * backed out (see the overrun label).
 */
static int proc_read_htu(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;                /* bytes generated so far (stream position) */
    int len_max = off + count;  /* end of the requested window */
    char *pstr;
    char str[1024];             /* scratch buffer for one formatted entry */
    int llen;

    int htuts = *CFG_WRX_HTUTS; /* number of HTU entries programmed into the PPE */
    int i;

    pstr = *start = page;

    /* NOTE(review): header is written at the page start even when off > 0;
       callers appear to read this file in one shot, so it never shows */
    llen = sprintf(pstr, "HTU Table (Max %d):\n", htuts);
    pstr += llen;
    len += llen;

    for ( i = 0; i < htuts; i++ ) {
        llen = print_htu(str, i);
        if ( len <= off && len + llen > off ) {
            /* entry straddles the window start: copy only its tail */
            memcpy(pstr, str + off - len, len + llen - off);
            pstr += len + llen - off;
        }
        else if ( len > off ) {
            /* entry lies fully inside the window */
            memcpy(pstr, str, llen);
            pstr += llen;
        }
        len += llen;
        if ( len >= len_max )
            goto PROC_READ_HTU_OVERRUN_END;
    }

    *eof = 1;

    return len - off;

PROC_READ_HTU_OVERRUN_END:

    /* window overran: report only up to the last fully generated entry */
    return len - llen - off;
}
1832
/*
 * Format one TX queue/DMA-channel configuration into buf.
 * A channel is considered valid when its bit is set in WTX_DMACH_ON;
 * for valid channels both the queue config word and the DMA channel
 * config word are dumped with decoded fields.
 * Returns the number of characters written into buf.
 */
static INLINE int print_tx_queue(char *buf, int i)
{
    int len = 0;

    if ( (*WTX_DMACH_ON & (1 << i)) ) {
        len += sprintf(buf + len, "%2d. valid\n", i);
        len += sprintf(buf + len, " queue 0x%08x - sbid %u, qsb %s\n", *(u32*)WTX_QUEUE_CONFIG(i), (unsigned int)WTX_QUEUE_CONFIG(i)->sbid, WTX_QUEUE_CONFIG(i)->qsben ? "enable" : "disable");
        len += sprintf(buf + len, " dma 0x%08x - base %08x, len %u, vlddes %u\n", *(u32*)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);
    }
    else
        len += sprintf(buf + len, "%2d. invalid\n", i);

    return len;
}
1847
1848static int proc_read_txq(char *page, char **start, off_t off, int count, int *eof, void *data)
1849{
1850    int len = 0;
1851    int len_max = off + count;
1852    char *pstr;
1853    char str[1024];
1854    int llen;
1855
1856    int i;
1857
1858    pstr = *start = page;
1859
1860    llen = sprintf(pstr, "TX Queue Config (Max %d):\n", *CFG_WTX_DCHNUM);
1861    pstr += llen;
1862    len += llen;
1863
1864    for ( i = 0; i < 16; i++ ) {
1865        llen = print_tx_queue(str, i);
1866        if ( len <= off && len + llen > off ) {
1867            memcpy(pstr, str + off - len, len + llen - off);
1868            pstr += len + llen - off;
1869        }
1870        else if ( len > off ) {
1871            memcpy(pstr, str, llen);
1872            pstr += llen;
1873        }
1874        len += llen;
1875        if ( len >= len_max )
1876            goto PROC_READ_HTU_OVERRUN_END;
1877    }
1878
1879    *eof = 1;
1880
1881    return len - off;
1882
1883PROC_READ_HTU_OVERRUN_END:
1884
1885    return len - llen - off;
1886}
1887
1888#endif
1889
/*
 * Case-insensitive string comparison (ASCII A-Z/a-z only).
 * Returns <0, 0 or >0, with strcmp-like ordering.
 */
static int stricmp(const char *p1, const char *p2)
{
    for ( ; *p1 && *p2; p1++, p2++ )
    {
        int a = (*p1 >= 'A' && *p1 <= 'Z') ? *p1 + ('a' - 'A') : *p1;
        int b = (*p2 >= 'A' && *p2 <= 'Z') ? *p2 + ('a' - 'A') : *p2;

        if ( a != b )
            return a - b;
    }

    /* at least one string exhausted: the shorter one compares lower */
    return *p1 - *p2;
}
1906
1907#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
/*
 * Case-insensitive comparison of at most n characters (ASCII only).
 * Returns <0, 0 or >0 with strncmp-like ordering; two strings that
 * match over the first n characters compare equal.
 */
static int strincmp(const char *p1, const char *p2, int n)
{
    for ( ; n && *p1 && *p2; p1++, p2++, n-- )
    {
        int a = (*p1 >= 'A' && *p1 <= 'Z') ? *p1 + ('a' - 'A') : *p1;
        int b = (*p2 >= 'A' && *p2 <= 'Z') ? *p2 + ('a' - 'A') : *p2;

        if ( a != b )
            return a - b;
    }

    /* n exhausted -> equal over the compared prefix; otherwise a string
       ended early and the shorter one compares lower */
    return n ? *p1 - *p2 : 0;
}
1925#endif
1926
1927static INLINE int ifx_atm_version(char *buf)
1928{
1929    int len = 0;
1930    unsigned int major, minor;
1931
1932    ifx_atm_get_fw_ver(&major, &minor);
1933
1934    len += sprintf(buf + len, "Infineon Technologies ATM driver version %d.%d.%d\n", IFX_ATM_VER_MAJOR, IFX_ATM_VER_MID, IFX_ATM_VER_MINOR);
1935    len += sprintf(buf + len, "Infineon Technologies ATM (A1) firmware version %d.%d\n", major, minor);
1936
1937    return len;
1938}
1939
1940#ifdef MODULE
/* Reset the PPE when the driver is loaded as a module (not implemented yet). */
static INLINE void reset_ppe(void)
{
    // TODO:
}
1945#endif
1946
1947static INLINE void check_parameters(void)
1948{
1949    /* Please refer to Amazon spec 15.4 for setting these values. */
1950    if ( qsb_tau < 1 )
1951        qsb_tau = 1;
1952    if ( qsb_tstep < 1 )
1953        qsb_tstep = 1;
1954    else if ( qsb_tstep > 4 )
1955        qsb_tstep = 4;
1956    else if ( qsb_tstep == 3 )
1957        qsb_tstep = 2;
1958
1959    /* There is a delay between PPE write descriptor and descriptor is */
1960    /* really stored in memory. Host also has this delay when writing */
1961    /* descriptor. So PPE will use this value to determine if the write */
1962    /* operation makes effect. */
1963    if ( write_descriptor_delay < 0 )
1964        write_descriptor_delay = 0;
1965
1966    if ( aal5_fill_pattern < 0 )
1967        aal5_fill_pattern = 0;
1968    else
1969        aal5_fill_pattern &= 0xFF;
1970
1971    /* Because of the limitation of length field in descriptors, the packet */
1972    /* size could not be larger than 64K minus overhead size. */
1973    if ( aal5r_max_packet_size < 0 )
1974        aal5r_max_packet_size = 0;
1975    else if ( aal5r_max_packet_size >= 65535 - MAX_RX_FRAME_EXTRA_BYTES )
1976        aal5r_max_packet_size = 65535 - MAX_RX_FRAME_EXTRA_BYTES;
1977    if ( aal5r_min_packet_size < 0 )
1978        aal5r_min_packet_size = 0;
1979    else if ( aal5r_min_packet_size > aal5r_max_packet_size )
1980        aal5r_min_packet_size = aal5r_max_packet_size;
1981    if ( aal5s_max_packet_size < 0 )
1982        aal5s_max_packet_size = 0;
1983    else if ( aal5s_max_packet_size >= 65535 - MAX_TX_FRAME_EXTRA_BYTES )
1984        aal5s_max_packet_size = 65535 - MAX_TX_FRAME_EXTRA_BYTES;
1985    if ( aal5s_min_packet_size < 0 )
1986        aal5s_min_packet_size = 0;
1987    else if ( aal5s_min_packet_size > aal5s_max_packet_size )
1988        aal5s_min_packet_size = aal5s_max_packet_size;
1989
1990    if ( dma_rx_descriptor_length < 2 )
1991        dma_rx_descriptor_length = 2;
1992    if ( dma_tx_descriptor_length < 2 )
1993        dma_tx_descriptor_length = 2;
1994    if ( dma_rx_clp1_descriptor_threshold < 0 )
1995        dma_rx_clp1_descriptor_threshold = 0;
1996    else if ( dma_rx_clp1_descriptor_threshold > dma_rx_descriptor_length )
1997        dma_rx_clp1_descriptor_threshold = dma_rx_descriptor_length;
1998
1999    if ( dma_tx_descriptor_length < 2 )
2000        dma_tx_descriptor_length = 2;
2001}
2002
/*
 * Allocate and initialize all driver-private DMA structures:
 * RX (AAL) descriptor ring with pre-allocated skb buffers, RX (OAM)
 * descriptor ring with a dedicated cell buffer area, and per-PVC TX
 * descriptor rings plus skb pointer arrays.
 * Returns IFX_SUCCESS or IFX_ERROR on allocation failure. On failure
 * the caller (ifx_atm_init) releases partial allocations through
 * clear_priv_data().
 */
static INLINE int init_priv_data(void)
{
    void *p;
    int i;
    struct rx_descriptor rx_desc = {0};
    struct sk_buff *skb;
    volatile struct tx_descriptor *p_tx_desc;
    struct sk_buff **ppskb;

    // clear atm private data structure
    memset(&g_atm_priv_data, 0, sizeof(g_atm_priv_data));

    // allocate memory for RX (AAL) descriptors
    p = kzalloc(dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    // write back and invalidate so the PPE never sees stale cache lines
    dma_cache_wback_inv((unsigned long)p, dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_atm_priv_data.aal_desc_base = p;
    // align the ring and access it through KSEG1 (uncached on MIPS)
    p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    g_atm_priv_data.aal_desc = (volatile struct rx_descriptor *)p;

    // allocate memory for RX (OAM) descriptors
    p = kzalloc(RX_DMA_CH_OAM_DESC_LEN * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    dma_cache_wback_inv((unsigned long)p, RX_DMA_CH_OAM_DESC_LEN * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_atm_priv_data.oam_desc_base = p;
    p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    g_atm_priv_data.oam_desc = (volatile struct rx_descriptor *)p;

    // allocate memory for RX (OAM) buffer
    p = kzalloc(RX_DMA_CH_OAM_DESC_LEN * RX_DMA_CH_OAM_BUF_SIZE + DATA_BUFFER_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    dma_cache_wback_inv((unsigned long)p, RX_DMA_CH_OAM_DESC_LEN * RX_DMA_CH_OAM_BUF_SIZE + DATA_BUFFER_ALIGNMENT);
    g_atm_priv_data.oam_buf_base = p;
    p = (void *)(((unsigned int)p + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1));
    g_atm_priv_data.oam_buf = p;

    // allocate memory for TX descriptors (one ring per PVC)
    p = kzalloc(MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    dma_cache_wback_inv((unsigned long)p, MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
    g_atm_priv_data.tx_desc_base = p;

    // allocate memory for TX skb pointers (+4 for 4-byte alignment slack)
    p = kzalloc(MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
    if ( p == NULL )
        return IFX_ERROR;
    dma_cache_wback_inv((unsigned long)p, MAX_PVC_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
    g_atm_priv_data.tx_skb_base = p;

    // setup RX (AAL) descriptors: owned by hardware, one full-frame buffer each
    rx_desc.own = 1;
    rx_desc.c = 0;
    rx_desc.sop = 1;
    rx_desc.eop = 1;
    rx_desc.byteoff = 0;
    rx_desc.id = 0;
    rx_desc.err = 0;
    rx_desc.datalen = RX_DMA_CH_AAL_BUF_SIZE;
    for ( i = 0; i < dma_rx_descriptor_length; i++ ) {
        skb = alloc_skb_rx();
        if ( skb == NULL )
            return IFX_ERROR;
        // dataptr holds a 28-bit word address (buffer address >> 2)
        rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
        g_atm_priv_data.aal_desc[i] = rx_desc;
    }

    // setup RX (OAM) descriptors, pointing into the uncached OAM buffer area
    p = (void *)((unsigned int)g_atm_priv_data.oam_buf | KSEG1);
    rx_desc.own = 1;
    rx_desc.c = 0;
    rx_desc.sop = 1;
    rx_desc.eop = 1;
    rx_desc.byteoff = 0;
    rx_desc.id = 0;
    rx_desc.err = 0;
    rx_desc.datalen = RX_DMA_CH_OAM_BUF_SIZE;
    for ( i = 0; i < RX_DMA_CH_OAM_DESC_LEN; i++ ) {
        rx_desc.dataptr = ((unsigned int)p >> 2) & 0x0FFFFFFF;
        g_atm_priv_data.oam_desc[i] = rx_desc;
        p = (void *)((unsigned int)p + RX_DMA_CH_OAM_BUF_SIZE);
    }

    // setup TX descriptors and skb pointers: carve per-PVC slices out of
    // the aligned base allocations
    p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_atm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    ppskb = (struct sk_buff **)(((unsigned int)g_atm_priv_data.tx_skb_base + 3) & ~3);
    for ( i = 0; i < MAX_PVC_NUMBER; i++ ) {
        g_atm_priv_data.conn[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
        g_atm_priv_data.conn[i].tx_skb = &ppskb[i * dma_tx_descriptor_length];
    }

    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        g_atm_priv_data.port[i].tx_max_cell_rate = DEFAULT_TX_LINK_RATE;

    return IFX_SUCCESS;
}
2102
2103static INLINE void clear_priv_data(void)
2104{
2105    int i, j;
2106    struct sk_buff *skb;
2107
2108    for ( i = 0; i < MAX_PVC_NUMBER; i++ ) {
2109        if ( g_atm_priv_data.conn[i].tx_skb != NULL ) {
2110            for ( j = 0; j < dma_tx_descriptor_length; j++ )
2111                if ( g_atm_priv_data.conn[i].tx_skb[j] != NULL )
2112                    dev_kfree_skb_any(g_atm_priv_data.conn[i].tx_skb[j]);
2113        }
2114    }
2115
2116    if ( g_atm_priv_data.tx_skb_base != NULL )
2117        kfree(g_atm_priv_data.tx_skb_base);
2118
2119    if ( g_atm_priv_data.tx_desc_base != NULL )
2120        kfree(g_atm_priv_data.tx_desc_base);
2121
2122    if ( g_atm_priv_data.oam_buf_base != NULL )
2123        kfree(g_atm_priv_data.oam_buf_base);
2124
2125    if ( g_atm_priv_data.oam_desc_base != NULL )
2126        kfree(g_atm_priv_data.oam_desc_base);
2127
2128    if ( g_atm_priv_data.aal_desc_base != NULL ) {
2129        for ( i = 0; i < dma_rx_descriptor_length; i++ ) {
2130            if ( g_atm_priv_data.aal_desc[i].sop || g_atm_priv_data.aal_desc[i].eop ) { // descriptor initialized
2131                skb = get_skb_rx_pointer(g_atm_priv_data.aal_desc[i].dataptr);
2132                dev_kfree_skb_any(skb);
2133            }
2134        }
2135        kfree(g_atm_priv_data.aal_desc_base);
2136    }
2137}
2138
/*
 * Program the PPE RX side: general registers, WRX queue configuration,
 * WRX DMA channel configuration, and the HTU (header translation unit)
 * table that routes incoming cells to queues. PVC entries are written
 * invalid (vld = 0) and filled in later when a connection opens; the
 * OAM entries (F4 segment vci 3, F4 end-to-end vci 4, F5 via PTI) are
 * prepared here and validated via validate_oam_htu_entry().
 */
static INLINE void init_rx_tables(void)
{
    int i;
    struct wrx_queue_config wrx_queue_config = {0};
    struct wrx_dma_channel_config wrx_dma_channel_config = {0};
    struct htu_entry htu_entry = {0};
    struct htu_result htu_result = {0};
    struct htu_mask htu_mask = { set: 0x01,
                                    clp: 0x01,
                                    pid_mask: 0x00,
                                    vpi_mask: 0x00,
                                    vci_mask: 0x00,
                                    pti_mask: 0x00,
                                    clear: 0x00};

    /*
     * General Registers
     */
    *CFG_WRX_HTUTS = MAX_PVC_NUMBER + OAM_HTU_ENTRY_NUMBER;
    *CFG_WRX_QNUM = MAX_QUEUE_NUMBER;
    *CFG_WRX_DCHNUM = RX_DMA_CH_TOTAL;
    *WRX_DMACH_ON = (1 << RX_DMA_CH_TOTAL) - 1;   /* enable all RX DMA channels */
    *WRX_HUNT_BITTH = DEFAULT_RX_HUNT_BITTH;

    /*
     * WRX Queue Configuration Table
     * All queues share the AAL channel; the OAM queue is redirected below.
     */
    wrx_queue_config.uumask = 0;
    wrx_queue_config.cpimask = 0;
    wrx_queue_config.uuexp = 0;
    wrx_queue_config.cpiexp = 0;
    wrx_queue_config.mfs = aal5r_max_packet_size;
    wrx_queue_config.oversize = aal5r_max_packet_size;
    wrx_queue_config.undersize = aal5r_min_packet_size;
    wrx_queue_config.errdp = aal5r_drop_error_packet;
    wrx_queue_config.dmach = RX_DMA_CH_AAL;
    for ( i = 0; i < MAX_QUEUE_NUMBER; i++ )
        *WRX_QUEUE_CONFIG(i) = wrx_queue_config;
    WRX_QUEUE_CONFIG(OAM_RX_QUEUE)->dmach = RX_DMA_CH_OAM;

    /*
     * WRX DMA Channel Configuration Table
     * desba takes a 28-bit word address (descriptor base >> 2).
     */
    wrx_dma_channel_config.chrl = 0;
    wrx_dma_channel_config.clp1th = dma_rx_clp1_descriptor_threshold;
    wrx_dma_channel_config.mode = 0;
    wrx_dma_channel_config.rlcfg = 0;

    wrx_dma_channel_config.deslen = RX_DMA_CH_OAM_DESC_LEN;
    wrx_dma_channel_config.desba = ((unsigned int)g_atm_priv_data.oam_desc >> 2) & 0x0FFFFFFF;
    *WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_OAM) = wrx_dma_channel_config;

    wrx_dma_channel_config.deslen = dma_rx_descriptor_length;
    wrx_dma_channel_config.desba = ((unsigned int)g_atm_priv_data.aal_desc >> 2) & 0x0FFFFFFF;
    *WRX_DMA_CHANNEL_CONFIG(RX_DMA_CH_AAL) = wrx_dma_channel_config;

    /*
     * HTU Tables
     * PVC entries are written with vld = 0 (htu_entry zero-initialized);
     * only the result's queue id is pre-assigned per PVC slot.
     */
    for ( i = 0; i < MAX_PVC_NUMBER; i++ )
    {
        htu_result.qid = (unsigned int)i;

        *HTU_ENTRY(i + OAM_HTU_ENTRY_NUMBER) = htu_entry;
        *HTU_MASK(i + OAM_HTU_ENTRY_NUMBER) = htu_mask;
        *HTU_RESULT(i + OAM_HTU_ENTRY_NUMBER) = htu_result;
    }
    /* OAM HTU Entry */
    htu_entry.vci = 0x03;          /* F4 segment OAM cells use vci 3 */
    htu_mask.pid_mask = 0x03;
    htu_mask.vpi_mask = 0xFF;
    htu_mask.vci_mask = 0x0000;
    htu_mask.pti_mask = 0x07;
    htu_result.cellid = OAM_RX_QUEUE;
    htu_result.type = 1;           /* deliver as raw cell, not AAL5 */
    htu_result.ven = 1;
    htu_result.qid = OAM_RX_QUEUE;
    *HTU_RESULT(OAM_F4_SEG_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F4_SEG_HTU_ENTRY) = htu_mask;
    *HTU_ENTRY(OAM_F4_SEG_HTU_ENTRY) = htu_entry;
    htu_entry.vci = 0x04;          /* F4 end-to-end OAM cells use vci 4 */
    htu_result.cellid = OAM_RX_QUEUE;
    htu_result.type = 1;
    htu_result.ven = 1;
    htu_result.qid = OAM_RX_QUEUE;
    *HTU_RESULT(OAM_F4_TOT_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F4_TOT_HTU_ENTRY) = htu_mask;
    *HTU_ENTRY(OAM_F4_TOT_HTU_ENTRY) = htu_entry;
    htu_entry.vci = 0x00;
    htu_entry.pti = 0x04;          /* F5 OAM is identified by the PTI field */
    htu_mask.vci_mask = 0xFFFF;
    htu_mask.pti_mask = 0x01;
    htu_result.cellid = OAM_RX_QUEUE;
    htu_result.type = 1;
    htu_result.ven = 1;
    htu_result.qid = OAM_RX_QUEUE;
    *HTU_RESULT(OAM_F5_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_F5_HTU_ENTRY) = htu_mask;
    *HTU_ENTRY(OAM_F5_HTU_ENTRY) = htu_entry;
#if defined(ENABLE_ATM_RETX) && ENABLE_ATM_RETX
    /* ATM retransmission: catch vpi 1 / vci 1 management cells */
    htu_entry.pid = 0x0;
    htu_entry.vpi = 0x01;
    htu_entry.vci = 0x0001;
    htu_entry.pti = 0x00;
    htu_mask.pid_mask = 0x0;
    htu_mask.vpi_mask = 0x00;
    htu_mask.vci_mask = 0x0000;
    htu_mask.pti_mask = 0x3;
    htu_result.cellid = OAM_RX_QUEUE;
    htu_result.type = 1;
    htu_result.ven = 1;
    htu_result.qid = OAM_RX_QUEUE;
    *HTU_RESULT(OAM_ARQ_HTU_ENTRY) = htu_result;
    *HTU_MASK(OAM_ARQ_HTU_ENTRY) = htu_mask;
    *HTU_ENTRY(OAM_ARQ_HTU_ENTRY) = htu_entry;
#endif
}
2256
/*
 * Program the PPE TX side: general registers, per-port configuration,
 * WTX queue configuration and WTX DMA channel configuration. Channels
 * below FIRST_QSB_QID are left unused (zero length); each remaining
 * channel is bound to one PVC's TX descriptor ring.
 */
static INLINE void init_tx_tables(void)
{
    int i;
    struct wtx_queue_config wtx_queue_config = {0};
    struct wtx_dma_channel_config wtx_dma_channel_config = {0};
    struct wtx_port_config wtx_port_config = { res1: 0,
                                                qid: 0,
                                                qsben: 1};

    /*
     * General Registers
     */
    *CFG_WTX_DCHNUM = MAX_TX_DMA_CHANNEL_NUMBER;
    /* enable only the channels from FIRST_QSB_QID upward */
    *WTX_DMACH_ON = ((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1) ^ ((1 << FIRST_QSB_QID) - 1);
    *CFG_WRDES_DELAY = write_descriptor_delay;

    /*
     * WTX Port Configuration Table
     */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        *WTX_PORT_CONFIG(i) = wtx_port_config;

    /*
     * WTX Queue Configuration Table
     */
    wtx_queue_config.type = 0x0;
    wtx_queue_config.qsben = 1;
    wtx_queue_config.sbid = 0;
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        *WTX_QUEUE_CONFIG(i) = wtx_queue_config;

    /*
     * WTX DMA Channel Configuration Table
     */
    wtx_dma_channel_config.mode = 0;
    wtx_dma_channel_config.deslen = 0;
    wtx_dma_channel_config.desba = 0;
    for ( i = 0; i < FIRST_QSB_QID; i++ )
        *WTX_DMA_CHANNEL_CONFIG(i) = wtx_dma_channel_config;
    /* normal connection: desba takes a 28-bit word address (ring base >> 2) */
    wtx_dma_channel_config.deslen = dma_tx_descriptor_length;
    for ( ; i < MAX_TX_DMA_CHANNEL_NUMBER ; i++ ) {
        wtx_dma_channel_config.desba = ((unsigned int)g_atm_priv_data.conn[i - FIRST_QSB_QID].tx_desc >> 2) & 0x0FFFFFFF;
        *WTX_DMA_CHANNEL_CONFIG(i) = wtx_dma_channel_config;
    }
}
2303
2304
2305
2306/*
2307 * ####################################
2308 * Global Function
2309 * ####################################
2310 */
2311
2312static int atm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
2313{
2314    int i, j;
2315
2316    ASSERT(port_cell != NULL, "port_cell is NULL");
2317    ASSERT(xdata_addr != NULL, "xdata_addr is NULL");
2318
2319    for ( j = 0; j < ATM_PORT_NUMBER && j < port_cell->port_num; j++ )
2320        if ( port_cell->tx_link_rate[j] > 0 )
2321            break;
2322    for ( i = 0; i < ATM_PORT_NUMBER && i < port_cell->port_num; i++ )
2323        g_atm_priv_data.port[i].tx_max_cell_rate = port_cell->tx_link_rate[i] > 0 ? port_cell->tx_link_rate[i] : port_cell->tx_link_rate[j];
2324
2325    qsb_global_set();
2326
2327    for ( i = 0; i < MAX_PVC_NUMBER; i++ )
2328        if ( g_atm_priv_data.conn[i].vcc != NULL )
2329            set_qsb(g_atm_priv_data.conn[i].vcc, &g_atm_priv_data.conn[i].vcc->qos, i);
2330
2331    // TODO: ReTX set xdata_addr
2332    g_xdata_addr = xdata_addr;
2333
2334    g_showtime = 1;
2335
2336#if defined(CONFIG_VR9)
2337    IFX_REG_W32(0x0F, UTP_CFG);
2338#endif
2339
2340    pr_debug("enter showtime, cell rate: 0 - %d, 1 - %d, xdata addr: 0x%08x\n", g_atm_priv_data.port[0].tx_max_cell_rate, g_atm_priv_data.port[1].tx_max_cell_rate, (unsigned int)g_xdata_addr);
2341
2342    return IFX_SUCCESS;
2343}
2344
/*
 * DSL showtime exit callback (invoked by the MEI driver when the line
 * drops). Disables the UTOPIA port on VR9, clears the showtime flag and
 * the ReTX data address.
 * Returns IFX_SUCCESS.
 */
static int atm_showtime_exit(void)
{
#if defined(CONFIG_VR9)
    IFX_REG_W32(0x00, UTP_CFG);
#endif

    g_showtime = 0;

    // TODO: ReTX clean state
    g_xdata_addr = NULL;

    pr_debug("leave showtime\n");

    return IFX_SUCCESS;
}
2360
2361
2362
2363/*
2364 * ####################################
2365 * Init/Cleanup API
2366 * ####################################
2367 */
2368
2369/*
2370 * Description:
2371 * Initialize global variables, PP32, comunication structures, register IRQ
2372 * and register device.
2373 * Input:
2374 * none
2375 * Output:
2376 * 0 --- successful
2377 * else --- failure, usually it is negative value of error code
2378 */
2379static int __devinit ifx_atm_init(void)
2380{
2381    int ret;
2382    int port_num;
2383    struct port_cell_info port_cell = {0};
2384    int i, j;
2385    char ver_str[256];
2386
2387#ifdef MODULE
2388    reset_ppe();
2389#endif
2390
2391    check_parameters();
2392
2393    ret = init_priv_data();
2394    if ( ret != IFX_SUCCESS ) {
2395        err("INIT_PRIV_DATA_FAIL");
2396        goto INIT_PRIV_DATA_FAIL;
2397    }
2398
2399    ifx_atm_init_chip();
2400    init_rx_tables();
2401    init_tx_tables();
2402
2403    /* create devices */
2404    for ( port_num = 0; port_num < ATM_PORT_NUMBER; port_num++ ) {
2405#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
2406    g_atm_priv_data.port[port_num].dev = atm_dev_register("ifxmips_atm", &g_ifx_atm_ops, -1, NULL);
2407#else
2408    g_atm_priv_data.port[port_num].dev = atm_dev_register("ifxmips_atm", NULL, &g_ifx_atm_ops, -1, NULL);
2409#endif
2410        if ( !g_atm_priv_data.port[port_num].dev ) {
2411            err("failed to register atm device %d!", port_num);
2412            ret = -EIO;
2413            goto ATM_DEV_REGISTER_FAIL;
2414        }
2415        else {
2416            g_atm_priv_data.port[port_num].dev->ci_range.vpi_bits = 8;
2417            g_atm_priv_data.port[port_num].dev->ci_range.vci_bits = 16;
2418            g_atm_priv_data.port[port_num].dev->link_rate = g_atm_priv_data.port[port_num].tx_max_cell_rate;
2419            g_atm_priv_data.port[port_num].dev->dev_data = (void*)port_num;
2420        }
2421    }
2422
2423    /* register interrupt handler */
2424    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "atm_mailbox_isr", &g_atm_priv_data);
2425    if ( ret ) {
2426        if ( ret == -EBUSY ) {
2427            err("IRQ may be occupied by other driver, please reconfig to disable it.");
2428        }
2429        else {
2430            err("request_irq fail");
2431        }
2432        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
2433    }
2434    disable_irq(PPE_MAILBOX_IGU1_INT);
2435
2436    ret = ifx_pp32_start(0);
2437    if ( ret ) {
2438        err("ifx_pp32_start fail!");
2439        goto PP32_START_FAIL;
2440    }
2441
2442    port_cell.port_num = ATM_PORT_NUMBER;
2443    ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);
2444    if ( g_showtime ) {
2445        for ( i = 0; i < ATM_PORT_NUMBER; i++ )
2446            if ( port_cell.tx_link_rate[i] != 0 )
2447                break;
2448        for ( j = 0; j < ATM_PORT_NUMBER; j++ )
2449            g_atm_priv_data.port[j].tx_max_cell_rate = port_cell.tx_link_rate[j] != 0 ? port_cell.tx_link_rate[j] : port_cell.tx_link_rate[i];
2450    }
2451
2452    qsb_global_set();
2453    validate_oam_htu_entry();
2454
2455    /* create proc file */
2456    proc_file_create();
2457
2458    ifx_mei_atm_showtime_enter = atm_showtime_enter;
2459    ifx_mei_atm_showtime_exit = atm_showtime_exit;
2460
2461    ifx_atm_version(ver_str);
2462    printk(KERN_INFO "%s", ver_str);
2463
2464    printk("ifxmips_atm: ATM init succeed\n");
2465
2466    return IFX_SUCCESS;
2467
2468PP32_START_FAIL:
2469    free_irq(PPE_MAILBOX_IGU1_INT, &g_atm_priv_data);
2470REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
2471ATM_DEV_REGISTER_FAIL:
2472    while ( port_num-- > 0 )
2473        atm_dev_deregister(g_atm_priv_data.port[port_num].dev);
2474INIT_PRIV_DATA_FAIL:
2475    clear_priv_data();
2476    printk("ifxmips_atm: ATM init failed\n");
2477    return ret;
2478}
2479
2480/*
2481 * Description:
2482 * Release memory, free IRQ, and deregister device.
2483 * Input:
2484 * none
2485 * Output:
2486 * none
2487 */
/*
 * Module cleanup: detach the showtime callbacks, remove proc entries,
 * invalidate the OAM HTU entries, stop the PP32 firmware, free the
 * mailbox IRQ, deregister every ATM device, uninit the chip and free
 * private data — the reverse of ifx_atm_init.
 */
static void __exit ifx_atm_exit(void)
{
    int port_num;

    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit = NULL;

    proc_file_delete();

    invalidate_oam_htu_entry();

    ifx_pp32_stop(0);

    free_irq(PPE_MAILBOX_IGU1_INT, &g_atm_priv_data);

    for ( port_num = 0; port_num < ATM_PORT_NUMBER; port_num++ )
        atm_dev_deregister(g_atm_priv_data.port[port_num].dev);

    ifx_atm_uninit_chip();

    clear_priv_data();
}
2510
/* module entry/exit hooks and license declaration */
module_init(ifx_atm_init);
module_exit(ifx_atm_exit);
MODULE_LICENSE("Dual BSD/GPL");
2514

Archive Download this file



interactive