package/platform/lantiq/ltq-ptm/src/ifxmips_ptm_vdsl.c

/******************************************************************************
**
** FILE NAME : ifxmips_ptm_vdsl.c
** PROJECT : UEIP
** MODULES : PTM
**
** DATE : 7 Jul 2009
** AUTHOR : Xu Liang
** DESCRIPTION : PTM driver common source file (core functions for VR9)
** COPYRIGHT : Copyright (c) 2006
** Infineon Technologies AG
** Am Campeon 1-12, 85579 Neubiberg, Germany
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** HISTORY
** $Date $Author $Comment
** 07 JUL 2009 Xu Liang Init Version
*******************************************************************************/

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>

#include "ifxmips_ptm_vdsl.h"
#include <lantiq_soc.h>

#define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
#define MODULE_PARM(a, b) module_param(a, int, 0)

static int wanqos_en = 0;
static int queue_gamma_map[4] = {0xFE, 0x01, 0x00, 0x00};

MODULE_PARM(wanqos_en, "i");
MODULE_PARM_DESC(wanqos_en, "WAN QoS support, 1 - enabled, 0 - disabled.");

MODULE_PARM_ARRAY(queue_gamma_map, "4-4i");
MODULE_PARM_DESC(queue_gamma_map, "TX QoS queues mapping to 4 TX Gamma interfaces.");

extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
extern int (*ifx_mei_atm_showtime_exit)(void);
extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);

static int g_showtime = 0;
static void *g_xdata_addr = NULL;


#define ENABLE_TMP_DBG 0

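/*
 * Helper: read the current PP32 (PPE packet processor) clock rate in Hz via
 * the clock API; used below to derive the TX QoS time tick.
 */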
unsigned long cgu_get_pp32_clock(void)
{
    struct clk *c = clk_get_ppe();
    unsigned long rate = clk_get_rate(c);
    clk_put(c);
    return rate;
}

static void ptm_setup(struct net_device *, int);
static struct net_device_stats *ptm_get_stats(struct net_device *);
static int ptm_open(struct net_device *);
static int ptm_stop(struct net_device *);
static unsigned int ptm_poll(int, unsigned int);
static int ptm_napi_poll(struct napi_struct *, int);
static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
static int ptm_ioctl(struct net_device *, struct ifreq *, int);
static void ptm_tx_timeout(struct net_device *);

static inline struct sk_buff* alloc_skb_rx(void);
static inline struct sk_buff* alloc_skb_tx(unsigned int);
static inline struct sk_buff *get_skb_pointer(unsigned int);
static inline int get_tx_desc(unsigned int, unsigned int *);

/*
 * Mailbox handler and signal function
 */
static irqreturn_t mailbox_irq_handler(int, void *);

/*
 * Tasklet to Handle Swap Descriptors
 */
static void do_swap_desc_tasklet(unsigned long);


/*
 * Init & clean-up functions
 */
static inline int init_priv_data(void);
static inline void clear_priv_data(void);
static inline int init_tables(void);
static inline void clear_tables(void);

static int g_wanqos_en = 0;

static int g_queue_gamma_map[4];

static struct ptm_priv_data g_ptm_priv_data;

static struct net_device_ops g_ptm_netdev_ops = {
    .ndo_get_stats = ptm_get_stats,
    .ndo_open = ptm_open,
    .ndo_stop = ptm_stop,
    .ndo_start_xmit = ptm_hard_start_xmit,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_change_mtu = eth_change_mtu,
    .ndo_do_ioctl = ptm_ioctl,
    .ndo_tx_timeout = ptm_tx_timeout,
};

static struct net_device *g_net_dev[1] = {0};
static char *g_net_dev_name[1] = {"ptm0"};

static int g_ptm_prio_queue_map[8];

static DECLARE_TASKLET(g_swap_desc_tasklet, do_swap_desc_tasklet, 0);


unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;

/*
 * ####################################
 * Local Function
 * ####################################
 */

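/*
 * Set up the PTM net_device: attach the netdev_ops and NAPI handler, set the
 * watchdog timeout and program a default MAC address.
 */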
static void ptm_setup(struct net_device *dev, int ndev)
{
    dev->netdev_ops = &g_ptm_netdev_ops;
    netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 16);
    dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;

    dev->dev_addr[0] = 0x00;
    dev->dev_addr[1] = 0x20;
    dev->dev_addr[2] = 0xda;
    dev->dev_addr[3] = 0x86;
    dev->dev_addr[4] = 0x23;
    dev->dev_addr[5] = 0x75 + ndev;
}

static struct net_device_stats *ptm_get_stats(struct net_device *dev)
{
    struct net_device_stats *s;

    if ( dev != g_net_dev[0] )
        return NULL;
    s = &g_ptm_priv_data.itf[0].stats;

    return s;
}

static int ptm_open(struct net_device *dev)
{
    ASSERT(dev == g_net_dev[0], "incorrect device");

    napi_enable(&g_ptm_priv_data.itf[0].napi);

    IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);

    netif_start_queue(dev);

    return 0;
}

static int ptm_stop(struct net_device *dev)
{
    ASSERT(dev == g_net_dev[0], "incorrect device");

    IFX_REG_W32_MASK(1 | (1 << 17), 0, MBOX_IGU1_IER);

    napi_disable(&g_ptm_priv_data.itf[0].napi);

    netif_stop_queue(dev);

    return 0;
}

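/*
 * RX poll loop: walk the WAN RX descriptor ring while descriptors are owned
 * by the CPU, pass completed packets to the stack, replace the buffer with a
 * newly allocated one when possible and hand the descriptor back to the PP32.
 */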
static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
{
    unsigned int work_done = 0;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    struct sk_buff *skb, *new_skb;

    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    while ( work_done < work_to_do ) {
        desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
        if ( desc->own /* || !desc->c */ ) // if PP32 hold descriptor or descriptor not completed
            break;
        if ( ++g_ptm_priv_data.itf[0].rx_desc_pos == WAN_RX_DESC_NUM )
            g_ptm_priv_data.itf[0].rx_desc_pos = 0;

        reg_desc = *desc;
        skb = get_skb_pointer(reg_desc.dataptr);
        ASSERT(skb != NULL, "invalid pointer skb == NULL");

        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);

            // parse protocol header
            skb->dev = g_net_dev[0];
            skb->protocol = eth_type_trans(skb, skb->dev);

            g_net_dev[0]->last_rx = jiffies;

            netif_receive_skb(skb);

            g_ptm_priv_data.itf[0].stats.rx_packets++;
            g_ptm_priv_data.itf[0].stats.rx_bytes += reg_desc.datalen;

            reg_desc.dataptr = (unsigned int)new_skb->data & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }

        reg_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
        reg_desc.own = 1;
        reg_desc.c = 0;

        /* write descriptor to memory */
        *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
        wmb();
        *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;

        work_done++;
    }

    return work_done;
}

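/*
 * NAPI callback: drain up to 'budget' packets, then complete NAPI and
 * re-enable the RX mailbox interrupt once the ring has no more work.
 */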
static int ptm_napi_poll(struct napi_struct *napi, int budget)
{
    int ndev = 0;
    unsigned int work_done;

    work_done = ptm_poll(ndev, budget);

    // interface down
    if ( !netif_running(napi->dev) ) {
        napi_complete(napi);
        return work_done;
    }

    // clear interrupt
    IFX_REG_W32_MASK(0, 1, MBOX_IGU1_ISRC);
    // no more traffic
    if ( WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos].own ) { // if PP32 hold descriptor
        napi_complete(napi);
        IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
        return work_done;
    }

    // next round
    return work_done;
}

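/*
 * TX path: claim a CPU-to-WAN descriptor, make sure the payload sits in a
 * DMA-safe, properly aligned buffer (copying to a fresh skb if necessary),
 * stash the skb pointer in front of the data for later release, then fill in
 * the descriptor and pass ownership to the PP32 firmware.
 */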
static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned int f_full;
    int desc_base;
    volatile struct tx_descriptor *desc;
    struct tx_descriptor reg_desc = {0};
    struct sk_buff *skb_to_free;
    unsigned int byteoff;

    ASSERT(dev == g_net_dev[0], "incorrect device");

    if ( !g_showtime ) {
        err("not in showtime");
        goto PTM_HARD_START_XMIT_FAIL;
    }

    /* allocate descriptor */
    desc_base = get_tx_desc(0, &f_full);
    if ( f_full ) {
        dev->trans_start = jiffies;
        netif_stop_queue(dev);

        IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_ISRC);
        IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_IER);
    }
    if ( desc_base < 0 )
        goto PTM_HARD_START_XMIT_FAIL;
    desc = &CPU_TO_WAN_TX_DESC_BASE[desc_base];

    byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
    if ( skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff || skb_cloned(skb) ) {
        struct sk_buff *new_skb;

        ASSERT(skb_headroom(skb) >= sizeof(struct sk_buff *) + byteoff, "skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff");
        ASSERT(!skb_cloned(skb), "skb is cloned");

        new_skb = alloc_skb_tx(skb->len);
        if ( new_skb == NULL ) {
            dbg("no memory");
            goto ALLOC_SKB_TX_FAIL;
        }
        skb_put(new_skb, skb->len);
        memcpy(new_skb->data, skb->data, skb->len);
        dev_kfree_skb_any(skb);
        skb = new_skb;
        byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
        /* write back to physical memory */
        dma_cache_wback((unsigned long)skb->data, skb->len);
    }

    *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
    /* write back to physical memory */
    dma_cache_wback((unsigned long)skb->data - byteoff - sizeof(struct sk_buff *), skb->len + byteoff + sizeof(struct sk_buff *));

    /* free previous skb */
    skb_to_free = get_skb_pointer(desc->dataptr);
    if ( skb_to_free != NULL )
        dev_kfree_skb_any(skb_to_free);

    /* update descriptor */
    reg_desc.small = 0;
    reg_desc.dataptr = (unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1));
    reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
    reg_desc.qid = g_ptm_prio_queue_map[skb->priority > 7 ? 7 : skb->priority];
    reg_desc.byteoff = byteoff;
    reg_desc.own = 1;
    reg_desc.c = 1;
    reg_desc.sop = reg_desc.eop = 1;

    /* update MIB */
    g_ptm_priv_data.itf[0].stats.tx_packets++;
    g_ptm_priv_data.itf[0].stats.tx_bytes += reg_desc.datalen;

    /* write descriptor to memory */
    *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
    wmb();
    *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;

    dev->trans_start = jiffies;

    return 0;

ALLOC_SKB_TX_FAIL:
PTM_HARD_START_XMIT_FAIL:
    dev_kfree_skb_any(skb);
    g_ptm_priv_data.itf[0].stats.tx_dropped++;
    return 0;
}

static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    ASSERT(dev == g_net_dev[0], "incorrect device");

    switch ( cmd )
    {
    case IFX_PTM_MIB_CW_GET:
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = IFX_REG_R32(DREG_AR_CELL0) + IFX_REG_R32(DREG_AR_CELL1);
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = IFX_REG_R32(DREG_AR_IDLE_CNT0) + IFX_REG_R32(DREG_AR_IDLE_CNT1);
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = IFX_REG_R32(DREG_AR_CVN_CNT0) + IFX_REG_R32(DREG_AR_CVN_CNT1) + IFX_REG_R32(DREG_AR_CVNP_CNT0) + IFX_REG_R32(DREG_AR_CVNP_CNT1);
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = IFX_REG_R32(DREG_AT_CELL0) + IFX_REG_R32(DREG_AT_CELL1);
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = IFX_REG_R32(DREG_AT_IDLE_CNT0) + IFX_REG_R32(DREG_AT_IDLE_CNT1);
        break;
    case IFX_PTM_MIB_FRAME_GET:
        {
            PTM_FRAME_MIB_T data = {0};
            int i;

            data.RxCorrect = IFX_REG_R32(DREG_AR_HEC_CNT0) + IFX_REG_R32(DREG_AR_HEC_CNT1) + IFX_REG_R32(DREG_AR_AIIDLE_CNT0) + IFX_REG_R32(DREG_AR_AIIDLE_CNT1);
            for ( i = 0; i < 4; i++ )
                data.RxDropped += WAN_RX_MIB_TABLE(i)->wrx_dropdes_pdu;
            for ( i = 0; i < 8; i++ )
                data.TxSend += WAN_TX_MIB_TABLE(i)->wtx_total_pdu;

            *((PTM_FRAME_MIB_T *)ifr->ifr_data) = data;
        }
        break;
    case IFX_PTM_CFG_GET:
        // use bearer channel 0 preemption gamma interface settings
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = 1;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_eth_fcs_ver_dis == 0 ? 1 : 0;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis == 0 ? 1 : 0;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size == 0 ? 0 : (RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size * 16);
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis == 0 ? 1 : 0;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : 1;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : (TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size * 16);
        break;
    case IFX_PTM_CFG_SET:
        {
            int i;

            for ( i = 0; i < 4; i++ ) {
                RX_GAMMA_ITF_CFG(i)->rx_eth_fcs_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 0 : 1;

                RX_GAMMA_ITF_CFG(i)->rx_tc_crc_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck ? 0 : 1;

                switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen ) {
                    case 16: RX_GAMMA_ITF_CFG(i)->rx_tc_crc_size = 1; break;
                    case 32: RX_GAMMA_ITF_CFG(i)->rx_tc_crc_size = 2; break;
                    default: RX_GAMMA_ITF_CFG(i)->rx_tc_crc_size = 0;
                }

                TX_GAMMA_ITF_CFG(i)->tx_eth_fcs_gen_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 0 : 1;

                if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen ) {
                    switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen ) {
                        case 16: TX_GAMMA_ITF_CFG(i)->tx_tc_crc_size = 1; break;
                        case 32: TX_GAMMA_ITF_CFG(i)->tx_tc_crc_size = 2; break;
                        default: TX_GAMMA_ITF_CFG(i)->tx_tc_crc_size = 0;
                    }
                }
                else
                    TX_GAMMA_ITF_CFG(i)->tx_tc_crc_size = 0;
            }
        }
        break;
    case IFX_PTM_MAP_PKT_PRIO_TO_Q:
        {
            struct ppe_prio_q_map cmd;

            if ( copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)) )
                return -EFAULT;

            if ( cmd.pkt_prio < 0 || cmd.pkt_prio >= ARRAY_SIZE(g_ptm_prio_queue_map) )
                return -EINVAL;

            if ( cmd.qid < 0 || cmd.qid >= g_wanqos_en )
                return -EINVAL;

            g_ptm_prio_queue_map[cmd.pkt_prio] = cmd.qid;
        }
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}

static void ptm_tx_timeout(struct net_device *dev)
{
    ASSERT(dev == g_net_dev[0], "incorrect device");

    /* disable TX irq, release skb when sending new packet */
    IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);

    /* wake up TX queue */
    netif_wake_queue(dev);

    return;
}

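/*
 * Allocate an RX buffer: align skb->data to the DMA burst length, store the
 * skb pointer in the word just before the data area so the descriptor's
 * physical address can later be mapped back to the skb, and flush/invalidate
 * the caches so the PP32 sees consistent data.
 */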
static inline struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(RX_MAX_BUFFER_SIZE + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be burst length alignment and reserve two more bytes for MAC address alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /* put skb pointer in reserved area "skb->data - 4" */
        *((struct sk_buff **)skb->data - 1) = skb;
        wmb();
        /* write back and invalidate cache */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}

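/*
 * Allocate a TX/swap buffer aligned to the DMA burst length. Note that the
 * requested size is overridden: the buffer is always RX_MAX_BUFFER_SIZE bytes
 * (rounded up to the alignment), which the swap-descriptor refill relies on.
 */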
static inline struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding */
    size = RX_MAX_BUFFER_SIZE;
    size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be burst length alignment */
    if ( skb != NULL )
        skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
    return skb;
}

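/*
 * Map a descriptor's physical buffer address back to the owning skb: the skb
 * pointer was stored 4 bytes below the data pointer, accessed here through
 * the uncached KSEG1 segment.
 */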
static inline struct sk_buff *get_skb_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    // usually, CPE memory is less than 256M bytes
    // so NULL means invalid pointer
    if ( dataptr == 0 ) {
        dbg("dataptr is 0, it's supposed to be invalid pointer");
        return NULL;
    }

    skb_dataptr = (dataptr - 4) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT((((unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1))) | KSEG1) == (dataptr | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}

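/*
 * Claim the next free CPU-to-WAN TX descriptor. Returns its index (or -1 if
 * none is free) and reports through *f_full whether the ring is full after
 * this allocation. TX is assumed to be serialized, so no locking is done.
 */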
static inline int get_tx_desc(unsigned int itf, unsigned int *f_full)
{
    int desc_base = -1;
    struct ptm_itf *p_itf = &g_ptm_priv_data.itf[0];

    // assume TX is serial operation
    // no protection provided

    *f_full = 1;

    if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 ) {
        desc_base = p_itf->tx_desc_pos;
        if ( ++(p_itf->tx_desc_pos) == CPU_TO_WAN_TX_DESC_NUM )
            p_itf->tx_desc_pos = 0;
        if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 )
            *f_full = 0;
    }

    return desc_base;
}

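/*
 * Mailbox (IGU1) interrupt handler: bit 0 signals RX traffic (switch to NAPI
 * polling), bit 16 signals swap descriptors to refill (run the tasklet) and
 * bit 17 signals free TX descriptors (wake the TX queue).
 */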
static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    unsigned int isr;
    int i;

    isr = IFX_REG_R32(MBOX_IGU1_ISR);
    IFX_REG_W32(isr, MBOX_IGU1_ISRC);
    isr &= IFX_REG_R32(MBOX_IGU1_IER);

    if (isr & BIT(0)) {
        IFX_REG_W32_MASK(1, 0, MBOX_IGU1_IER);
        napi_schedule(&g_ptm_priv_data.itf[0].napi);
#if defined(ENABLE_TMP_DBG) && ENABLE_TMP_DBG
        {
            volatile struct rx_descriptor *desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];

            if ( desc->own ) { // PP32 hold
                err("invalid interrupt");
            }
        }
#endif
    }
    if (isr & BIT(16)) {
        IFX_REG_W32_MASK(1 << 16, 0, MBOX_IGU1_IER);
        tasklet_hi_schedule(&g_swap_desc_tasklet);
    }
    if (isr & BIT(17)) {
        IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
        netif_wake_queue(g_net_dev[0]);
    }

    return IRQ_HANDLED;
}

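/*
 * Swap-descriptor tasklet: release skbs the PP32 has finished with, replace
 * each one with a freshly allocated buffer and give the descriptor back to
 * the firmware. Re-enables the swap interrupt when the ring is drained,
 * otherwise reschedules itself.
 */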
static void do_swap_desc_tasklet(unsigned long arg)
{
    int budget = 32;
    volatile struct tx_descriptor *desc;
    struct sk_buff *skb;
    unsigned int byteoff;

    while ( budget-- > 0 ) {
        if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) // if PP32 hold descriptor
            break;

        desc = &WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos];
        if ( ++g_ptm_priv_data.itf[0].tx_swap_desc_pos == WAN_SWAP_DESC_NUM )
            g_ptm_priv_data.itf[0].tx_swap_desc_pos = 0;

        skb = get_skb_pointer(desc->dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);

        skb = alloc_skb_tx(RX_MAX_BUFFER_SIZE);
        if ( skb == NULL )
            panic("can't allocate swap buffer for PPE firmware use\n");
        byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
        *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;

        desc->dataptr = (unsigned int)skb->data & 0x0FFFFFFF;
        desc->own = 1;
    }

    // clear interrupt
    IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_ISRC);
    // no more skb to be replaced
    if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) { // if PP32 hold descriptor
        IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_IER);
        return;
    }

    tasklet_hi_schedule(&g_swap_desc_tasklet);
    return;
}


static inline int ifx_ptm_version(char *buf)
{
    int len = 0;
    unsigned int major, minor;

    ifx_ptm_get_fw_ver(&major, &minor);

    len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
    len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);

    return len;
}

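/*
 * Derive the runtime configuration from the module parameters: clamp the
 * number of WAN QoS queues, build a non-overlapping queue-to-gamma-interface
 * map and spread the eight packet priorities evenly over the TX queues.
 */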
static inline int init_priv_data(void)
{
    int i, j;

    g_wanqos_en = wanqos_en ? wanqos_en : 8;
    if ( g_wanqos_en > 8 )
        g_wanqos_en = 8;

    for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ )
    {
        g_queue_gamma_map[i] = queue_gamma_map[i] & ((1 << g_wanqos_en) - 1);
        for ( j = 0; j < i; j++ )
            g_queue_gamma_map[i] &= ~g_queue_gamma_map[j];
    }

    memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));

    {
        int max_packet_priority = ARRAY_SIZE(g_ptm_prio_queue_map);
        int tx_num_q;
        int q_step, q_accum, p_step;

        tx_num_q = __ETH_WAN_TX_QUEUE_NUM;
        q_step = tx_num_q - 1;
        p_step = max_packet_priority - 1;
        for ( j = 0, q_accum = 0; j < max_packet_priority; j++, q_accum += q_step )
            g_ptm_prio_queue_map[j] = q_step - (q_accum + (p_step >> 1)) / p_step;
    }

    return 0;
}

static inline void clear_priv_data(void)
{
}

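/*
 * Program the PPE firmware tables: pre-allocate the RX buffer pool, write the
 * QoS, power-save, bandwidth-control, bearer-channel and gamma-interface
 * configuration, fill the TX control K table and initialise all RX, TX, swap
 * and fastpath descriptor rings.
 */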
static inline int init_tables(void)
{
    struct sk_buff *skb_pool[WAN_RX_DESC_NUM] = {0};
    struct cfg_std_data_len cfg_std_data_len = {0};
    struct tx_qos_cfg tx_qos_cfg = {0};
    struct psave_cfg psave_cfg = {0};
    struct eg_bwctrl_cfg eg_bwctrl_cfg = {0};
    struct test_mode test_mode = {0};
    struct rx_bc_cfg rx_bc_cfg = {0};
    struct tx_bc_cfg tx_bc_cfg = {0};
    struct gpio_mode gpio_mode = {0};
    struct gpio_wm_cfg gpio_wm_cfg = {0};
    struct rx_gamma_itf_cfg rx_gamma_itf_cfg = {0};
    struct tx_gamma_itf_cfg tx_gamma_itf_cfg = {0};
    struct wtx_qos_q_desc_cfg wtx_qos_q_desc_cfg = {0};
    struct rx_descriptor rx_desc = {0};
    struct tx_descriptor tx_desc = {0};
    int i;

    for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
        skb_pool[i] = alloc_skb_rx();
        if ( skb_pool[i] == NULL )
            goto ALLOC_SKB_RX_FAIL;
    }

    cfg_std_data_len.byte_off = RX_HEAD_MAC_ADDR_ALIGNMENT; // this field replaces byte_off in rx descriptor of VDSL ingress
    cfg_std_data_len.data_len = 1600;
    *CFG_STD_DATA_LEN = cfg_std_data_len;

    tx_qos_cfg.time_tick = cgu_get_pp32_clock() / 62500; // 16 * (cgu_get_pp32_clock() / 1000000)
    tx_qos_cfg.overhd_bytes = 0;
    tx_qos_cfg.eth1_eg_qnum = __ETH_WAN_TX_QUEUE_NUM;
    tx_qos_cfg.eth1_burst_chk = 1;
    tx_qos_cfg.eth1_qss = 0;
    tx_qos_cfg.shape_en = 0; // disable
    tx_qos_cfg.wfq_en = 0; // strict priority
    *TX_QOS_CFG = tx_qos_cfg;

    psave_cfg.start_state = 0;
    psave_cfg.sleep_en = 1; // enable sleep mode
    *PSAVE_CFG = psave_cfg;

    eg_bwctrl_cfg.fdesc_wm = 16;
    eg_bwctrl_cfg.class_len = 128;
    *EG_BWCTRL_CFG = eg_bwctrl_cfg;

    //*GPIO_ADDR = (unsigned int)IFX_GPIO_P0_OUT;
    *GPIO_ADDR = (unsigned int)0x00000000; // disabled by default

    gpio_mode.gpio_bit_bc1 = 2;
    gpio_mode.gpio_bit_bc0 = 1;
    gpio_mode.gpio_bc1_en = 0;
    gpio_mode.gpio_bc0_en = 0;
    *GPIO_MODE = gpio_mode;

    gpio_wm_cfg.stop_wm_bc1 = 2;
    gpio_wm_cfg.start_wm_bc1 = 4;
    gpio_wm_cfg.stop_wm_bc0 = 2;
    gpio_wm_cfg.start_wm_bc0 = 4;
    *GPIO_WM_CFG = gpio_wm_cfg;

    test_mode.mib_clear_mode = 0;
    test_mode.test_mode = 0;
    *TEST_MODE = test_mode;

    rx_bc_cfg.local_state = 0;
    rx_bc_cfg.remote_state = 0;
    rx_bc_cfg.to_false_th = 7;
    rx_bc_cfg.to_looking_th = 3;
    *RX_BC_CFG(0) = rx_bc_cfg;
    *RX_BC_CFG(1) = rx_bc_cfg;

    tx_bc_cfg.fill_wm = 2;
    tx_bc_cfg.uflw_wm = 2;
    *TX_BC_CFG(0) = tx_bc_cfg;
    *TX_BC_CFG(1) = tx_bc_cfg;

    rx_gamma_itf_cfg.receive_state = 0;
    rx_gamma_itf_cfg.rx_min_len = 60;
    rx_gamma_itf_cfg.rx_pad_en = 1;
    rx_gamma_itf_cfg.rx_eth_fcs_ver_dis = 0;
    rx_gamma_itf_cfg.rx_rm_eth_fcs = 1;
    rx_gamma_itf_cfg.rx_tc_crc_ver_dis = 0;
    rx_gamma_itf_cfg.rx_tc_crc_size = 1;
    rx_gamma_itf_cfg.rx_eth_fcs_result = 0xC704DD7B;
    rx_gamma_itf_cfg.rx_tc_crc_result = 0x1D0F1D0F;
    rx_gamma_itf_cfg.rx_crc_cfg = 0x2500;
    rx_gamma_itf_cfg.rx_eth_fcs_init_value = 0xFFFFFFFF;
    rx_gamma_itf_cfg.rx_tc_crc_init_value = 0x0000FFFF;
    rx_gamma_itf_cfg.rx_max_len_sel = 0;
    rx_gamma_itf_cfg.rx_edit_num2 = 0;
    rx_gamma_itf_cfg.rx_edit_pos2 = 0;
    rx_gamma_itf_cfg.rx_edit_type2 = 0;
    rx_gamma_itf_cfg.rx_edit_en2 = 0;
    rx_gamma_itf_cfg.rx_edit_num1 = 0;
    rx_gamma_itf_cfg.rx_edit_pos1 = 0;
    rx_gamma_itf_cfg.rx_edit_type1 = 0;
    rx_gamma_itf_cfg.rx_edit_en1 = 0;
    rx_gamma_itf_cfg.rx_inserted_bytes_1l = 0;
    rx_gamma_itf_cfg.rx_inserted_bytes_1h = 0;
    rx_gamma_itf_cfg.rx_inserted_bytes_2l = 0;
    rx_gamma_itf_cfg.rx_inserted_bytes_2h = 0;
    rx_gamma_itf_cfg.rx_len_adj = -6;
    for ( i = 0; i < 4; i++ )
        *RX_GAMMA_ITF_CFG(i) = rx_gamma_itf_cfg;

    tx_gamma_itf_cfg.tx_len_adj = 6;
    tx_gamma_itf_cfg.tx_crc_off_adj = 6;
    tx_gamma_itf_cfg.tx_min_len = 0;
    tx_gamma_itf_cfg.tx_eth_fcs_gen_dis = 0;
    tx_gamma_itf_cfg.tx_tc_crc_size = 1;
    tx_gamma_itf_cfg.tx_crc_cfg = 0x2F00;
    tx_gamma_itf_cfg.tx_eth_fcs_init_value = 0xFFFFFFFF;
    tx_gamma_itf_cfg.tx_tc_crc_init_value = 0x0000FFFF;
    for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ ) {
        tx_gamma_itf_cfg.queue_mapping = g_queue_gamma_map[i];
        *TX_GAMMA_ITF_CFG(i) = tx_gamma_itf_cfg;
    }

    for ( i = 0; i < __ETH_WAN_TX_QUEUE_NUM; i++ ) {
        wtx_qos_q_desc_cfg.length = WAN_TX_DESC_NUM;
        wtx_qos_q_desc_cfg.addr = __ETH_WAN_TX_DESC_BASE(i);
        *WTX_QOS_Q_DESC_CFG(i) = wtx_qos_q_desc_cfg;
    }

    // default TX queue QoS config is all ZERO

    // TX Ctrl K Table
    IFX_REG_W32(0x90111293, TX_CTRL_K_TABLE(0));
    IFX_REG_W32(0x14959617, TX_CTRL_K_TABLE(1));
    IFX_REG_W32(0x18999A1B, TX_CTRL_K_TABLE(2));
    IFX_REG_W32(0x9C1D1E9F, TX_CTRL_K_TABLE(3));
    IFX_REG_W32(0xA02122A3, TX_CTRL_K_TABLE(4));
    IFX_REG_W32(0x24A5A627, TX_CTRL_K_TABLE(5));
    IFX_REG_W32(0x28A9AA2B, TX_CTRL_K_TABLE(6));
    IFX_REG_W32(0xAC2D2EAF, TX_CTRL_K_TABLE(7));
    IFX_REG_W32(0x30B1B233, TX_CTRL_K_TABLE(8));
    IFX_REG_W32(0xB43536B7, TX_CTRL_K_TABLE(9));
    IFX_REG_W32(0xB8393ABB, TX_CTRL_K_TABLE(10));
    IFX_REG_W32(0x3CBDBE3F, TX_CTRL_K_TABLE(11));
    IFX_REG_W32(0xC04142C3, TX_CTRL_K_TABLE(12));
    IFX_REG_W32(0x44C5C647, TX_CTRL_K_TABLE(13));
    IFX_REG_W32(0x48C9CA4B, TX_CTRL_K_TABLE(14));
    IFX_REG_W32(0xCC4D4ECF, TX_CTRL_K_TABLE(15));

    // init RX descriptor
    rx_desc.own = 1;
    rx_desc.c = 0;
    rx_desc.sop = 1;
    rx_desc.eop = 1;
    rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
    rx_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
    for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
        rx_desc.dataptr = (unsigned int)skb_pool[i]->data & 0x0FFFFFFF;
        WAN_RX_DESC_BASE[i] = rx_desc;
    }

    // init TX descriptor
    tx_desc.own = 0;
    tx_desc.c = 0;
    tx_desc.sop = 1;
    tx_desc.eop = 1;
    tx_desc.byteoff = 0;
    tx_desc.qid = 0;
    tx_desc.datalen = 0;
    tx_desc.small = 0;
    tx_desc.dataptr = 0;
    for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ )
        CPU_TO_WAN_TX_DESC_BASE[i] = tx_desc;
    for ( i = 0; i < WAN_TX_DESC_NUM_TOTAL; i++ )
        WAN_TX_DESC_BASE(0)[i] = tx_desc;

    // init Swap descriptor
    for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ )
        WAN_SWAP_DESC_BASE[i] = tx_desc;

    // init fastpath TX descriptor
    tx_desc.own = 1;
    for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ )
        FASTPATH_TO_WAN_TX_DESC_BASE[i] = tx_desc;

    return 0;

ALLOC_SKB_RX_FAIL:
    while ( i-- > 0 )
        dev_kfree_skb_any(skb_pool[i]);
    return -1;
}

static inline void clear_tables(void)
{
    struct sk_buff *skb;
    int i, j;

    for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
        skb = get_skb_pointer(WAN_RX_DESC_BASE[i].dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);
    }

    for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ ) {
        skb = get_skb_pointer(CPU_TO_WAN_TX_DESC_BASE[i].dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);
    }

    for ( j = 0; j < 8; j++ )
        for ( i = 0; i < WAN_TX_DESC_NUM; i++ ) {
            skb = get_skb_pointer(WAN_TX_DESC_BASE(j)[i].dataptr);
            if ( skb != NULL )
                dev_kfree_skb_any(skb);
        }

    for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ ) {
        skb = get_skb_pointer(WAN_SWAP_DESC_BASE[i].dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);
    }

    for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ ) {
        skb = get_skb_pointer(FASTPATH_TO_WAN_TX_DESC_BASE[i].dataptr);
        if ( skb != NULL )
            dev_kfree_skb_any(skb);
    }
}

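/*
 * DSL showtime hooks: called by the MEI driver when the line reaches or
 * leaves showtime. Entering showtime records the xdata address, enables the
 * datapath via UTP_CFG and allows transmission; leaving showtime reverses
 * this.
 */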
static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
{
    ASSERT(port_cell != NULL, "port_cell is NULL");
    ASSERT(xdata_addr != NULL, "xdata_addr is NULL");

    // TODO: ReTX set xdata_addr
    g_xdata_addr = xdata_addr;

    g_showtime = 1;

    IFX_REG_W32(0x0F, UTP_CFG);

    //#ifdef CONFIG_VR9
    // IFX_REG_W32_MASK(1 << 17, 0, FFSM_CFG0);
    //#endif

    printk("enter showtime\n");

    return 0;
}

static int ptm_showtime_exit(void)
{
    if ( !g_showtime )
        return -1;

    //#ifdef CONFIG_VR9
    // IFX_REG_W32_MASK(0, 1 << 17, FFSM_CFG0);
    //#endif

    IFX_REG_W32(0x00, UTP_CFG);

    g_showtime = 0;

    // TODO: ReTX clean state
    g_xdata_addr = NULL;

    printk("leave showtime\n");

    return 0;
}



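/*
 * Module init: set up private data and firmware tables, register the ptm0
 * net_device, hook the mailbox interrupt, start the PP32 firmware and
 * register the showtime callbacks with the MEI driver.
 */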
static int __devinit ifx_ptm_init(void)
{
    int ret;
    int i;
    char ver_str[128];
    struct port_cell_info port_cell = {0};

    ret = init_priv_data();
    if ( ret != 0 ) {
        err("INIT_PRIV_DATA_FAIL");
        goto INIT_PRIV_DATA_FAIL;
    }

    ifx_ptm_init_chip();
    ret = init_tables();
    if ( ret != 0 ) {
        err("INIT_TABLES_FAIL");
        goto INIT_TABLES_FAIL;
    }

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], ether_setup);
        if ( g_net_dev[i] == NULL )
            goto ALLOC_NETDEV_FAIL;
        ptm_setup(g_net_dev[i], i);
    }

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        ret = register_netdev(g_net_dev[i]);
        if ( ret != 0 )
            goto REGISTER_NETDEV_FAIL;
    }

    /* register interrupt handler */
    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
    if ( ret ) {
        if ( ret == -EBUSY ) {
            err("IRQ may be occupied by other driver, please reconfig to disable it.");
        }
        else {
            err("request_irq fail");
        }
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
    }
    disable_irq(PPE_MAILBOX_IGU1_INT);

    ret = ifx_pp32_start(0);
    if ( ret ) {
        err("ifx_pp32_start fail!");
        goto PP32_START_FAIL;
    }
    IFX_REG_W32(1 << 16, MBOX_IGU1_IER); // enable SWAP interrupt
    IFX_REG_W32(~0, MBOX_IGU1_ISRC);

    enable_irq(PPE_MAILBOX_IGU1_INT);

    ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);

    ifx_mei_atm_showtime_enter = ptm_showtime_enter;
    ifx_mei_atm_showtime_exit = ptm_showtime_exit;

    ifx_ptm_version(ver_str);
    printk(KERN_INFO "%s", ver_str);

    printk("ifxmips_ptm: PTM init succeed\n");

    return 0;

PP32_START_FAIL:
    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
    i = ARRAY_SIZE(g_net_dev);
REGISTER_NETDEV_FAIL:
    while ( i-- )
        unregister_netdev(g_net_dev[i]);
    i = ARRAY_SIZE(g_net_dev);
ALLOC_NETDEV_FAIL:
    while ( i-- ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }
INIT_TABLES_FAIL:
INIT_PRIV_DATA_FAIL:
    clear_priv_data();
    printk("ifxmips_ptm: PTM init failed\n");
    return ret;
}

static void __exit ifx_ptm_exit(void)
{
    int i;
    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit = NULL;


    ifx_pp32_stop(0);

    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        unregister_netdev(g_net_dev[i]);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }

    clear_tables();

    ifx_ptm_uninit_chip();

    clear_priv_data();
}

#ifndef MODULE
static int __init wanqos_en_setup(char *line)
{
    wanqos_en = simple_strtoul(line, NULL, 0);

    if ( wanqos_en < 1 || wanqos_en > 8 )
        wanqos_en = 0;

    return 0;
}

static int __init queue_gamma_map_setup(char *line)
{
    char *p;
    int i;

    for ( i = 0, p = line; i < ARRAY_SIZE(queue_gamma_map) && isxdigit(*p); i++ )
    {
        queue_gamma_map[i] = simple_strtoul(p, &p, 0);
        if ( *p == ',' || *p == ';' || *p == ':' )
            p++;
    }

    return 0;
}
#endif
module_init(ifx_ptm_init);
module_exit(ifx_ptm_exit);
#ifndef MODULE
__setup("wanqos_en=", wanqos_en_setup);
__setup("queue_gamma_map=", queue_gamma_map_setup);
#endif

MODULE_LICENSE("GPL");

