target/linux/lantiq/files/arch/mips/lantiq/svip/dma.c

/*
 ** Copyright (C) 2005 Wu Qi Ming <Qi-Ming.Wu@infineon.com>
 **
 ** This program is free software; you can redistribute it and/or modify
 ** it under the terms of the GNU General Public License as published by
 ** the Free Software Foundation; either version 2 of the License, or
 ** (at your option) any later version.
 **
 ** This program is distributed in the hope that it will be useful,
 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 ** GNU General Public License for more details.
 **
 ** You should have received a copy of the GNU General Public License
 ** along with this program; if not, write to the Free Software
 ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*
 * Description:
 * Driver for SVIP DMA
 * Author: Wu Qi Ming [Qi-Ming.Wu@infineon.com]
 * Created: 26-September-2005
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/selection.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/semaphore.h>

#include <base_reg.h>
#include <mps_reg.h>
#include <dma_reg.h>
#include <svip_dma.h>
#include <lantiq_soc.h>
#include <irq.h>
#include <sys1_reg.h>

static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
static struct svip_reg_dma *const dma = (struct svip_reg_dma *)LTQ_DMA_BASE;
static struct svip_reg_mbs *const mbs = (struct svip_reg_mbs *)LTQ_MBS_BASE;

#define DRV_NAME "ltq_dma"

extern void ltq_mask_and_ack_irq(struct irq_data *data);
extern void ltq_enable_irq(struct irq_data *data);

static inline void mask_and_ack_irq(unsigned int irq_nr)
{
    static int i;
    struct irq_data data;

    data.irq = irq_nr;
    if ((i < 2) && (irq_nr == 137)) {
        printk(KERN_DEBUG "eth delay hack\n");
        i++;
    }
    ltq_mask_and_ack_irq(&data);
}

static inline void svip_enable_irq(unsigned int irq_nr)
{
    struct irq_data data;

    data.irq = irq_nr;
    ltq_enable_irq(&data);
}

#define DMA_EMSG(fmt, args...) \
    printk(KERN_ERR "%s: " fmt, __func__, ## args)

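/*
 * The MBS mailbox register apparently acts as a hardware semaphore
 * guarding the indirectly addressed DMA channel/port register windows
 * (selected via the CS/PS registers): mbs_grab() spins until reading
 * mailbox 0 returns zero (i.e. the semaphore was obtained), and
 * mbs_release() writes zero back to free it. This is an interpretation
 * of the access pattern, not a documented contract.
 */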
static inline void mbs_grab(void)
{
    while (mbs_r32(mbsr0) != 0)
        ;
}

static inline void mbs_release(void)
{
    mbs_w32(0, mbsr0);
    asm("sync");
}

/* max ports connecting to dma */
#define LTQ_MAX_DMA_DEVICE_NUM ARRAY_SIZE(dma_devices)
/* max dma channels */
#define LTQ_MAX_DMA_CHANNEL_NUM ARRAY_SIZE(dma_chan)

/* bytes per descriptor */
#define DMA_DESCR_SIZE 8

#define DMA_DESCR_CH_SIZE (DMA_DESCR_NUM * DMA_DESCR_SIZE)
#define DMA_DESCR_TOTAL_SIZE (LTQ_MAX_DMA_CHANNEL_NUM * DMA_DESCR_CH_SIZE)
#define DMA_DESCR_MEM_PAGES ((DMA_DESCR_TOTAL_SIZE / PAGE_SIZE) + \
                 (((DMA_DESCR_TOTAL_SIZE % PAGE_SIZE) > 0)))
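
/*
 * Worked example (illustrative; the actual DMA_DESCR_NUM value comes
 * from svip_dma.h): assuming DMA_DESCR_NUM = 64 and PAGE_SIZE = 4096,
 * one channel ring takes 64 * 8 = 512 bytes, all 24 channels take
 * 12288 bytes, and DMA_DESCR_MEM_PAGES evaluates to 12288/4096 = 3
 * pages with no partial page remaining.
 */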

/* budget for interrupt handling */
#define DMA_INT_BUDGET 100
/* set the correct counter value here! */
#define DMA_POLL_COUNTER 32

struct proc_dir_entry *g_dma_dir;

/* device_name | max_rx_chan_num | max_tx_chan_num | drop_enable */
struct dma_device_info dma_devices[] = {
    { "SW", 4, 4, 0 },
    { "DEU", 1, 1, 0 },
    { "SSC0", 1, 1, 0 },
    { "SSC1", 1, 1, 0 },
    { "MCTRL", 1, 1, 0 },
    { "PCM0", 1, 1, 0 },
    { "PCM1", 1, 1, 0 },
    { "PCM2", 1, 1, 0 },
    { "PCM3", 1, 1, 0 }
};

/* *dma_dev | dir | pri | irq | rel_chan_no */
struct dma_channel_info dma_chan[] = {
    { &dma_devices[0], DIR_RX, 0, INT_NUM_IM4_IRL0 + 0, 0 },
    { &dma_devices[0], DIR_TX, 0, INT_NUM_IM4_IRL0 + 1, 0 },
    { &dma_devices[0], DIR_RX, 1, INT_NUM_IM4_IRL0 + 2, 1 },
    { &dma_devices[0], DIR_TX, 1, INT_NUM_IM4_IRL0 + 3, 1 },
    { &dma_devices[0], DIR_RX, 2, INT_NUM_IM4_IRL0 + 4, 2 },
    { &dma_devices[0], DIR_TX, 2, INT_NUM_IM4_IRL0 + 5, 2 },
    { &dma_devices[0], DIR_RX, 3, INT_NUM_IM4_IRL0 + 6, 3 },
    { &dma_devices[0], DIR_TX, 3, INT_NUM_IM4_IRL0 + 7, 3 },
    { &dma_devices[1], DIR_RX, 0, INT_NUM_IM4_IRL0 + 8, 0 },
    { &dma_devices[1], DIR_TX, 0, INT_NUM_IM4_IRL0 + 9, 0 },
    { &dma_devices[2], DIR_RX, 0, INT_NUM_IM4_IRL0 + 10, 0 },
    { &dma_devices[2], DIR_TX, 0, INT_NUM_IM4_IRL0 + 11, 0 },
    { &dma_devices[3], DIR_RX, 0, INT_NUM_IM4_IRL0 + 12, 0 },
    { &dma_devices[3], DIR_TX, 0, INT_NUM_IM4_IRL0 + 13, 0 },
    { &dma_devices[4], DIR_RX, 0, INT_NUM_IM4_IRL0 + 14, 0 },
    { &dma_devices[4], DIR_TX, 0, INT_NUM_IM4_IRL0 + 15, 0 },
    { &dma_devices[5], DIR_RX, 0, INT_NUM_IM4_IRL0 + 16, 0 },
    { &dma_devices[5], DIR_TX, 0, INT_NUM_IM4_IRL0 + 17, 0 },
    { &dma_devices[6], DIR_RX, 1, INT_NUM_IM4_IRL0 + 18, 0 },
    { &dma_devices[6], DIR_TX, 1, INT_NUM_IM4_IRL0 + 19, 0 },
    { &dma_devices[7], DIR_RX, 2, INT_NUM_IM4_IRL0 + 20, 0 },
    { &dma_devices[7], DIR_TX, 2, INT_NUM_IM4_IRL0 + 21, 0 },
    { &dma_devices[8], DIR_RX, 3, INT_NUM_IM4_IRL0 + 22, 0 },
    { &dma_devices[8], DIR_TX, 3, INT_NUM_IM4_IRL0 + 23, 0 }
};

u64 *g_desc_list[DMA_DESCR_MEM_PAGES];

volatile u32 g_dma_int_status = 0;

/* 0 - not in process, 1 - in process */
volatile int g_dma_in_process;

int ltq_dma_init(void);
void do_dma_tasklet(unsigned long);
DECLARE_TASKLET(dma_tasklet, do_dma_tasklet, 0);
irqreturn_t dma_interrupt(int irq, void *dev_id);
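
/*
 * Interrupt handling is split in two stages: dma_interrupt() only
 * records the signalling channel in g_dma_int_status and schedules
 * dma_tasklet; do_dma_tasklet() then drains the pending channels
 * under the DMA_INT_BUDGET limit.
 */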

u8 *common_buffer_alloc(int len, int *byte_offset, void **opt)
{
    /* may run in tasklet context via dma_device_read(), so the
     * allocation must not sleep */
    u8 *buffer = kmalloc(len, GFP_ATOMIC);

    *byte_offset = 0;
    return buffer;
}

void common_buffer_free(u8 *dataptr, void *opt)
{
    kfree(dataptr);
}

void enable_ch_irq(struct dma_channel_info *ch)
{
    int chan_no = (int)(ch - dma_chan);
    unsigned long flag;
    u32 val;

    if (ch->dir == DIR_RX)
        val = DMA_CIE_DESCPT | DMA_CIE_DUR;
    else
        val = DMA_CIE_DESCPT;

    local_irq_save(flag);
    mbs_grab();
    dma_w32(chan_no, cs);
    dma_w32(val, cie);
    dma_w32_mask(0, 1 << chan_no, irnen);
    mbs_release();
    local_irq_restore(flag);

    svip_enable_irq(ch->irq);
}

void disable_ch_irq(struct dma_channel_info *ch)
{
    unsigned long flag;
    int chan_no = (int)(ch - dma_chan);

    local_irq_save(flag);
    g_dma_int_status &= ~(1 << chan_no);
    mbs_grab();
    dma_w32(chan_no, cs);
    dma_w32(0, cie);
    mbs_release();
    dma_w32_mask(1 << chan_no, 0, irnen);
    local_irq_restore(flag);

    mask_and_ack_irq(ch->irq);
}

int open_chan(struct dma_channel_info *ch)
{
    unsigned long flag;
    int j;
    int chan_no = (int)(ch - dma_chan);
    u8 *buffer;
    int byte_offset;
    struct rx_desc *rx_desc_p;
    struct tx_desc *tx_desc_p;

    if (ch->control == LTQ_DMA_CH_ON)
        return -1;

    if (ch->dir == DIR_RX) {
        for (j = 0; j < ch->desc_len; j++) {
            rx_desc_p = (struct rx_desc *)ch->desc_base + j;
            buffer = ch->dma_dev->buffer_alloc(ch->packet_size,
                               &byte_offset,
                               (void *)&ch->opt[j]);
            if (!buffer)
                return -ENOBUFS;

            rx_desc_p->data_pointer = (u32)CPHYSADDR((u32)buffer);
            rx_desc_p->status.word = 0;
            rx_desc_p->status.field.byte_offset = byte_offset;
            rx_desc_p->status.field.data_length = ch->packet_size;
            rx_desc_p->status.field.own = DMA_OWN;
        }
    } else {
        for (j = 0; j < ch->desc_len; j++) {
            tx_desc_p = (struct tx_desc *)ch->desc_base + j;
            tx_desc_p->data_pointer = 0;
            tx_desc_p->status.word = 0;
        }
    }
    ch->xfer_cnt = 0;

    local_irq_save(flag);
    mbs_grab();
    dma_w32(chan_no, cs);
    dma_w32(ch->desc_len, cdlen);
    dma_w32(0x7e, cis);
    dma_w32(DMA_CCTRL_TXWGT_VAL(ch->tx_weight)
        | DMA_CCTRL_CLASS_VAL(ch->pri)
        | (ch->dir == DIR_RX ? DMA_CCTRL_ON_OFF : 0), cctrl);
    mbs_release();
    ch->control = LTQ_DMA_CH_ON;
    local_irq_restore(flag);

    if (request_irq(ch->irq, dma_interrupt,
            IRQF_DISABLED, "dma-core", (void *)ch) != 0) {
        printk(KERN_ERR "error, cannot get dma_irq!\n");
        return -EFAULT;
    }

    enable_ch_irq(ch);
    return 0;
}

int close_chan(struct dma_channel_info *ch)
{
    unsigned long flag;
    int j;
    int chan_no = (int)(ch - dma_chan);
    struct rx_desc *desc_p;

    if (ch->control == LTQ_DMA_CH_OFF)
        return -1;

    local_irq_save(flag);
    mbs_grab();
    dma_w32(chan_no, cs);
    dma_w32_mask(DMA_CCTRL_ON_OFF, 0, cctrl);
    mbs_release();
    disable_ch_irq(ch);
    free_irq(ch->irq, (void *)ch);
    ch->control = LTQ_DMA_CH_OFF;
    local_irq_restore(flag);

    /* free descriptors in use */
    for (j = 0; j < ch->desc_len; j++) {
        desc_p = (struct rx_desc *)ch->desc_base + j;
        if ((desc_p->status.field.own == CPU_OWN &&
             desc_p->status.field.c) ||
            (desc_p->status.field.own == DMA_OWN)) {
            if (desc_p->data_pointer) {
                ch->dma_dev->buffer_free((u8 *)__va(desc_p->data_pointer),
                             (void *)ch->opt[j]);
                desc_p->data_pointer = (u32)NULL;
            }
        }
    }

    return 0;
}

int reset_chan(struct dma_channel_info *ch)
{
    unsigned long flag;
    int val;
    int chan_no = (int)(ch - dma_chan);

    close_chan(ch);

    local_irq_save(flag);
    mbs_grab();
    dma_w32(chan_no, cs);
    dma_w32_mask(0, DMA_CCTRL_RST, cctrl);
    mbs_release();
    local_irq_restore(flag);

    /* wait until the channel reset bit self-clears */
    do {
        local_irq_save(flag);
        mbs_grab();
        dma_w32(chan_no, cs);
        val = dma_r32(cctrl);
        mbs_release();
        local_irq_restore(flag);
    } while (val & DMA_CCTRL_RST);

    return 0;
}

static inline void rx_chan_intr_handler(int chan_no)
{
    struct dma_device_info *dma_dev = (struct dma_device_info *)
        dma_chan[chan_no].dma_dev;
    struct dma_channel_info *ch = &dma_chan[chan_no];
    struct rx_desc *rx_desc_p;
    unsigned long flag;
    u32 val;

    local_irq_save(flag);
    mbs_grab();
    dma_w32(chan_no, cs);
    val = dma_r32(cis);
    dma_w32(DMA_CIS_DESCPT, cis);
    mbs_release();

    /* handle command complete interrupt */
    rx_desc_p = (struct rx_desc *)ch->desc_base + ch->curr_desc;
    if ((rx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) ==
        DMA_DESC_CPT_SET) {
        local_irq_restore(flag);
        /* everything is correct, so inform the upper layer */
        dma_dev->current_rx_chan = ch->rel_chan_no;
        if (dma_dev->intr_handler)
            dma_dev->intr_handler(dma_dev, RCV_INT);
        ch->weight--;
    } else {
        g_dma_int_status &= ~(1 << chan_no);
        local_irq_restore(flag);
        svip_enable_irq(dma_chan[chan_no].irq);
    }
}

static inline void tx_chan_intr_handler(int chan_no)
{
    struct dma_device_info *dma_dev = (struct dma_device_info *)
        dma_chan[chan_no].dma_dev;
    struct dma_channel_info *ch = &dma_chan[chan_no];
    struct tx_desc *tx_desc_p;
    unsigned long flag;

    local_irq_save(flag);
    mbs_grab();
    dma_w32(chan_no, cs);
    dma_w32(DMA_CIS_DESCPT, cis);
    mbs_release();

    tx_desc_p = (struct tx_desc *)ch->desc_base + ch->prev_desc;
    if ((tx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) ==
       DMA_DESC_CPT_SET) {
        local_irq_restore(flag);

        dma_dev->buffer_free((u8 *)__va(tx_desc_p->data_pointer),
                  ch->opt[ch->prev_desc]);
        memset(tx_desc_p, 0, sizeof(struct tx_desc));
        dma_dev->current_tx_chan = ch->rel_chan_no;
        if (dma_dev->intr_handler)
            dma_dev->intr_handler(dma_dev, TRANSMIT_CPT_INT);
        ch->weight--;

        ch->prev_desc = (ch->prev_desc + 1) % (ch->desc_len);
    } else {
        g_dma_int_status &= ~(1 << chan_no);
        local_irq_restore(flag);
        svip_enable_irq(dma_chan[chan_no].irq);
    }
}

void do_dma_tasklet(unsigned long unused)
{
    int i;
    int chan_no = 0;
    int budget = DMA_INT_BUDGET;
    int weight = 0;
    unsigned long flag;

    while (g_dma_int_status) {
        if (budget-- < 0) {
            tasklet_schedule(&dma_tasklet);
            return;
        }
        chan_no = -1;
        weight = 0;
        /* WFQ algorithm to select the channel */
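        /* (The pending channel with the largest remaining weight is
         * served next; once no pending channel has weight left, all
         * weights are reloaded from the per-channel defaults below.) */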
        for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
            if (g_dma_int_status & (1 << i) &&
                dma_chan[i].weight > 0) {
                if (dma_chan[i].weight > weight) {
                    chan_no = i;
                    weight = dma_chan[chan_no].weight;
                }
            }
        }
        if (chan_no >= 0) {
            if (dma_chan[chan_no].dir == DIR_RX)
                rx_chan_intr_handler(chan_no);
            else
                tx_chan_intr_handler(chan_no);
        } else {
            /* reset all the channel weights */
            for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
                dma_chan[i].weight = dma_chan[i].default_weight;
        }
    }

    local_irq_save(flag);
    g_dma_in_process = 0;
    if (g_dma_int_status) {
        g_dma_in_process = 1;
        tasklet_schedule(&dma_tasklet);
    }
    local_irq_restore(flag);
}

irqreturn_t dma_interrupt(int irq, void *dev_id)
{
    struct dma_channel_info *ch;
    int chan_no = 0;

    ch = (struct dma_channel_info *)dev_id;
    chan_no = (int)(ch - dma_chan);

    if ((unsigned)chan_no >= LTQ_MAX_DMA_CHANNEL_NUM) {
        printk(KERN_ERR "error: dma_interrupt irq=%d chan_no=%d\n",
               irq, chan_no);
        return IRQ_NONE;
    }

    g_dma_int_status |= 1 << chan_no;
    dma_w32(1 << chan_no, irncr);
    mask_and_ack_irq(irq);

    if (!g_dma_in_process) {
        g_dma_in_process = 1;
        tasklet_schedule(&dma_tasklet);
    }

    return IRQ_HANDLED;
}

struct dma_device_info *dma_device_reserve(char *dev_name)
{
    int i;

    ltq_dma_init();
    for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
        if (strcmp(dev_name, dma_devices[i].device_name) == 0) {
            if (dma_devices[i].reserved)
                return NULL;
            dma_devices[i].reserved = 1;
            break;
        }
    }

    if (i == LTQ_MAX_DMA_DEVICE_NUM)
        return NULL;

    return &dma_devices[i];
}
EXPORT_SYMBOL(dma_device_reserve);
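
/*
 * Usage sketch (illustrative only, not part of this driver): a client
 * claims its port by one of the names from the dma_devices[] table
 * above and then installs its own callbacks; the callback names here
 * are made up:
 *
 *   struct dma_device_info *dev = dma_device_reserve("SW");
 *
 *   if (!dev)
 *       return -EBUSY;                      hypothetical error handling
 *   dev->intr_handler = my_intr_handler;    client callbacks
 *   dev->buffer_alloc = my_buffer_alloc;
 *   dev->buffer_free  = my_buffer_free;
 */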

int dma_device_release(struct dma_device_info *dma_dev)
{
    dma_dev->reserved = 0;

    return 0;
}
EXPORT_SYMBOL(dma_device_release);

int dma_device_register(struct dma_device_info *dma_dev)
{
    int port_no = (int)(dma_dev - dma_devices);
    int txbl, rxbl;
    unsigned long flag;

    /* encode the burst length as the hardware TXBL/RXBL field:
     * 8 -> 3, 4 -> 2, anything else -> 1 */
    switch (dma_dev->tx_burst_len) {
    case 8:
        txbl = 3;
        break;
    case 4:
        txbl = 2;
        break;
    default:
        txbl = 1;
        break;
    }

    switch (dma_dev->rx_burst_len) {
    case 8:
        rxbl = 3;
        break;
    case 4:
        rxbl = 2;
        break;
    default:
        rxbl = 1;
        break;
    }

    local_irq_save(flag);
    mbs_grab();
    dma_w32(port_no, ps);
    dma_w32(DMA_PCTRL_TXWGT_VAL(dma_dev->tx_weight)
        | DMA_PCTRL_TXENDI_VAL(dma_dev->tx_endianness_mode)
        | DMA_PCTRL_RXENDI_VAL(dma_dev->rx_endianness_mode)
        | DMA_PCTRL_PDEN_VAL(dma_dev->drop_enable)
        | DMA_PCTRL_TXBL_VAL(txbl)
        | DMA_PCTRL_RXBL_VAL(rxbl), pctrl);
    mbs_release();
    local_irq_restore(flag);

    return 0;
}
EXPORT_SYMBOL(dma_device_register);
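
/*
 * Usage sketch (illustrative; the field values are assumptions, not
 * recommendations): after reserving a port, a client typically sets
 * its burst lengths and weight before registering:
 *
 *   dev->tx_burst_len = 8;     encoded as TXBL = 3 above
 *   dev->rx_burst_len = 8;
 *   dev->tx_weight = 1;
 *   dma_device_register(dev);
 */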

int dma_device_unregister(struct dma_device_info *dma_dev)
{
    int i;
    int port_no = (int)(dma_dev - dma_devices);
    unsigned long flag;

    /* flush memcopy module; has no effect for other ports */
    local_irq_save(flag);
    mbs_grab();
    dma_w32(port_no, ps);
    dma_w32_mask(0, DMA_PCTRL_GPC, pctrl);
    mbs_release();
    local_irq_restore(flag);

    for (i = 0; i < dma_dev->max_tx_chan_num; i++)
        reset_chan(dma_dev->tx_chan[i]);

    for (i = 0; i < dma_dev->max_rx_chan_num; i++)
        reset_chan(dma_dev->rx_chan[i]);

    return 0;
}
EXPORT_SYMBOL(dma_device_unregister);

/**
 * Read a packet from a DMA Rx channel.
 * The function takes the data of the current rx descriptor assigned
 * to the passed DMA device and passes it back to the caller.
 * It is called in the context of the DMA interrupt.
 * In detail the following actions are done:
 * - get current receive descriptor
 * - allocate memory via allocation callback function
 * - pass data from descriptor to allocated memory
 * - update channel weight
 * - release descriptor
 * - update current descriptor position
 *
 * \param *dma_dev - pointer to DMA device structure
 * \param **dataptr - pointer to received data
 * \param **opt
 * \return packet length - length of received data
 * \ingroup Internal
 */
int dma_device_read(struct dma_device_info *dma_dev, u8 **dataptr, void **opt)
{
    u8 *buf;
    int len;
    int byte_offset = 0;
    void *p = NULL;

    struct dma_channel_info *ch =
        dma_dev->rx_chan[dma_dev->current_rx_chan];

    struct rx_desc *rx_desc_p;

    /* get the rx data first */
    rx_desc_p = (struct rx_desc *)ch->desc_base + ch->curr_desc;
    buf = (u8 *)__va(rx_desc_p->data_pointer);
    *(u32 *)dataptr = (u32)buf;
    len = rx_desc_p->status.field.data_length;
#ifndef CONFIG_MIPS_UNCACHED
    dma_cache_inv((unsigned long)buf, len);
#endif
    if (opt)
        *(int *)opt = (int)ch->opt[ch->curr_desc];

    /* replace it with a newly allocated buffer */
    buf = dma_dev->buffer_alloc(ch->packet_size, &byte_offset, &p);
    if (buf) {
        ch->opt[ch->curr_desc] = p;

        wmb();
        rx_desc_p->data_pointer = (u32)CPHYSADDR((u32)buf);
        rx_desc_p->status.word = (DMA_OWN << 31)
                     | (byte_offset << 23)
                     | ch->packet_size;
        wmb();
    } else {
        *(u32 *)dataptr = 0;
        if (opt)
            *(int *)opt = 0;
    }

    ch->xfer_cnt++;
    /* advance the curr_desc pointer */
    ch->curr_desc++;
    if (ch->curr_desc == ch->desc_len)
        ch->curr_desc = 0;
    /* return the length of the received packet */
    return len;
}
EXPORT_SYMBOL(dma_device_read);
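
/*
 * Usage sketch (illustrative; the handler and helper names are
 * hypothetical): a client's intr_handler callback would consume
 * RCV_INT roughly like this:
 *
 *   void my_intr_handler(struct dma_device_info *dev, int status)
 *   {
 *       if (status == RCV_INT) {
 *           u8 *data;
 *           int len = dma_device_read(dev, &data, NULL);
 *           ...hand data/len to the upper layer; the buffer is now
 *           owned by the client and is freed with the same scheme as
 *           the registered buffer_free callback...
 *       }
 *   }
 */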

/**
 * Write a packet through a DMA Tx channel to the peripheral.
 *
 * \param *dma_dev - pointer to DMA device structure
 * \param *dataptr - pointer to data to be sent
 * \param len - amount of data bytes to be sent
 * \param *opt
 * \return len - length of transmitted data
 * \ingroup Internal
 */
int dma_device_write(struct dma_device_info *dma_dev, u8 *dataptr, int len,
             void *opt)
{
    unsigned long flag;
    u32 byte_offset;
    struct dma_channel_info *ch;
    int chan_no;
    struct tx_desc *tx_desc_p;

    local_irq_save(flag);

    ch = dma_dev->tx_chan[dma_dev->current_tx_chan];
    chan_no = (int)(ch - dma_chan);

    if (ch->control == LTQ_DMA_CH_OFF) {
        local_irq_restore(flag);
        printk(KERN_ERR "%s: dma channel %d not enabled!\n",
               __func__, chan_no);
        return 0;
    }

    tx_desc_p = (struct tx_desc *)ch->desc_base + ch->curr_desc;
    /* check whether this descriptor is available */
    if (tx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) {
        /* if not, tell the upper layer device */
        dma_dev->intr_handler(dma_dev, TX_BUF_FULL_INT);
        local_irq_restore(flag);
        return 0;
    }
    ch->opt[ch->curr_desc] = opt;
    /* The descriptor's data pointer must be aligned to the burst size
     * in bytes; byte_offset carries the buffer's misalignment. */
    byte_offset = ((u32)CPHYSADDR((u32)dataptr)) %
        (dma_dev->tx_burst_len * 4);
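    /* Example: with tx_burst_len = 4 the alignment unit is 16 bytes,
     * so a buffer at physical address 0x00400006 yields byte_offset = 6
     * and a descriptor data pointer of 0x00400000. */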
#ifndef CONFIG_MIPS_UNCACHED
    dma_cache_wback((unsigned long)dataptr, len);
    wmb();
#endif
    tx_desc_p->data_pointer = (u32)CPHYSADDR((u32)dataptr) - byte_offset;
    wmb();
    tx_desc_p->status.word = (DMA_OWN << 31)
        | DMA_DESC_SOP_SET
        | DMA_DESC_EOP_SET
        | (byte_offset << 23)
        | len;
    wmb();

    if (ch->xfer_cnt == 0) {
        mbs_grab();
        dma_w32(chan_no, cs);
        dma_w32_mask(0, DMA_CCTRL_ON_OFF, cctrl);
        mbs_release();
    }

    ch->xfer_cnt++;
    ch->curr_desc++;
    if (ch->curr_desc == ch->desc_len)
        ch->curr_desc = 0;

    local_irq_restore(flag);
    return len;
}
EXPORT_SYMBOL(dma_device_write);
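
/*
 * Usage sketch (illustrative; buffer management is the client's
 * responsibility and the names here are made up):
 *
 *   u8 *frame = my_get_frame(&frame_len);
 *   if (dma_device_write(dev, frame, frame_len, frame) == 0)
 *       ...channel off or ring full (TX_BUF_FULL_INT was raised)...
 *
 * The opt cookie passed here comes back through buffer_free() once the
 * descriptor completes in tx_chan_intr_handler().
 */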

/**
 * Display the descriptor list of each channel via the proc file.
 * Static state is kept across reads, so consecutive reads walk
 * through all channels.
 *
 * \param *buf
 * \param **start
 * \param offset
 * \param count
 * \param *eof
 * \param *data
 * \return len - amount of bytes written to file
 * \ingroup Internal
 */
int desc_list_proc_read(char *buf, char **start, off_t offset,
            int count, int *eof, void *data)
{
    int len = 0;
    int i;
    static int chan_no;
    u32 *p;

    if ((chan_no == 0) && (offset > count)) {
        *eof = 1;
        return 0;
    }

    if (chan_no != 0) {
        *start = buf;
    } else {
        buf = buf + offset;
        *start = buf;
    }

    p = (u32 *)dma_chan[chan_no].desc_base;

    if (dma_chan[chan_no].dir == DIR_RX)
        len += sprintf(buf + len,
                   "channel %d %s Rx descriptor list:\n",
                   chan_no, dma_chan[chan_no].dma_dev->device_name);
    else
        len += sprintf(buf + len,
                   "channel %d %s Tx descriptor list:\n",
                   chan_no, dma_chan[chan_no].dma_dev->device_name);
    len += sprintf(buf + len,
               " no address data pointer command bits "
               "(Own, Complete, SoP, EoP, Offset)\n");
    len += sprintf(buf + len,
               "----------------------------------------------"
               "-----------------------------------\n");
    for (i = 0; i < dma_chan[chan_no].desc_len; i++) {
        len += sprintf(buf + len, "%3d ", i);
        len += sprintf(buf + len, "0x%08x ", (u32)(p + (i * 2)));
        len += sprintf(buf + len, "%08x ", *(p + (i * 2 + 1)));
        len += sprintf(buf + len, "%08x ", *(p + (i * 2)));

        if (*(p + (i * 2)) & 0x80000000)
            len += sprintf(buf + len, "D ");
        else
            len += sprintf(buf + len, "C ");
        if (*(p + (i * 2)) & 0x40000000)
            len += sprintf(buf + len, "C ");
        else
            len += sprintf(buf + len, "c ");
        if (*(p + (i * 2)) & 0x20000000)
            len += sprintf(buf + len, "S ");
        else
            len += sprintf(buf + len, "s ");
        if (*(p + (i * 2)) & 0x10000000)
            len += sprintf(buf + len, "E ");
        else
            len += sprintf(buf + len, "e ");

        /* byte offset is different for rx and tx descriptors */
        if (dma_chan[chan_no].dir == DIR_RX) {
            len += sprintf(buf + len, "%01x ",
                       (*(p + (i * 2)) & 0x01800000) >> 23);
        } else {
            len += sprintf(buf + len, "%02x ",
                       (*(p + (i * 2)) & 0x0F800000) >> 23);
        }

        if (dma_chan[chan_no].curr_desc == i)
            len += sprintf(buf + len, "<- CURR");

        if (dma_chan[chan_no].prev_desc == i)
            len += sprintf(buf + len, "<- PREV");

        len += sprintf(buf + len, "\n");
    }

    len += sprintf(buf + len, "\n");
    chan_no++;
    if (chan_no > LTQ_MAX_DMA_CHANNEL_NUM - 1)
        chan_no = 0;

    *eof = 1;
    return len;
}

/**
 * Display the weight of all DMA channels via the proc file.
 *
 * \param *buf
 * \param **start
 * \param offset
 * \param count
 * \param *eof
 * \param *data
 * \return len - amount of bytes written to file
 */
int channel_weight_proc_read(char *buf, char **start, off_t offset,
                 int count, int *eof, void *data)
{
    int i;
    int len = 0;

    len += sprintf(buf + len, "Qos dma channel weight list\n");
    len += sprintf(buf + len, "channel_num default_weight "
               "current_weight device Tx/Rx\n");
    len += sprintf(buf + len, "---------------------------"
               "---------------------------------\n");
    for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
        struct dma_channel_info *ch = &dma_chan[i];

        if (ch->dir == DIR_RX) {
            len += sprintf(buf + len,
                       " %2d %08x "
                       "%08x %10s Rx\n",
                      i, ch->default_weight, ch->weight,
                      ch->dma_dev->device_name);
        } else {
            len += sprintf(buf + len,
                       " %2d %08x "
                       "%08x %10s Tx\n",
                      i, ch->default_weight, ch->weight,
                      ch->dma_dev->device_name);
        }
    }

    return len;
}

/**
 * Provide DMA register contents via the proc file.
 * This function reads the content of the general DMA registers, the
 * DMA channel registers and the DMA port registers and writes a
 * structured output to the DMA proc file.
 *
 * \param *buf
 * \param **start
 * \param offset
 * \param count
 * \param *eof
 * \param *data
 * \return len - amount of bytes written to file
 */
int dma_register_proc_read(char *buf, char **start, off_t offset,
               int count, int *eof, void *data)
{
    int len = 0;
    int i;
    int limit = count;
    unsigned long flags;
    static int blockcount;
    static int channel_no;

    if ((blockcount == 0) && (offset > count)) {
        *eof = 1;
        return 0;
    }

    switch (blockcount) {
    case 0:
        len += sprintf(buf + len, "\nGeneral DMA Registers\n");
        len += sprintf(buf + len, "-------------------------"
                   "----------------\n");
        len += sprintf(buf + len, "CLC= %08x\n", dma_r32(clc));
        len += sprintf(buf + len, "ID= %08x\n", dma_r32(id));
        len += sprintf(buf + len, "DMA_CPOLL= %08x\n", dma_r32(cpoll));
        len += sprintf(buf + len, "DMA_CS= %08x\n", dma_r32(cs));
        len += sprintf(buf + len, "DMA_PS= %08x\n", dma_r32(ps));
        len += sprintf(buf + len, "DMA_IRNEN= %08x\n", dma_r32(irnen));
        len += sprintf(buf + len, "DMA_IRNCR= %08x\n", dma_r32(irncr));
        len += sprintf(buf + len, "DMA_IRNICR= %08x\n",
                   dma_r32(irnicr));
        len += sprintf(buf + len, "\nDMA Channel Registers\n");
        blockcount = 1;
        return len;
    case 1:
        /* If we had an overflow, start at the beginning of the buffer,
         * otherwise use offset */
        if (channel_no != 0) {
            *start = buf;
        } else {
            buf = buf + offset;
            *start = buf;
        }

        local_irq_save(flags);
        for (i = channel_no; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
            struct dma_channel_info *ch = &dma_chan[i];

            if (len + 300 > limit) {
                local_irq_restore(flags);
                channel_no = i;
                blockcount = 1;
                return len;
            }
            len += sprintf(buf + len, "----------------------"
                       "-------------------\n");
            if (ch->dir == DIR_RX) {
                len += sprintf(buf + len,
                           "Channel %d - Device %s Rx\n",
                           i, ch->dma_dev->device_name);
            } else {
                len += sprintf(buf + len,
                           "Channel %d - Device %s Tx\n",
                           i, ch->dma_dev->device_name);
            }
            dma_w32(i, cs);
            len += sprintf(buf + len, "DMA_CCTRL= %08x\n",
                       dma_r32(cctrl));
            len += sprintf(buf + len, "DMA_CDBA= %08x\n",
                       dma_r32(cdba));
            len += sprintf(buf + len, "DMA_CIE= %08x\n",
                       dma_r32(cie));
            len += sprintf(buf + len, "DMA_CIS= %08x\n",
                       dma_r32(cis));
            len += sprintf(buf + len, "DMA_CDLEN= %08x\n",
                       dma_r32(cdlen));
        }
        local_irq_restore(flags);
        blockcount = 2;
        channel_no = 0;
        return len;
    case 2:
        *start = buf;
        /*
         * display port dependent registers
         */
        len += sprintf(buf + len, "\nDMA Port Registers\n");
        len += sprintf(buf + len,
                   "-----------------------------------------\n");
        local_irq_save(flags);
        for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
            dma_w32(i, ps);
            len += sprintf(buf + len,
                       "Port %d DMA_PCTRL= %08x\n",
                       i, dma_r32(pctrl));
        }
        local_irq_restore(flags);
        blockcount = 0;
        *eof = 1;
        return len;
    }

    blockcount = 0;
    *eof = 1;
    return 0;
}

/**
 * Open method of the DMA device driver.
 * Currently a no-op; no per-open state is kept.
 *
 * \param
 * \return
 */
static int dma_open(struct inode *inode, struct file *file)
{
    return 0;
}

/**
 * Release method of the DMA device driver.
 * Currently a no-op; no per-open state is kept.
 *
 * \param
 * \return
 */
static int dma_release(struct inode *inode, struct file *file)
{
    /* release the resources */
    return 0;
}

/**
 * Ioctl interface to the DMA module.
 *
 * \param None
 * \return 0 - initialization successful
 * <0 - failed initialization
 */
static long dma_ioctl(struct file *file,
             unsigned int cmd, unsigned long arg)
{
    int result = 0;
    /* TODO: add some user controlled functions here */
    return result;
}

static const struct file_operations dma_fops = {
    .owner = THIS_MODULE,
    .open = dma_open,
    .release = dma_release,
    .unlocked_ioctl = dma_ioctl,
};

void map_dma_chan(struct dma_channel_info *map)
{
    int i;

    /* assign default values for channel settings */
    for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
        dma_chan[i].byte_offset = 0;
        dma_chan[i].open = &open_chan;
        dma_chan[i].close = &close_chan;
        dma_chan[i].reset = &reset_chan;
        dma_chan[i].enable_irq = enable_ch_irq;
        dma_chan[i].disable_irq = disable_ch_irq;
        dma_chan[i].tx_weight = 1;
        dma_chan[i].control = 0;
        dma_chan[i].default_weight = LTQ_DMA_CH_DEFAULT_WEIGHT;
        dma_chan[i].weight = dma_chan[i].default_weight;
        dma_chan[i].curr_desc = 0;
        dma_chan[i].prev_desc = 0;
    }

    /* assign default values for port settings */
    for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
        /* set default tx channel number to one */
        dma_devices[i].num_tx_chan = 1;
        /* set default rx channel number to one */
        dma_devices[i].num_rx_chan = 1;
        dma_devices[i].buffer_alloc = common_buffer_alloc;
        dma_devices[i].buffer_free = common_buffer_free;
        dma_devices[i].intr_handler = NULL;
        dma_devices[i].tx_burst_len = 4;
        dma_devices[i].rx_burst_len = 4;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
        dma_devices[i].tx_endianness_mode = 0;
        dma_devices[i].rx_endianness_mode = 0;
#else
        dma_devices[i].tx_endianness_mode = 3;
        dma_devices[i].rx_endianness_mode = 3;
#endif
    }
}

void dma_chip_init(void)
{
    int i;

    sys1_w32(SYS1_CLKENR_DMA, clkenr);
    wmb();
    /* reset DMA */
    dma_w32(DMA_CTRL_RST, ctrl);
    wmb();
    /* disable all the interrupts first */
    dma_w32(0, irnen);

    /* enable polling for all channels */
    dma_w32(DMA_CPOLL_EN | DMA_CPOLL_CNT_VAL(DMA_POLL_COUNTER), cpoll);

    for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
        disable_ch_irq(&dma_chan[i]);
}

int ltq_dma_init(void)
{
    int result = 0;
    int i;
    unsigned long flag;
    static int dma_initialized;

    if (dma_initialized == 1)
        return 0;
    dma_initialized = 1;

    result = register_chrdev(DMA_MAJOR, "dma-core", &dma_fops);
    if (result) {
        DMA_EMSG("cannot register device dma-core!\n");
        return result;
    }

    dma_chip_init();
    map_dma_chan(dma_chan);

    /* allocate DMA memory for buffer descriptors */
    for (i = 0; i < DMA_DESCR_MEM_PAGES; i++) {
        g_desc_list[i] = (u64 *)__get_free_page(GFP_DMA);
        if (g_desc_list[i] == NULL) {
            DMA_EMSG("no memory for descriptor\n");
            return -ENOMEM;
        }
        g_desc_list[i] = (u64 *)KSEG1ADDR(g_desc_list[i]);
        memset(g_desc_list[i], 0, PAGE_SIZE);
    }

    for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
        int page_index, ch_per_page;
        /* cross-link relative channels of a port to
         * corresponding absolute channels */
        if (dma_chan[i].dir == DIR_RX) {
            ((struct dma_device_info *)(dma_chan[i].dma_dev))->
                rx_chan[dma_chan[i].rel_chan_no] = &dma_chan[i];
        } else {
            ((struct dma_device_info *)(dma_chan[i].dma_dev))->
                tx_chan[dma_chan[i].rel_chan_no] = &dma_chan[i];
        }
        dma_chan[i].abs_chan_no = i;

        page_index = i * DMA_DESCR_CH_SIZE / PAGE_SIZE;
        ch_per_page = PAGE_SIZE / DMA_DESCR_CH_SIZE +
            ((PAGE_SIZE % DMA_DESCR_CH_SIZE) > 0);
        dma_chan[i].desc_base =
            (u32)g_desc_list[page_index] +
            (i - page_index * ch_per_page) * DMA_DESCR_NUM * 8;
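        /* Example (assuming DMA_DESCR_NUM = 64 and PAGE_SIZE = 4096):
         * each ring takes 512 bytes, 8 rings fit per page, so channel
         * i = 10 lands in page 1 at offset 2 * 512 = 1024 bytes. */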
        dma_chan[i].curr_desc = 0;
        dma_chan[i].desc_len = DMA_DESCR_NUM;

        local_irq_save(flag);
        mbs_grab();
        dma_w32(i, cs);
        dma_w32((u32)CPHYSADDR(dma_chan[i].desc_base), cdba);
        mbs_release();
        local_irq_restore(flag);
    }

    g_dma_dir = proc_mkdir("driver/" DRV_NAME, NULL);

    create_proc_read_entry("dma_register",
                   0,
                   g_dma_dir,
                   dma_register_proc_read,
                   NULL);

    create_proc_read_entry("g_desc_list",
                   0,
                   g_dma_dir,
                   desc_list_proc_read,
                   NULL);

    create_proc_read_entry("channel_weight",
                   0,
                   g_dma_dir,
                   channel_weight_proc_read,
                   NULL);

    printk(KERN_NOTICE "SVIP DMA engine initialized\n");

    return 0;
}

/**
 * Cleanup the DMA device.
 * This function releases all resources used by the DMA device driver on
 * module removal.
 *
 * \param None
 * \return Nothing
 */
void dma_cleanup(void)
{
    int i;

    unregister_chrdev(DMA_MAJOR, "dma-core");

    for (i = 0; i < DMA_DESCR_MEM_PAGES; i++)
        free_page(KSEG0ADDR((unsigned long)g_desc_list[i]));
    remove_proc_entry("channel_weight", g_dma_dir);
    remove_proc_entry("g_desc_list", g_dma_dir);
    remove_proc_entry("dma_register", g_dma_dir);
    remove_proc_entry("driver/" DRV_NAME, NULL);
    /* release the resources */
    for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
        free_irq(dma_chan[i].irq, (void *)&dma_chan[i]);
}

arch_initcall(ltq_dma_init);

MODULE_LICENSE("GPL");
