package/broadcom-57xx/src/mm.h

/******************************************************************************/
/*                                                                            */
/* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2004 Broadcom  */
/* Corporation.                                                               */
/* All rights reserved.                                                       */
/*                                                                            */
/* This program is free software; you can redistribute it and/or modify       */
/* it under the terms of the GNU General Public License as published by       */
/* the Free Software Foundation, located in the file LICENSE.                 */
/*                                                                            */
/******************************************************************************/


#ifndef MM_H
#define MM_H

#include <linux/config.h>

#if defined(CONFIG_SMP) && !defined(__SMP__)
#define __SMP__
#endif

#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && !defined(MODVERSIONS)
#ifndef BCM_SMALL_DRV
#define MODVERSIONS
#endif
#endif

#ifndef B57UM
#define __NO_VERSION__
#endif
#include <linux/version.h>

#ifdef MODULE

#if defined(MODVERSIONS) && (LINUX_VERSION_CODE < 0x020500)
#ifndef BCM_SMALL_DRV
#include <linux/modversions.h>
#endif
#endif

#if (LINUX_VERSION_CODE < 0x020605)
#include <linux/module.h>
#else
#include <linux/moduleparam.h>
#endif

#else

#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#define SET_MODULE_OWNER(dev)
#define MODULE_DEVICE_TABLE(pci, pci_tbl)
#endif


#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/reboot.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <asm/uaccess.h>
#if (LINUX_VERSION_CODE >= 0x020400)
#if (LINUX_VERSION_CODE < 0x020500)
#include <linux/wrapper.h>
#endif
#include <linux/ethtool.h>
#endif
#ifdef CONFIG_PROC_FS
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>
#define BCM_PROC_FS 1
#endif
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#ifdef NETIF_F_TSO
#define BCM_TSO 1
#define INCLUDE_TCP_SEG_SUPPORT 1
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#endif

#ifndef LINUX_KERNEL_VERSION
#define LINUX_KERNEL_VERSION 0
#endif

#ifndef MAX_SKB_FRAGS
#define MAX_SKB_FRAGS 0
#endif

#if (LINUX_VERSION_CODE >= 0x020400)
#ifndef ETHTOOL_GEEPROM

#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */

/* for passing EEPROM chunks */
struct ethtool_eeprom {
    u32 cmd;
    u32 magic;
    u32 offset; /* in bytes */
    u32 len; /* in bytes */
    u8 data[0];
};
#define BCM_EEDUMP_LEN(info_p, size) *((u32 *) &((info_p)->reserved1[24]))=size

#else

#define BCM_EEDUMP_LEN(info_p, size) (info_p)->eedump_len=size

#endif
#endif

#define BCM_INT_COAL 1
#define BCM_NIC_SEND_BD 1
#define BCM_ASF 1
#define BCM_WOL 1
#define BCM_TASKLET 1

#if HAVE_NETIF_RECEIVE_SKB
#define BCM_NAPI_RXPOLL 1
#undef BCM_TASKLET
#endif

#if defined(CONFIG_PPC64)
#define BCM_DISCONNECT_AT_CACHELINE 1
#endif

#ifdef BCM_SMALL_DRV
#undef BCM_PROC_FS
#undef ETHTOOL_GEEPROM
#undef ETHTOOL_SEEPROM
#undef ETHTOOL_GREGS
#undef ETHTOOL_GPAUSEPARAM
#undef ETHTOOL_GRXCSUM
#undef ETHTOOL_TEST
#undef BCM_INT_COAL
#undef BCM_NIC_SEND_BD
#undef BCM_WOL
#undef BCM_TASKLET
#undef BCM_TSO
#endif

#ifdef __BIG_ENDIAN
#define BIG_ENDIAN_HOST 1
#endif

#define MM_SWAP_LE32(x) cpu_to_le32(x)
#define MM_SWAP_BE32(x) cpu_to_be32(x)

#if (LINUX_VERSION_CODE < 0x020327)
#define __raw_readl readl
#define __raw_writel writel
#endif

#define MM_MEMWRITEL(ptr, val) __raw_writel(val, ptr)
#define MM_MEMREADL(ptr) __raw_readl(ptr)

typedef atomic_t MM_ATOMIC_T;

#define MM_ATOMIC_SET(ptr, val) atomic_set(ptr, val)
#define MM_ATOMIC_READ(ptr) atomic_read(ptr)
#define MM_ATOMIC_INC(ptr) atomic_inc(ptr)
#define MM_ATOMIC_ADD(ptr, val) atomic_add(val, ptr)
#define MM_ATOMIC_DEC(ptr) atomic_dec(ptr)
#define MM_ATOMIC_SUB(ptr, val) atomic_sub(val, ptr)


#ifndef mmiowb
#define mmiowb()
#endif


#define MM_MB() mb()
#define MM_WMB() wmb()
#define MM_RMB() rmb()
#define MM_MMIOWB() mmiowb()

#include "lm.h"
#include "queue.h"
#include "tigon3.h"

#if DBG
#define STATIC
#else
#define STATIC static
#endif

extern int MM_Packet_Desc_Size;

#define MM_PACKET_DESC_SIZE MM_Packet_Desc_Size

DECLARE_QUEUE_TYPE(UM_RX_PACKET_Q, MAX_RX_PACKET_DESC_COUNT+1);

#define MAX_MEM 16
#define MAX_MEM2 4

#if (LINUX_VERSION_CODE < 0x020211)
typedef u32 dma_addr_t;
#endif

#if (LINUX_VERSION_CODE < 0x02032a)
#define pci_map_single(dev, address, size, dir) virt_to_bus(address)
#define pci_unmap_single(dev, dma_addr, size, dir)
#endif

#if MAX_SKB_FRAGS
#if (LINUX_VERSION_CODE >= 0x02040d)

typedef dma_addr_t dmaaddr_high_t;

#else

#if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && !defined(CONFIG_X86_64)

#if defined(CONFIG_HIGHMEM64G)
typedef unsigned long long dmaaddr_high_t;
#else
typedef dma_addr_t dmaaddr_high_t;
#endif

#ifndef pci_map_page
#define pci_map_page bcm_pci_map_page
#endif

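/*
 * On 32-bit x86 with HIGHMEM and a pre-2.4.13 kernel there is no
 * pci_map_page(), and highmem pages have no permanent kernel mapping, so the
 * bus address is computed directly from the page's index in mem_map.  This
 * relies on the x86 assumption that the DMA (bus) address equals the
 * physical address.
 */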
static inline dmaaddr_high_t
bcm_pci_map_page(struct pci_dev *dev, struct page *page,
            int offset, size_t size, int dir)
{
    dmaaddr_high_t phys;

    phys = (page-mem_map) * (dmaaddr_high_t) PAGE_SIZE + offset;

    return phys;
}

#ifndef pci_unmap_page
#define pci_unmap_page(dev, map, size, dir)
#endif

#else /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)*/

typedef dma_addr_t dmaaddr_high_t;

/* Warning - This may not work for all architectures if HIGHMEM is defined */

#ifndef pci_map_page
#define pci_map_page(dev, page, offset, size, dir) \
    pci_map_single(dev, page_address(page) + (offset), size, dir)
#endif
#ifndef pci_unmap_page
#define pci_unmap_page(dev, map, size, dir) \
    pci_unmap_single(dev, map, size, dir)
#endif

#endif /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)*/

#endif /* #if (LINUX_VERSION_CODE >= 0x02040d)*/
#endif /* #if MAX_SKB_FRAGS*/

#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
#define NO_PCI_UNMAP 1
#endif

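/*
 * Kernels older than 2.4.18 (0x020412) do not provide the
 * DECLARE_PCI_UNMAP_* / pci_unmap_* helpers, so equivalents are defined
 * here.  On 32-bit x86 (NO_PCI_UNMAP) unmapping is effectively a no-op, so
 * the stored mapping state is not needed and the helpers compile away.
 */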
#if (LINUX_VERSION_CODE < 0x020412)
#if !defined(NO_PCI_UNMAP)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME;

#define pci_unmap_addr(PTR, ADDR_NAME) \
    ((PTR)->ADDR_NAME)

#define pci_unmap_len(PTR, LEN_NAME) \
    ((PTR)->LEN_NAME)

#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
    (((PTR)->ADDR_NAME) = (VAL))

#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
    (((PTR)->LEN_NAME) = (VAL))
#else
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(ADDR_NAME)

#define pci_unmap_addr(PTR, ADDR_NAME) 0
#define pci_unmap_len(PTR, LEN_NAME) 0
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif
#endif

#if (LINUX_VERSION_CODE < 0x02030e)
#define net_device device
#define netif_carrier_on(dev)
#define netif_carrier_off(dev)
#endif

#if (LINUX_VERSION_CODE < 0x02032b)
#define tasklet_struct tq_struct
#endif

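/*
 * Per-adapter state for the Linux ("upper module") half of the driver.  The
 * OS-independent LM_DEVICE_BLOCK is embedded as the first member so a
 * PLM_DEVICE_BLOCK can be cast to a PUM_DEVICE_BLOCK and back.
 */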
typedef struct _UM_DEVICE_BLOCK {
    LM_DEVICE_BLOCK lm_dev;
    struct net_device *dev;
    struct pci_dev *pdev;
    struct net_device *next_module;
    char *name;
#ifdef BCM_PROC_FS
    struct proc_dir_entry *pfs_entry;
    char pfs_name[32];
#endif
    void *mem_list[MAX_MEM];
    dma_addr_t dma_list[MAX_MEM];
    int mem_size_list[MAX_MEM];
    int mem_list_num;

    int index;
    int opened;
    int suspended;
    int using_dac; /* dual address cycle */
    int delayed_link_ind; /* Delay link status during initial load */
    int adapter_just_inited; /* the first few seconds after init. */
    int timer_interval;
    int statstimer_interval;
    int adaptive_expiry;
    int crc_counter_expiry;
    int poll_tbi_interval;
    int poll_tbi_expiry;
    int asf_heartbeat;
    int tx_full;
    int tx_queued;
    int line_speed; /* in Mbps, 0 if link is down */
    UM_RX_PACKET_Q rx_out_of_buf_q;
    int rx_out_of_buf;
    int rx_buf_repl_thresh;
    int rx_buf_repl_panic_thresh;
    int rx_buf_repl_isr_limit;
    int rx_buf_align;
    struct timer_list timer;
    struct timer_list statstimer;
    int do_global_lock;
    spinlock_t global_lock;
    spinlock_t undi_lock;
    spinlock_t phy_lock;
    unsigned long undi_flags;
    volatile unsigned long interrupt;
    atomic_t intr_sem;
    int tasklet_pending;
    volatile unsigned long tasklet_busy;
    struct tasklet_struct tasklet;
    struct net_device_stats stats;
    int intr_test;
    int intr_test_result;
#ifdef NETIF_F_HW_VLAN_TX
    struct vlan_group *vlgrp;
#endif
    int vlan_tag_mode; /* Setting to allow ASF to work properly with VLANs */
    #define VLAN_TAG_MODE_AUTO_STRIP 0
    #define VLAN_TAG_MODE_NORMAL_STRIP 1
    #define VLAN_TAG_MODE_FORCED_STRIP 2

    /* Auto mode - VLAN tags are always stripped if ASF is enabled; */
    /* if ASF is not enabled, it behaves like normal mode. */
    /* Normal mode - VLAN tags are stripped when VLANs are registered. */
    /* Forced mode - VLAN tags are always stripped. */

    int adaptive_coalesce;
    uint rx_last_cnt;
    uint tx_last_cnt;
    uint rx_curr_coalesce_frames;
    uint rx_curr_coalesce_frames_intr;
    uint rx_curr_coalesce_ticks;
    uint tx_curr_coalesce_frames;
#if TIGON3_DEBUG
    unsigned long tx_zc_count;
    unsigned long tx_chksum_count;
    unsigned long tx_himem_count;
    unsigned long rx_good_chksum_count;
#endif
    unsigned long rx_bad_chksum_count;
#ifdef BCM_TSO
    unsigned long tso_pkt_count;
#endif
    unsigned long rx_misc_errors;
    uint64_t phy_crc_count;
    unsigned int spurious_int;

    void *sbh;
    unsigned long boardflags;
    void *robo;
    int qos;
} UM_DEVICE_BLOCK, *PUM_DEVICE_BLOCK;

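/*
 * Per-packet descriptor: the OS-independent LM_PACKET plus the associated
 * skb and the PCI unmap bookkeeping for the linear buffer (slot 0) and any
 * skb fragments.
 */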
typedef struct _UM_PACKET {
    LM_PACKET lm_packet;
    struct sk_buff *skbuff;
#if MAX_SKB_FRAGS
    DECLARE_PCI_UNMAP_ADDR(map[MAX_SKB_FRAGS + 1])
    DECLARE_PCI_UNMAP_LEN(map_len[MAX_SKB_FRAGS + 1])
#else
    DECLARE_PCI_UNMAP_ADDR(map[1])
    DECLARE_PCI_UNMAP_LEN(map_len[1])
#endif
} UM_PACKET, *PUM_PACKET;

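/*
 * Helpers to split a dma_addr_t into the 32-bit High/Low halves used by the
 * hardware descriptor structures.  On 32-bit hosts the high word is zero.
 */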
static inline void MM_SetAddr(LM_PHYSICAL_ADDRESS *paddr, dma_addr_t addr)
{
#if BITS_PER_LONG == 64
    paddr->High = ((unsigned long) addr) >> 32;
    paddr->Low = ((unsigned long) addr) & 0xffffffff;
#else
    paddr->High = 0;
    paddr->Low = (unsigned long) addr;
#endif
}

static inline void MM_SetT3Addr(T3_64BIT_HOST_ADDR *paddr, dma_addr_t addr)
{
#if BITS_PER_LONG == 64
    paddr->High = ((unsigned long) addr) >> 32;
    paddr->Low = ((unsigned long) addr) & 0xffffffff;
#else
    paddr->High = 0;
    paddr->Low = (unsigned long) addr;
#endif
}

#if MAX_SKB_FRAGS
static inline void MM_SetT3AddrHigh(T3_64BIT_HOST_ADDR *paddr,
    dmaaddr_high_t addr)
{
#if defined(CONFIG_HIGHMEM64G) && defined(CONFIG_X86) && !defined(CONFIG_X86_64)
    paddr->High = (unsigned long) (addr >> 32);
    paddr->Low = (unsigned long) (addr & 0xffffffff);
#else
    MM_SetT3Addr(paddr, (dma_addr_t) addr);
#endif
}
#endif

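/*
 * Map an skb's receive buffer for DMA and store the resulting bus address
 * both in the hardware descriptor and in the packet's unmap slot 0.
 */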
static inline void MM_MapRxDma(PLM_DEVICE_BLOCK pDevice,
    struct _LM_PACKET *pPacket,
    T3_64BIT_HOST_ADDR *paddr)
{
    dma_addr_t map;
    struct sk_buff *skb = ((struct _UM_PACKET *) pPacket)->skbuff;

    map = pci_map_single(((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
            skb->tail,
            pPacket->u.Rx.RxBufferSize,
            PCI_DMA_FROMDEVICE);
    pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[0], map);
    MM_SetT3Addr(paddr, map);
}

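/*
 * Map one piece of a transmit skb for DMA: frag == 0 maps the linear part
 * (skb->data), frag >= 1 maps skb fragment (frag - 1) via pci_map_page().
 * The bus address and length are recorded for later unmapping.
 */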
static inline void MM_MapTxDma(PLM_DEVICE_BLOCK pDevice,
    struct _LM_PACKET *pPacket,
    T3_64BIT_HOST_ADDR *paddr,
    LM_UINT32 *len,
    int frag)
{
    dma_addr_t map;
    struct sk_buff *skb = ((struct _UM_PACKET *) pPacket)->skbuff;
    unsigned int length;

    if (frag == 0) {
#if MAX_SKB_FRAGS
        if (skb_shinfo(skb)->nr_frags)
            length = skb->len - skb->data_len;
        else
#endif
            length = skb->len;
        map = pci_map_single(((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
            skb->data, length, PCI_DMA_TODEVICE);
        MM_SetT3Addr(paddr, map);
        pci_unmap_addr_set(((struct _UM_PACKET *)pPacket), map[0], map);
        pci_unmap_len_set(((struct _UM_PACKET *) pPacket), map_len[0],
            length);
        *len = length;
    }
#if MAX_SKB_FRAGS
    else {
        skb_frag_t *sk_frag;
        dmaaddr_high_t hi_map;

        sk_frag = &skb_shinfo(skb)->frags[frag - 1];

        hi_map = pci_map_page(
                ((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
                sk_frag->page,
                sk_frag->page_offset,
                sk_frag->size, PCI_DMA_TODEVICE);

        MM_SetT3AddrHigh(paddr, hi_map);
        pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[frag],
            hi_map);
        pci_unmap_len_set(((struct _UM_PACKET *) pPacket),
            map_len[frag], sk_frag->size);
        *len = sk_frag->size;
    }
#endif
}

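/*
 * PHY register access serialization.  When do_global_lock is set the
 * driver-wide global_lock is used, otherwise the dedicated phy_lock;
 * interrupts are disabled while the lock is held.  Typical usage (sketch):
 *
 *    unsigned long flags;
 *
 *    BCM5700_PHY_LOCK(pUmDevice, flags);
 *    ... read/write PHY registers ...
 *    BCM5700_PHY_UNLOCK(pUmDevice, flags);
 */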
#define BCM5700_PHY_LOCK(pUmDevice, flags) { \
    spinlock_t *lock; \
    if ((pUmDevice)->do_global_lock) { \
        lock = &(pUmDevice)->global_lock; \
    } \
    else { \
        lock = &(pUmDevice)->phy_lock; \
    } \
    spin_lock_irqsave(lock, flags); \
}

#define BCM5700_PHY_UNLOCK(pUmDevice, flags) { \
    spinlock_t *lock; \
    if ((pUmDevice)->do_global_lock) { \
        lock = &(pUmDevice)->global_lock; \
    } \
    else { \
        lock = &(pUmDevice)->phy_lock; \
    } \
    spin_unlock_irqrestore(lock, flags); \
}


#define MM_ACQUIRE_UNDI_LOCK(_pDevice) \
    if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
        unsigned long flags; \
        spin_lock_irqsave(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
        ((PUM_DEVICE_BLOCK)(_pDevice))->undi_flags = flags; \
    }

#define MM_RELEASE_UNDI_LOCK(_pDevice) \
    if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
        unsigned long flags = ((PUM_DEVICE_BLOCK) (_pDevice))->undi_flags; \
        spin_unlock_irqrestore(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
    }

#define MM_ACQUIRE_PHY_LOCK_IN_IRQ(_pDevice) \
    if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
        spin_lock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
    }

#define MM_RELEASE_PHY_LOCK_IN_IRQ(_pDevice) \
    if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
        spin_unlock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
    }

#define MM_UINT_PTR(_ptr) ((unsigned long) (_ptr))

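/*
 * Hardware statistics counters are kept as {High, Low} 32-bit pairs;
 * MM_GETSTATS64() reassembles the full 64-bit value, MM_GETSTATS32() takes
 * only the low word, and MM_GETSTATS() picks whichever fits in a long.
 */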
#define MM_GETSTATS64(_Ctr) \
    (uint64_t) (_Ctr).Low + ((uint64_t) (_Ctr).High << 32)

#define MM_GETSTATS32(_Ctr) \
    (uint32_t) (_Ctr).Low

#if BITS_PER_LONG == 64
#define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS64(_Ctr)
#else
#define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS32(_Ctr)
#endif

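/*
 * copy_to_user()/copy_from_user() may sleep, so on 2.6 kernels these
 * wrappers fall back to memcpy() when called from atomic context
 * (in_atomic()); older kernels call the copy routines directly.
 */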
#if (LINUX_VERSION_CODE >= 0x020600)
#define mm_copy_to_user( to, from, size ) \
    (in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_to_user((to),(from),(size)))
#define mm_copy_from_user( to, from, size ) \
    (in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_from_user((to),(from),(size)))
#else
#define mm_copy_to_user( to, from, size ) \
        copy_to_user((to),(from),(size) )
#define mm_copy_from_user( to, from, size ) \
        copy_from_user((to),(from),(size))
#endif

#ifndef printf
#define printf(fmt, args...) printk(KERN_WARNING fmt, ##args)
#endif

#define DbgPrint(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
#if defined(CONFIG_X86)
#define DbgBreakPoint() __asm__("int $129")
#else
#define DbgBreakPoint()
#endif
#define MM_Wait(time) udelay(time)

#endif

