drivers/staging/et131x/et131x.c

/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 * http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following Disclaimer as comments in the code as
 * well as in the documentation and/or other materials provided with the
 * distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following Disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS 1000
#define MAX_NUM_WRITE_RETRIES 2

/* MAC defines */
#define COUNTER_WRAP_16_BIT 0x10000
#define COUNTER_WRAP_12_BIT 0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */

/* ISR defines */
/*
 * For interrupts, normal running is:
 * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 * watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-directional
 * operation, we additionally enable rx_fbr0_low and rx_fbr1_low, so we know
 * when the buffer rings are running low.
 */
#define INT_MASK_DISABLE 0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE 0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7
 */
#define INT_MASK_ENABLE 0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7

/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE 60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST 128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED 0x0001
#define ET131X_PACKET_TYPE_MULTICAST 0x0002
#define ET131X_PACKET_TYPE_BROADCAST 0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT (1 * HZ)
#define NIC_SEND_HANG_THRESHOLD 0

/* MP_TCB flags */
#define fMP_DEST_MULTI 0x00000001
#define fMP_DEST_BROAD 0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_RECV_LOOKASIDE 0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE 0x00000008

/* MP_SHARED flags */
#define fMP_ADAPTER_LOWER_POWER 0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
#define fMP_ADAPTER_HARDWARE_ERROR 0x04000000

#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS 0xA4
#define ET1310_PCI_EEPROM_STATUS 0xB2
#define ET1310_PCI_ACK_NACK 0xC0
#define ET1310_PCI_REPLAY 0xC2
#define ET1310_PCI_L0L1LATENCY 0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG 0xED00 /* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST 0xED01 /* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO 1000

#define PARM_RX_NUM_BUFS_DEF 4
#define PARM_RX_TIME_INT_DEF 10
#define PARM_RX_MEM_END_DEF 0x2bc
#define PARM_TX_TIME_INT_DEF 40
#define PARM_TX_NUM_BUFS_DEF 4
#define PARM_DMA_CACHE_DEF 0

/* RX defines */
#define USE_FBR0 1
#define FBR_CHUNKS 32
#define MAX_DESC_PER_RING_RX 1024

/* number of RFDs - default and min */
#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK 40
#define NIC_DEFAULT_NUM_RFD 1024
#define NUM_FBRS 2
#else
#define RFD_LOW_WATER_MARK 20
#define NIC_DEFAULT_NUM_RFD 256
#define NUM_FBRS 1
#endif

#define NIC_MIN_NUM_RFD 64
#define NUM_PACKETS_HANDLED 256

#define ALCATEL_MULTICAST_PKT 0x01000000
#define ALCATEL_BROADCAST_PKT 0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
    u32 addr_lo;
    u32 addr_hi;
    u32 word2; /* Bits 10-31 reserved, 0-9 descriptor */
};
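
/* Illustrative sketch only: this helper is ours, not part of the driver.
 * A free buffer descriptor carries a 64-bit DMA address split across
 * addr_lo/addr_hi, with the descriptor index in the low 10 bits of word2,
 * per the layout noted above.
 */
static inline void fbr_desc_fill_example(struct fbr_desc *fd,
                                         dma_addr_t addr, u32 index)
{
    fd->addr_lo = lower_32_bits(addr);
    fd->addr_hi = upper_32_bits(addr);
    fd->word2 = index & 0x3FF; /* bits 0-9: descriptor index */
}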

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp hash pass
 * 1: ipa IP checksum assist
 * 2: ipp IP checksum pass
 * 3: tcpa TCP checksum assist
 * 4: tcpp TCP checksum pass
 * 5: wol WOL Event
 * 6: rxmac_error RXMAC Error Indicator
 * 7: drop Drop packet
 * 8: ft Frame Truncated
 * 9: jp Jumbo Packet
 * 10: vp VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped e.g. IFG too small on previous
 * 17: asw_RX_DV_event short receive event detected
 * 18: asw_false_carrier_event bad carrier since last good packet
 * 19: asw_code_err one or more nibbles signalled as errors
 * 20: asw_CRC_err CRC error
 * 21: asw_len_chk_err frame length field incorrect
 * 22: asw_too_long frame length > 1518 bytes
 * 23: asw_OK valid CRC + no code error
 * 24: asw_multicast has a multicast address
 * 25: asw_broadcast has a broadcast address
 * 26: asw_dribble_nibble spurious bits after EOP
 * 27: asw_control_frame is a control frame
 * 28: asw_pause_frame is a pause frame
 * 29: asw_unsupported_op unsupported OP code
 * 30: asw_VLAN_tag VLAN tag detected
 * 31: asw_long_evt Rx long event
 *
 * Word 1:
 * 0-15: length length in bytes
 * 16-25: bi Buffer Index
 * 26-27: ri Ring Index
 * 28-31: reserved
 */

struct pkt_stat_desc {
    u32 word0;
    u32 word1;
};
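
/* Illustrative accessors (names are ours, not the driver's) derived from
 * the word 1 layout documented above: length in bits 0-15, buffer index
 * in bits 16-25, ring index in bits 26-27.
 */
static inline u32 psr_length_example(const struct pkt_stat_desc *psd)
{
    return psd->word1 & 0xFFFF;        /* bits 0-15: length */
}

static inline u32 psr_buffer_index_example(const struct pkt_stat_desc *psd)
{
    return (psd->word1 >> 16) & 0x3FF; /* bits 16-25: bi */
}

static inline u32 psr_ring_index_example(const struct pkt_stat_desc *psd)
{
    return (psd->word1 >> 26) & 0x3;   /* bits 26-27: ri */
}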

/* Typedefs for the RX DMA status word */

/*
 * rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/*
 * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/*
 * struct rx_status_block is a structure representing the status of the Rx
 * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
    u32 word0;
    u32 word1;
};
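
/* Illustrative sketch (names are ours) of unpacking word 0 per the layout
 * documented above: FBR1 offset in bits 0-9 (wrap flag in bit 10), FBR0
 * offset in bits 16-25 (wrap flag in bit 26).
 */
static inline u32 rx_status_fbr1_offset_example(const struct rx_status_block *s)
{
    return s->word0 & 0x3FF;          /* bits 0-9 */
}

static inline u32 rx_status_fbr0_offset_example(const struct rx_status_block *s)
{
    return (s->word0 >> 16) & 0x3FF;  /* bits 16-25 */
}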

/*
 * Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
    void *virt[MAX_DESC_PER_RING_RX];
    void *buffer1[MAX_DESC_PER_RING_RX];
    void *buffer2[MAX_DESC_PER_RING_RX];
    u32 bus_high[MAX_DESC_PER_RING_RX];
    u32 bus_low[MAX_DESC_PER_RING_RX];
    void *ring_virtaddr;
    dma_addr_t ring_physaddr;
    void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
    dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
    u64 real_physaddr;
    u64 offset;
    u32 local_full;
    u32 num_entries;
    u32 buffsize;
};

/*
 * struct rx_ring is the structure representing the adaptor's local
 * reference(s) to the rings
 *
 ******************************************************************************
 * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
 * and index 1 to refer to FBR0
 ******************************************************************************
 */
struct rx_ring {
    struct fbr_lookup *fbr[NUM_FBRS];
    void *ps_ring_virtaddr;
    dma_addr_t ps_ring_physaddr;
    u32 local_psr_full;
    u32 psr_num_entries;

    struct rx_status_block *rx_status_block;
    dma_addr_t rx_status_bus;

    /* RECV */
    struct list_head recv_list;
    u32 num_ready_recv;

    u32 num_rfd;

    bool unfinished_receives;

    /* lookaside lists */
    struct kmem_cache *recv_lookaside;
};

/* TX defines */
/*
 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
    u32 addr_hi;
    u32 addr_lo;
    u32 len_vlan; /* control words how to xmit the */
    u32 flags;    /* data (detailed above) */
};
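
/* Illustrative sketch only (helper name is ours): filling a descriptor
 * per the word 2/3 layout documented above, with the frame length in
 * bits 0-15 of len_vlan (VLAN fields left at zero) and the control bits
 * (first/last/interrupt etc.) in flags.
 */
static inline void tx_desc_fill_example(struct tx_desc *desc, dma_addr_t addr,
                                        u32 len, u32 flags)
{
    desc->addr_hi = upper_32_bits(addr);
    desc->addr_lo = lower_32_bits(addr);
    desc->len_vlan = len & 0xFFFF; /* bits 0-15: length of packet */
    desc->flags = flags;           /* word 3 control bits */
}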

/*
 * The status of the Tx DMA engine; it sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
    struct tcb *next;    /* Next entry in ring */
    u32 flags;           /* Our flags for the packet */
    u32 count;           /* Used to spot stuck/lost packets */
    u32 stale;           /* Used to spot stuck/lost packets */
    struct sk_buff *skb; /* Network skb we are tied to */
    u32 index;           /* Ring indexes */
    u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
    /* TCB (Transmit Control Block) memory and lists */
    struct tcb *tcb_ring;

    /* List of TCBs that are ready to be used */
    struct tcb *tcb_qhead;
    struct tcb *tcb_qtail;

    /* list of TCBs that are currently being sent. NOTE that access to all
     * three of these (including used) are controlled via the
     * TCBSendQLock. This lock should be secured prior to incrementing /
     * decrementing used, or any queue manipulation on send_head /
     * tail
     */
    struct tcb *send_head;
    struct tcb *send_tail;
    int used;

    /* The actual descriptor ring */
    struct tx_desc *tx_desc_ring;
    dma_addr_t tx_desc_ring_pa;

    /* send_idx indicates where we last wrote to in the descriptor ring. */
    u32 send_idx;

    /* The location of the write-back status block */
    u32 *tx_status;
    dma_addr_t tx_status_pa;

    /* Packets since the last IRQ: used for interrupt coalescing */
    int since_irq;
};

/*
 * Do not change these values: if changed, the corresponding values in the
 * TXdma and RXdma engines must be changed as well.
 */
#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
#define NUM_TCB 64

/*
 * These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD 1000

#define LO_MARK_PERCENT_FOR_PSR 15
#define LO_MARK_PERCENT_FOR_RX 15

/* RFD (Receive Frame Descriptor) */
struct rfd {
    struct list_head list_node;
    struct sk_buff *skb;
    u32 len; /* total size of receive frame */
    u16 bufferindex;
    u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH 0
#define FLOW_TXONLY 1
#define FLOW_RXONLY 2
#define FLOW_NONE 3

/* Struct to define some device statistics */
struct ce_stats {
    /* MIB II variables
     *
     * NOTE: atomic_t types are only guaranteed to store 24 bits; if we
     * MUST have 32, then we'll need another way to perform atomic
     * operations
     */
    u32 unicast_pkts_rcvd;
    atomic_t unicast_pkts_xmtd;
    u32 multicast_pkts_rcvd;
    atomic_t multicast_pkts_xmtd;
    u32 broadcast_pkts_rcvd;
    atomic_t broadcast_pkts_xmtd;
    u32 rcvd_pkts_dropped;

    /* Tx Statistics. */
    u32 tx_underflows;

    u32 tx_collisions;
    u32 tx_excessive_collisions;
    u32 tx_first_collisions;
    u32 tx_late_collisions;
    u32 tx_max_pkt_errs;
    u32 tx_deferred;

    /* Rx Statistics. */
    u32 rx_overflows;

    u32 rx_length_errs;
    u32 rx_align_errs;
    u32 rx_crc_errs;
    u32 rx_code_violations;
    u32 rx_other_errs;

    u32 synchronous_iterations;
    u32 interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
    struct net_device *netdev;
    struct pci_dev *pdev;
    struct mii_bus *mii_bus;
    struct phy_device *phydev;
    struct work_struct task;

    /* Flags that indicate current state of the adapter */
    u32 flags;

    /* local link state, to determine if a state change has occurred */
    int link;

    /* Configuration */
    u8 rom_addr[ETH_ALEN];
    u8 addr[ETH_ALEN];
    bool has_eeprom;
    u8 eeprom_data[2];

    /* Spinlocks */
    spinlock_t lock;

    spinlock_t tcb_send_qlock;
    spinlock_t tcb_ready_qlock;
    spinlock_t send_hw_lock;

    spinlock_t rcv_lock;
    spinlock_t rcv_pend_lock;
    spinlock_t fbr_lock;

    spinlock_t phy_lock;

    /* Packet Filter and look ahead size */
    u32 packet_filter;

    /* multicast list */
    u32 multicast_addr_count;
    u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

    /* Pointer to the device's PCI register space */
    struct address_map __iomem *regs;

    /* Registry parameters */
    u8 wanted_flow; /* Flow we want for 802.3x flow control */
    u32 registry_jumbo_packet; /* Max supported ethernet packet size */

    /* Derived from the registry: */
    u8 flowcontrol; /* flow control validated by the far-end */

    /* Minimize init-time */
    struct timer_list error_timer;

    /* used to put the PHY into coma mode when booting up with no cable
     * plugged in after 5 seconds
     */
    u8 boot_coma;

    /* The next two fields are used to save power information at power
     * down. This information will be used during power up to set up
     * parts of Power Management in JAGCore
     */
    u16 pdown_speed;
    u8 pdown_duplex;

    /* Tx Memory Variables */
    struct tx_ring tx_ring;

    /* Rx Memory Variables */
    struct rx_ring rx_ring;

    /* Stats */
    struct ce_stats stats;

    struct net_device_stats net_stats;
};

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
    u32 reg;
    int i;

    /*
     * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
     * bit 7 and bits 1:0 all equal to 1, at least once after reset.
     * Subsequent operations need only check that bits 1:0 are equal
     * to 1 prior to starting a single byte read/write
     */

    for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
        /* Read registers grouped in DWORD1 */
        if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
            return -EIO;

        /* I2C idle and Phy Queue Avail both true */
        if ((reg & 0x3000) == 0x3000) {
            if (status)
                *status = reg;
            return reg & 0xFF;
        }
    }
    return -ETIMEDOUT;
}
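
/* Usage sketch: on success the low status byte (>= 0) is returned and,
 * when @status is non-NULL, the full DWORD1 group is stored through it.
 * Callers that only need the ready indication check for a negative errno:
 *
 *    err = eeprom_wait_ready(pdev, NULL);
 *    if (err < 0)
 *        return err;
 */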

/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 for a successful write, negative errno otherwise.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
    struct pci_dev *pdev = adapter->pdev;
    int index = 0;
    int retries;
    int err = 0;
    int i2c_wack = 0;
    int writeok = 0;
    u32 status;
    u32 val = 0;

    /*
     * For an EEPROM, an I2C single byte write is defined as a START
     * condition followed by the device address, EEPROM address, one byte
     * of data and a STOP condition. The STOP condition will trigger the
     * EEPROM's internally timed write cycle to the nonvolatile memory.
     * All inputs are disabled during this write cycle and the EEPROM will
     * not respond to any access until the internal write is complete.
     */

    err = eeprom_wait_ready(pdev, NULL);
    if (err)
        return err;

    /*
     * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
     * and bits 1:0 both =0. Bit 5 should be set according to the
     * type of EEPROM being accessed (1=two byte addressing, 0=one
     * byte addressing).
     */
    if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
            LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
        return -EIO;

    i2c_wack = 1;

    /* Prepare EEPROM address for Step 3 */

    for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
        /* Write the address to the LBCIF Address Register */
        if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
            break;
        /*
         * Write the data to the LBCIF Data Register (the I2C write
         * will begin).
         */
        if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
            break;
        /*
         * Monitor bits 1:0 of the LBCIF Status Register. When bits
         * 1:0 are both equal to 1, the I2C write has completed and the
         * internal write cycle of the EEPROM is about to start.
         * (bits 1:0 = 01 is a legal state while waiting for both to
         * equal 1, but bits 1:0 = 10 is invalid and implies that
         * something is broken).
         */
        err = eeprom_wait_ready(pdev, &status);
        if (err < 0)
            return 0;

        /*
         * Check bit 3 of the LBCIF Status Register. If equal to 1,
         * an error has occurred. Don't break here if we are revision
         * 1, this is so we do a blind write for load bug.
         */
        if ((status & LBCIF_STATUS_GENERAL_ERROR)
            && adapter->pdev->revision == 0)
            break;

        /*
         * Check bit 2 of the LBCIF Status Register. If equal to 1 an
         * ACK error has occurred on the address phase of the write.
         * This could be due to an actual hardware failure or the
         * EEPROM may still be in its internal write cycle from a
         * previous write. This write operation was ignored and must be
         * repeated later.
         */
        if (status & LBCIF_STATUS_ACK_ERROR) {
            /*
             * This could be due to an actual hardware failure
             * or the EEPROM may still be in its internal write
             * cycle from a previous write. This write operation
             * was ignored and must be repeated later.
             */
            udelay(10);
            continue;
        }

        writeok = 1;
        break;
    }

    /*
     * Set bit 6 of the LBCIF Control Register = 0.
     */
    udelay(10);

    while (i2c_wack) {
        if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
            LBCIF_CONTROL_LBCIF_ENABLE))
            writeok = 0;

        /* Do read until internal ACK_ERROR goes away meaning write
         * completed
         */
        do {
            pci_write_config_dword(pdev,
                           LBCIF_ADDRESS_REGISTER,
                           addr);
            do {
                pci_read_config_dword(pdev,
                    LBCIF_DATA_REGISTER, &val);
            } while ((val & 0x00010000) == 0);
        } while (val & 0x00040000);

        if ((val & 0xFF00) != 0xC000 || index == 10000)
            break;
        index++;
    }
    return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 for a successful read, negative errno otherwise.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
    struct pci_dev *pdev = adapter->pdev;
    int err;
    u32 status;

    /*
     * A single byte read is similar to the single byte write, with the
     * exception of the data flow:
     */

    err = eeprom_wait_ready(pdev, NULL);
    if (err)
        return err;
    /*
     * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
     * and bits 1:0 both =0. Bit 5 should be set according to the type
     * of EEPROM being accessed (1=two byte addressing, 0=one byte
     * addressing).
     */
    if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
                  LBCIF_CONTROL_LBCIF_ENABLE))
        return -EIO;
    /*
     * Write the address to the LBCIF Address Register (I2C read will
     * begin).
     */
    if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
        return -EIO;
    /*
     * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
     * is complete. (if bit 1 = 1 and bit 0 stays = 0, a hardware failure
     * has occurred).
     */
    err = eeprom_wait_ready(pdev, &status);
    if (err < 0)
        return err;
    /*
     * Regardless of error status, read data byte from LBCIF Data
     * Register.
     */
    *pdata = err;
    /*
     * Check bit 2 of the LBCIF Status Register. If = 1,
     * then an error has occurred.
     */
    return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
    struct pci_dev *pdev = adapter->pdev;
    u8 eestatus;

    /* We first need to check the EEPROM Status code located at offset
     * 0xB2 of config space
     */
    pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

    /* THIS IS A WORKAROUND:
     * I need to call this function twice to get my card running in an
     * LG M1 Express Dual. I also tried an msleep before this function,
     * because I thought there could be some timing conditions, but it
     * didn't work. Calling the whole function twice also works.
     */
    if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
        dev_err(&pdev->dev,
               "Could not read PCI config space for EEPROM Status\n");
        return -EIO;
    }

    /* Determine if the error(s) we care about are present. If they are
     * present we need to fail.
     */
    if (eestatus & 0x4C) {
        int write_failed = 0;
        if (pdev->revision == 0x01) {
            int i;
            static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

            /* Re-write the first 4 bytes if we have an eeprom
             * present and the revision id is 1, this fixes the
             * corruption seen with 1310 B Silicon
             */
            for (i = 0; i < 3; i++)
                if (eeprom_write(adapter, i, eedata[i]) < 0)
                    write_failed = 1;
        }
        if (pdev->revision != 0x01 || write_failed) {
            dev_err(&pdev->dev,
                "Fatal EEPROM Status Error - 0x%04x\n", eestatus);

            /* This error could mean that there was an error
             * reading the eeprom or that the eeprom doesn't exist.
             * We will treat each case the same and not try to
             * gather additional information that normally would
             * come from the eeprom, like MAC Address
             */
            adapter->has_eeprom = 0;
            return -EIO;
        }
    }
    adapter->has_eeprom = 1;

    /* Read the EEPROM for information regarding LED behavior. Refer to
     * ET1310_phy.c, et131x_xcvr_init(), for its use.
     */
    eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
    eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

    if (adapter->eeprom_data[0] != 0xcd)
        /* Disable all optional features */
        adapter->eeprom_data[1] = 0x00;

    return 0;
}

/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
    /* Setup the receive dma configuration register for normal operation */
    u32 csr = 0x2000; /* FBR1 enable */

    if (adapter->rx_ring.fbr[0]->buffsize == 4096)
        csr |= 0x0800;
    else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
        csr |= 0x1000;
    else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
        csr |= 0x1800;
#ifdef USE_FBR0
    csr |= 0x0400; /* FBR0 enable */
    if (adapter->rx_ring.fbr[1]->buffsize == 256)
        csr |= 0x0100;
    else if (adapter->rx_ring.fbr[1]->buffsize == 512)
        csr |= 0x0200;
    else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
        csr |= 0x0300;
#endif
    writel(csr, &adapter->regs->rxdma.csr);

    csr = readl(&adapter->regs->rxdma.csr);
    if ((csr & 0x00020000) != 0) { /* Check halt status (bit 17) */
        udelay(5);
        csr = readl(&adapter->regs->rxdma.csr);
        if ((csr & 0x00020000) != 0) {
            dev_err(&adapter->pdev->dev,
                "RX Dma failed to exit halt state. CSR 0x%08x\n",
                csr);
        }
    }
}

/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
    u32 csr;
    /* Setup the receive dma configuration register */
    writel(0x00002001, &adapter->regs->rxdma.csr);
    csr = readl(&adapter->regs->rxdma.csr);
    if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */
        udelay(5);
        csr = readl(&adapter->regs->rxdma.csr);
        if ((csr & 0x00020000) == 0)
            dev_err(&adapter->pdev->dev,
                "RX Dma failed to enter halt state. CSR 0x%08x\n",
                csr);
    }
}

/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
    /* Setup the transmit dma configuration register for normal
     * operation
     */
    writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
                    &adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
    *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
    *v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}
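
/* Example: assuming INDEX10() masks to the low 10 bits, these helpers
 * advance a hardware ring index kept in the low bits of *v while
 * preserving the wrap flag above it. For v = ET_DMA10_WRAP | 1023,
 * add_10bit(&v, 1) yields ET_DMA10_WRAP | 0: the index wraps modulo 1024
 * and the wrap flag is carried over unchanged (toggling it is left to
 * the caller).
 */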

/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
    struct mac_regs __iomem *macregs = &adapter->regs->mac;
    u32 station1;
    u32 station2;
    u32 ipg;

    /* First we need to reset everything. Write to MAC configuration
     * register 1 to perform reset.
     */
    writel(0xC00F0000, &macregs->cfg1);

    /* Next let's configure the MAC Inter-packet gap register */
    ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */
    ipg |= 0x50 << 8; /* ifg enforce 0x50 */
    writel(ipg, &macregs->ipg);

    /* Next let's configure the MAC Half Duplex register */
    /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
    writel(0x00A1F037, &macregs->hfdp);

    /* Next let's configure the MAC Interface Control register */
    writel(0, &macregs->if_ctrl);

    /* Let's move on to setting up the mii management configuration */
    writel(0x07, &macregs->mii_mgmt_cfg); /* Clock reset 0x7 */

    /* Next let's configure the MAC Station Address register. These
     * values are read from the EEPROM during initialization and stored
     * in the adapter structure. We write what is stored in the adapter
     * structure to the MAC Station Address registers high and low. This
     * station address is used for generating and checking pause control
     * packets.
     */
    station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
           (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
    station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
           (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
           (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
            adapter->addr[2];
    writel(station1, &macregs->station_addr_1);
    writel(station2, &macregs->station_addr_2);

    /* Max ethernet packet in bytes that will be passed by the mac without
     * being truncated. Allow the MAC to pass 4 more than our max packet
     * size. This is 4 for the Ethernet CRC.
     *
     * Packets larger than (registry_jumbo_packet) that do not contain a
     * VLAN ID will be dropped by the Rx function.
     */
    writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

    /* clear out MAC config reset */
    writel(0, &macregs->cfg1);
}

/**
 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
    int32_t delay = 0;
    struct mac_regs __iomem *mac = &adapter->regs->mac;
    struct phy_device *phydev = adapter->phydev;
    u32 cfg1;
    u32 cfg2;
    u32 ifctrl;
    u32 ctl;

    ctl = readl(&adapter->regs->txmac.ctl);
    cfg1 = readl(&mac->cfg1);
    cfg2 = readl(&mac->cfg2);
    ifctrl = readl(&mac->if_ctrl);

    /* Set up the if mode bits */
    cfg2 &= ~0x300;
    if (phydev && phydev->speed == SPEED_1000) {
        cfg2 |= 0x200;
        /* Phy mode bit */
        ifctrl &= ~(1 << 24);
    } else {
        cfg2 |= 0x100;
        ifctrl |= (1 << 24);
    }

    /* We need to enable Rx/Tx */
    cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
    /* Initialize loop back to off */
    cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
    if (adapter->flowcontrol == FLOW_RXONLY ||
                adapter->flowcontrol == FLOW_BOTH)
        cfg1 |= CFG1_RX_FLOW;
    writel(cfg1, &mac->cfg1);

    /* Now we need to initialize the MAC Configuration 2 register */
    /* preamble 7, check length, huge frame off, pad crc, crc enable
       full duplex off */
    cfg2 |= 0x7016;
    cfg2 &= ~0x0021;

    /* Turn on duplex if needed */
    if (phydev && phydev->duplex == DUPLEX_FULL)
        cfg2 |= 0x01;

    ifctrl &= ~(1 << 26);
    if (phydev && phydev->duplex == DUPLEX_HALF)
        ifctrl |= (1 << 26); /* Enable ghd */

    writel(ifctrl, &mac->if_ctrl);
    writel(cfg2, &mac->cfg2);

    do {
        udelay(10);
        delay++;
        cfg1 = readl(&mac->cfg1);
    } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

    if (delay == 100) {
        dev_warn(&adapter->pdev->dev,
            "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
            cfg1);
    }

    /* Enable txmac */
    ctl |= 0x09; /* TX mac enable, FC disable */
    writel(ctl, &adapter->regs->txmac.ctl);

    /* Ready to start the RXDMA/TXDMA engine */
    if (adapter->flags & fMP_ADAPTER_LOWER_POWER) {
        et131x_rx_dma_enable(adapter);
        et131x_tx_dma_enable(adapter);
    }
}

/**
 * et1310_in_phy_coma - check if the device is in phy coma
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 if the device is not in phy coma, 1 if it is in phy coma
 */
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
    u32 pmcsr;

    pmcsr = readl(&adapter->regs->global.pm_csr);

    return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
    struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
    u32 hash1 = 0;
    u32 hash2 = 0;
    u32 hash3 = 0;
    u32 hash4 = 0;
    u32 pm_csr;

    /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
     * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
     * specified) then we should pass NO multi-cast addresses to the
     * driver.
     */
    if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
        int i;

        /* Loop through our multicast array and set up the device */
        for (i = 0; i < adapter->multicast_addr_count; i++) {
            u32 result;

            result = ether_crc(6, adapter->multicast_list[i]);

            /* Bits 23-29 of the CRC select a 7-bit hash (0-127),
             * which picks one bit across the four 32-bit hash
             * registers below.
             */
            result = (result & 0x3F800000) >> 23;

            if (result < 32) {
                hash1 |= (1 << result);
            } else if ((31 < result) && (result < 64)) {
                result -= 32;
                hash2 |= (1 << result);
            } else if ((63 < result) && (result < 96)) {
                result -= 64;
                hash3 |= (1 << result);
            } else {
                result -= 96;
                hash4 |= (1 << result);
            }
        }
    }

    /* Write out the new hash to the device */
    pm_csr = readl(&adapter->regs->global.pm_csr);
    if (!et1310_in_phy_coma(adapter)) {
        writel(hash1, &rxmac->multi_hash1);
        writel(hash2, &rxmac->multi_hash2);
        writel(hash3, &rxmac->multi_hash3);
        writel(hash4, &rxmac->multi_hash4);
    }
}

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
    struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
    u32 uni_pf1;
    u32 uni_pf2;
    u32 uni_pf3;
    u32 pm_csr;

    /* Set up unicast packet filter reg 3 to be the first two octets of
     * the MAC address for both addresses
     *
     * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
     * MAC address for second address
     *
     * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
     * MAC address for first address
     */
    uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
          (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
          (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
           adapter->addr[1];

    uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
          (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
          (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
           adapter->addr[5];

    uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
          (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
          (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
           adapter->addr[5];

    pm_csr = readl(&adapter->regs->global.pm_csr);
    if (!et1310_in_phy_coma(adapter)) {
        writel(uni_pf1, &rxmac->uni_pf_addr1);
        writel(uni_pf2, &rxmac->uni_pf_addr2);
        writel(uni_pf3, &rxmac->uni_pf_addr3);
    }
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
    struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
    struct phy_device *phydev = adapter->phydev;
    u32 sa_lo;
    u32 sa_hi = 0;
    u32 pf_ctrl = 0;

    /* Disable the MAC while it is being configured (also disable WOL) */
    writel(0x8, &rxmac->ctrl);

    /* Initialize WOL to disabled. */
    writel(0, &rxmac->crc0);
    writel(0, &rxmac->crc12);
    writel(0, &rxmac->crc34);

    /* We need to set the WOL mask0 - mask4 next. We initialize them to
     * their default value of 0x00000000 because there are no WOL masks
     * as of this time.
     */
    writel(0, &rxmac->mask0_word0);
    writel(0, &rxmac->mask0_word1);
    writel(0, &rxmac->mask0_word2);
    writel(0, &rxmac->mask0_word3);

    writel(0, &rxmac->mask1_word0);
    writel(0, &rxmac->mask1_word1);
    writel(0, &rxmac->mask1_word2);
    writel(0, &rxmac->mask1_word3);

    writel(0, &rxmac->mask2_word0);
    writel(0, &rxmac->mask2_word1);
    writel(0, &rxmac->mask2_word2);
    writel(0, &rxmac->mask2_word3);

    writel(0, &rxmac->mask3_word0);
    writel(0, &rxmac->mask3_word1);
    writel(0, &rxmac->mask3_word2);
    writel(0, &rxmac->mask3_word3);

    writel(0, &rxmac->mask4_word0);
    writel(0, &rxmac->mask4_word1);
    writel(0, &rxmac->mask4_word2);
    writel(0, &rxmac->mask4_word3);

    /* Lets setup the WOL Source Address */
    sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
        (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
        (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
         adapter->addr[5];
    writel(sa_lo, &rxmac->sa_lo);

    sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
               adapter->addr[1];
    writel(sa_hi, &rxmac->sa_hi);

    /* Disable all Packet Filtering */
    writel(0, &rxmac->pf_ctrl);

    /* Let's initialize the Unicast Packet filtering address */
    if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
        et1310_setup_device_for_unicast(adapter);
        pf_ctrl |= 4; /* Unicast filter */
    } else {
        writel(0, &rxmac->uni_pf_addr1);
        writel(0, &rxmac->uni_pf_addr2);
        writel(0, &rxmac->uni_pf_addr3);
    }

    /* Let's initialize the Multicast hash */
    if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
        pf_ctrl |= 2; /* Multicast filter */
        et1310_setup_device_for_multicast(adapter);
    }

    /* Runt packet filtering. Didn't work in version A silicon. */
    pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
    pf_ctrl |= 8; /* Fragment filter */

    if (adapter->registry_jumbo_packet > 8192)
        /* In order to transmit jumbo packets greater than 8k, the
         * FIFO between RxMAC and RxDMA needs to be reduced in size
         * to (16k - Jumbo packet size). In order to implement this,
         * we must use "cut through" mode in the RxMAC, which chops
         * packets down into segments which are (max_size * 16). In
         * this case we selected 256 bytes, since this is the size of
         * the PCI-Express TLP's that the 1310 uses.
         *
         * seg_en on, fc_en off, size 0x10
         */
        writel(0x41, &rxmac->mcif_ctrl_max_seg);
    else
        writel(0, &rxmac->mcif_ctrl_max_seg);

    /* Initialize the MCIF water marks */
    writel(0, &rxmac->mcif_water_mark);

    /* Initialize the MIF control */
    writel(0, &rxmac->mif_ctrl);

    /* Initialize the Space Available Register */
    writel(0, &rxmac->space_avail);

    /* Initialize the mif_ctrl register
     * bit 3: Receive code error. One or more nibbles were signalled as
     * errors during the reception of the packet. Clear this
     * bit in Gigabit, set it in 100Mbit. This was derived
     * experimentally at UNH.
     * bit 4: Receive CRC error. The packet's CRC did not match the
     * internally generated CRC.
     * bit 5: Receive length check error. Indicates that frame length
     * field value in the packet does not match the actual data
     * byte length and is not a type field.
     * bit 16: Receive frame truncated.
     * bit 17: Drop packet enable
     */
    if (phydev && phydev->speed == SPEED_100)
        writel(0x30038, &rxmac->mif_ctrl);
    else
        writel(0x30030, &rxmac->mif_ctrl);

    /* Finally we initialize RxMac to be enabled & WOL disabled. Packet
     * filter is always enabled since it is where the runt packets are
     * supposed to be dropped. For version A silicon, runt packet
     * dropping doesn't work, so it is disabled in the pf_ctrl register,
     * but we still leave the packet filter on.
     */
    writel(pf_ctrl, &rxmac->pf_ctrl);
    writel(0x9, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
    struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

    /* We need to update the Control Frame Parameters
     * cfpt - control frame pause timer set to 64 (0x40)
     * cfep - control frame extended pause timer set to 0x0
     */
    if (adapter->flowcontrol == FLOW_NONE)
        writel(0, &txmac->cf_param);
    else
        writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
    struct macstat_regs __iomem *macstat =
        &adapter->regs->macstat;

    /* Next we need to initialize all the macstat registers to zero on
     * the device.
     */
    writel(0, &macstat->txrx_0_64_byte_frames);
    writel(0, &macstat->txrx_65_127_byte_frames);
    writel(0, &macstat->txrx_128_255_byte_frames);
    writel(0, &macstat->txrx_256_511_byte_frames);
    writel(0, &macstat->txrx_512_1023_byte_frames);
    writel(0, &macstat->txrx_1024_1518_byte_frames);
    writel(0, &macstat->txrx_1519_1522_gvln_frames);

    writel(0, &macstat->rx_bytes);
    writel(0, &macstat->rx_packets);
    writel(0, &macstat->rx_fcs_errs);
    writel(0, &macstat->rx_multicast_packets);
    writel(0, &macstat->rx_broadcast_packets);
    writel(0, &macstat->rx_control_frames);
    writel(0, &macstat->rx_pause_frames);
    writel(0, &macstat->rx_unknown_opcodes);
    writel(0, &macstat->rx_align_errs);
    writel(0, &macstat->rx_frame_len_errs);
    writel(0, &macstat->rx_code_errs);
    writel(0, &macstat->rx_carrier_sense_errs);
    writel(0, &macstat->rx_undersize_packets);
    writel(0, &macstat->rx_oversize_packets);
    writel(0, &macstat->rx_fragment_packets);
    writel(0, &macstat->rx_jabbers);
    writel(0, &macstat->rx_drops);

    writel(0, &macstat->tx_bytes);
    writel(0, &macstat->tx_packets);
    writel(0, &macstat->tx_multicast_packets);
    writel(0, &macstat->tx_broadcast_packets);
    writel(0, &macstat->tx_pause_frames);
    writel(0, &macstat->tx_deferred);
    writel(0, &macstat->tx_excessive_deferred);
    writel(0, &macstat->tx_single_collisions);
    writel(0, &macstat->tx_multiple_collisions);
    writel(0, &macstat->tx_late_collisions);
    writel(0, &macstat->tx_excessive_collisions);
    writel(0, &macstat->tx_total_collisions);
    writel(0, &macstat->tx_pause_honored_frames);
    writel(0, &macstat->tx_drops);
    writel(0, &macstat->tx_jabbers);
    writel(0, &macstat->tx_fcs_errs);
    writel(0, &macstat->tx_control_frames);
    writel(0, &macstat->tx_oversize_frames);
    writel(0, &macstat->tx_undersize_frames);
    writel(0, &macstat->tx_fragments);
    writel(0, &macstat->carry_reg1);
    writel(0, &macstat->carry_reg2);

    /* Unmask any counters that we want to track the overflow of.
     * Initially this will be all counters. It may become clear later
     * that we do not need to track all counters.
     */
    writel(0xFFFFBE32, &macstat->carry_reg1_mask);
    writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

/**
 * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
          u8 reg, u16 *value)
{
    struct mac_regs __iomem *mac = &adapter->regs->mac;
    int status = 0;
    u32 delay = 0;
    u32 mii_addr;
    u32 mii_cmd;
    u32 mii_indicator;

    /* Save a local copy of the registers we are dealing with so we can
     * set them back
     */
    mii_addr = readl(&mac->mii_mgmt_addr);
    mii_cmd = readl(&mac->mii_mgmt_cmd);

    /* Stop the current operation */
    writel(0, &mac->mii_mgmt_cmd);

    /* Set up the register we need to read from on the correct PHY */
    writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

    writel(0x1, &mac->mii_mgmt_cmd);

    do {
        udelay(50);
        delay++;
        mii_indicator = readl(&mac->mii_mgmt_indicator);
    } while ((mii_indicator & MGMT_WAIT) && delay < 50);

    /* If we hit the max delay, we could not read the register */
    if (delay == 50) {
        dev_warn(&adapter->pdev->dev,
                "reg 0x%08x could not be read\n", reg);
        dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
                mii_indicator);

        status = -EIO;
    }

    /* Read the value returned by the PHY and hand it to the caller; if
     * we timed out above, status already carries the error.
     */
    *value = readl(&mac->mii_mgmt_stat) & 0xFFFF;

    /* Stop the read operation */
    writel(0, &mac->mii_mgmt_cmd);

    /* set the registers we touched back to the state at which we entered
     * this function
     */
    writel(mii_addr, &mac->mii_mgmt_addr);
    writel(mii_cmd, &mac->mii_mgmt_cmd);

    return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
    struct phy_device *phydev = adapter->phydev;

    if (!phydev)
        return -EIO;

    return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/**
 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
    struct mac_regs __iomem *mac = &adapter->regs->mac;
    struct phy_device *phydev = adapter->phydev;
    int status = 0;
    u8 addr;
    u32 delay = 0;
    u32 mii_addr;
    u32 mii_cmd;
    u32 mii_indicator;

    if (!phydev)
        return -EIO;

    addr = phydev->addr;

    /* Save a local copy of the registers we are dealing with so we can
     * set them back
     */
    mii_addr = readl(&mac->mii_mgmt_addr);
    mii_cmd = readl(&mac->mii_mgmt_cmd);

    /* Stop the current operation */
    writel(0, &mac->mii_mgmt_cmd);

    /* Set up the register we need to write to on the correct PHY */
    writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

    /* Add the value to write to the registers to the mac */
    writel(value, &mac->mii_mgmt_ctrl);

    do {
        udelay(50);
        delay++;
        mii_indicator = readl(&mac->mii_mgmt_indicator);
    } while ((mii_indicator & MGMT_BUSY) && delay < 100);

    /* If we hit the max delay, we could not write the register */
    if (delay == 100) {
        u16 tmp;

        dev_warn(&adapter->pdev->dev,
            "reg 0x%08x could not be written\n", reg);
        dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
                mii_indicator);
        dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
                readl(&mac->mii_mgmt_cmd));

        et131x_mii_read(adapter, reg, &tmp);

        status = -EIO;
    }
    /* Stop the write operation */
    writel(0, &mac->mii_mgmt_cmd);

    /*
     * set the registers we touched back to the state at which we entered
     * this function
     */
    writel(mii_addr, &mac->mii_mgmt_addr);
    writel(mii_cmd, &mac->mii_mgmt_cmd);

    return status;
}

/* Still used from _mac for BIT_READ */
static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
                      u16 action, u16 regnum, u16 bitnum,
                      u8 *value)
{
    u16 reg;
    u16 mask = 0x0001 << bitnum;

    /* Read the requested register */
    et131x_mii_read(adapter, regnum, &reg);

    switch (action) {
    case TRUEPHY_BIT_READ:
        *value = (reg & mask) >> bitnum;
        break;

    case TRUEPHY_BIT_SET:
        et131x_mii_write(adapter, regnum, reg | mask);
        break;

    case TRUEPHY_BIT_CLEAR:
        et131x_mii_write(adapter, regnum, reg & ~mask);
        break;

    default:
        break;
    }
}

static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
    struct phy_device *phydev = adapter->phydev;

    if (phydev->duplex == DUPLEX_HALF) {
        adapter->flowcontrol = FLOW_NONE;
    } else {
        u8 remote_pause, remote_async_pause;

        et1310_phy_access_mii_bit(adapter,
                TRUEPHY_BIT_READ, 5, 10, &remote_pause);
        et1310_phy_access_mii_bit(adapter,
                TRUEPHY_BIT_READ, 5, 11,
                &remote_async_pause);

        if ((remote_pause == TRUEPHY_BIT_SET) &&
            (remote_async_pause == TRUEPHY_BIT_SET)) {
            adapter->flowcontrol = adapter->wanted_flow;
        } else if ((remote_pause == TRUEPHY_BIT_SET) &&
               (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
            if (adapter->wanted_flow == FLOW_BOTH)
                adapter->flowcontrol = FLOW_BOTH;
            else
                adapter->flowcontrol = FLOW_NONE;
        } else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
               (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
            adapter->flowcontrol = FLOW_NONE;
        } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
                   remote_async_pause == TRUEPHY_SET_BIT) */
            if (adapter->wanted_flow == FLOW_BOTH)
                adapter->flowcontrol = FLOW_RXONLY;
            else
                adapter->flowcontrol = FLOW_NONE;
        }
    }
}
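
/* Summary of the resolution above, where "set"/"clear" are the values
 * reported by et1310_phy_access_mii_bit() for the link partner's pause
 * (reg 5, bit 10) and asym_pause (reg 5, bit 11) advertisement bits:
 *
 *    pause   asym    resulting flowcontrol (wanted_flow == FLOW_BOTH)
 *    set     set     wanted_flow
 *    set     clear   FLOW_BOTH
 *    clear   clear   FLOW_NONE
 *    clear   set     FLOW_RXONLY
 *
 * With any other wanted_flow, the partial cases fall back to FLOW_NONE.
 */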

/**
 * et1310_update_macstat_host_counters - Update the local copy of the statistics
 * @adapter: pointer to the adapter structure
 */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
    struct ce_stats *stats = &adapter->stats;
    struct macstat_regs __iomem *macstat =
        &adapter->regs->macstat;

    stats->tx_collisions += readl(&macstat->tx_total_collisions);
    stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
    stats->tx_deferred += readl(&macstat->tx_deferred);
    stats->tx_excessive_collisions +=
                readl(&macstat->tx_multiple_collisions);
    stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
    stats->tx_underflows += readl(&macstat->tx_undersize_frames);
    stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

    stats->rx_align_errs += readl(&macstat->rx_align_errs);
    stats->rx_crc_errs += readl(&macstat->rx_code_errs);
    stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
    stats->rx_overflows += readl(&macstat->rx_oversize_packets);
    stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
    stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
    stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}
1640
1641/**
1642 * et1310_handle_macstat_interrupt
1643 * @adapter: pointer to the adapter structure
1644 *
1645 * One of the MACSTAT counters has wrapped. Update the local copy of
1646 * the statistics held in the adapter structure, checking the "wrap"
1647 * bit for each counter.
1648 */
1649static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
1650{
1651    u32 carry_reg1;
1652    u32 carry_reg2;
1653
1654    /* Read the interrupt bits from the register(s). These are Clear On
1655     * Write.
1656     */
1657    carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
1658    carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);
1659
1660    writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
1661    writel(carry_reg2, &adapter->regs->macstat.carry_reg2);
1662
1663    /* We need to update the host copy of all the MAC_STAT counters.
1664     * For each counter, check its overflow bit. If the overflow bit is
1665     * set, then increment the host version of the count by one complete
1666     * revolution of the counter. This routine is called when the counter
1667     * block indicates that one of the counters has wrapped.
1668     */
1669    if (carry_reg1 & (1 << 14))
1670        adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
1671    if (carry_reg1 & (1 << 8))
1672        adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
1673    if (carry_reg1 & (1 << 7))
1674        adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
1675    if (carry_reg1 & (1 << 2))
1676        adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
1677    if (carry_reg1 & (1 << 6))
1678        adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
1679    if (carry_reg1 & (1 << 3))
1680        adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
1681    if (carry_reg1 & (1 << 0))
1682        adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
1683    if (carry_reg2 & (1 << 16))
1684        adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
1685    if (carry_reg2 & (1 << 15))
1686        adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
1687    if (carry_reg2 & (1 << 6))
1688        adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
1689    if (carry_reg2 & (1 << 8))
1690        adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
1691    if (carry_reg2 & (1 << 5))
1692        adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
1693    if (carry_reg2 & (1 << 4))
1694        adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
1695    if (carry_reg2 & (1 << 2))
1696        adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
1697}
1698
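/* Worked example of the carry handling above: a 12-bit MACSTAT counter runs
 * from 0 to 4095 and then wraps to 0, raising its carry bit. Adding one full
 * revolution (COUNTER_WRAP_12_BIT, 0x1000 assuming the et131x.h definitions)
 * to the host copy keeps the accumulated total continuous - a post-wrap read
 * of 3 really represents 4096 + 3 events since the last update.
 */
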
1699static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
1700{
1701    struct net_device *netdev = bus->priv;
1702    struct et131x_adapter *adapter = netdev_priv(netdev);
1703    u16 value;
1704    int ret;
1705
1706    ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);
1707
1708    if (ret < 0)
1709        return ret;
1710    else
1711        return value;
1712}
1713
1714static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
1715                 int reg, u16 value)
1716{
1717    struct net_device *netdev = bus->priv;
1718    struct et131x_adapter *adapter = netdev_priv(netdev);
1719
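    /* phy_addr is deliberately unused here - et131x_mii_write() always
     * talks to the PHY bound to this adapter (adapter->phydev).
     */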
1720    return et131x_mii_write(adapter, reg, value);
1721}
1722
1723static int et131x_mdio_reset(struct mii_bus *bus)
1724{
1725    struct net_device *netdev = bus->priv;
1726    struct et131x_adapter *adapter = netdev_priv(netdev);
1727
1728    et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);
1729
1730    return 0;
1731}
1732
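/* For context, a minimal sketch of how callbacks such as these are typically
 * wired into an mii_bus. The driver's real registration lives elsewhere in
 * this file; the error handling below is illustrative only, and a unique
 * bus->id string must also be set before registering:
 *
 *    struct mii_bus *bus = mdiobus_alloc();
 *
 *    if (!bus)
 *        return -ENOMEM;
 *    bus->name = "et131x_eth_mii";
 *    bus->priv = netdev;
 *    bus->read = et131x_mdio_read;
 *    bus->write = et131x_mdio_write;
 *    bus->reset = et131x_mdio_reset;
 *    if (mdiobus_register(bus)) {
 *        mdiobus_free(bus);
 *        return -ENODEV;
 *    }
 */
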
1733/**
1734 * et1310_phy_power_down - PHY power control
1735 * @adapter: device to control
1736 * @down: true for off/false for back on
1737 *
1738 * one hundred, ten, one thousand megs
1739 * How would you like to have your LAN accessed
1740 * Can't you see that this code processed
1741 * Phy power, phy power..
1742 */
1743static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
1744{
1745    u16 data;
1746
1747    et131x_mii_read(adapter, MII_BMCR, &data);
1748    data &= ~BMCR_PDOWN;
1749    if (down)
1750        data |= BMCR_PDOWN;
1751    et131x_mii_write(adapter, MII_BMCR, data);
1752}
1753
1754/**
1755 * et131x_xcvr_init - Init the phy if we are setting it into force mode
1756 * @adapter: pointer to our private adapter structure
1758 */
1759static void et131x_xcvr_init(struct et131x_adapter *adapter)
1760{
1761    u16 imr;
1762    u16 isr;
1763    u16 lcr2;
1764
1765    et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
1766    et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);
1767
1768    /* Set the link status interrupt only; with both link status and
1769     * auto-neg interrupts enabled we ran into a nested interrupt problem.
1770     */
1771    imr |= (ET_PHY_INT_MASK_AUTONEGSTAT |
1772        ET_PHY_INT_MASK_LINKSTAT |
1773        ET_PHY_INT_MASK_ENABLE);
1774
1775    et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);
1776
1777    /* Set the LED behavior such that LED 1 indicates speed (off =
1778     * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
1779     * link and activity (on for link, blink off for activity).
1780     *
1781     * NOTE: Some customizations have been added here for specific
1782     * vendors; the LED behavior is now determined by vendor data in the
1783     * EEPROM. However, the above description is the default.
1784     */
1785    if ((adapter->eeprom_data[1] & 0x4) == 0) {
1786        et131x_mii_read(adapter, PHY_LED_2, &lcr2);
1787
1788        lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
1789        lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);
1790
1791        if ((adapter->eeprom_data[1] & 0x8) == 0)
1792            lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
1793        else
1794            lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);
1795
1796        et131x_mii_write(adapter, PHY_LED_2, lcr2);
1797    }
1798}
1799
1800/**
1801 * et131x_configure_global_regs - configure JAGCore global regs
1802 * @adapter: pointer to our adapter structure
1803 *
1804 * Used to configure the global registers on the JAGCore
1805 */
1806static void et131x_configure_global_regs(struct et131x_adapter *adapter)
1807{
1808    struct global_regs __iomem *regs = &adapter->regs->global;
1809
1810    writel(0, &regs->rxq_start_addr);
1811    writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);
1812
1813    if (adapter->registry_jumbo_packet < 2048) {
1814        /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
1815         * block of RAM that the driver can split between Tx
1816         * and Rx as it desires. Our default is to split it
1817         * 50/50:
1818         */
1819        writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
1820        writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
1821    } else if (adapter->registry_jumbo_packet < 8192) {
1822        /* For jumbo packets > 2k but < 8k, split 50-50. */
1823        writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
1824        writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
1825    } else {
1826        /* 9216 is the only packet size greater than 8k that
1827         * is available. The Tx buffer has to be big enough
1828         * for one whole packet on the Tx side. We'll make
1829         * the Tx queue 9408 bytes and give the rest to Rx.
1830         */
1831        writel(0x01b3, &regs->rxq_end_addr);
1832        writel(0x01b4, &regs->txq_start_addr);
1833    }
1834
1835    /* Initialize the loopback register. Disable all loopbacks. */
1836    writel(0, &regs->loopback);
1837
1838    /* MSI Register */
1839    writel(0, &regs->msi_config);
1840
1841    /* By default, disable the watchdog timer. It will be enabled when
1842     * a packet is queued.
1843     */
1844    writel(0, &regs->watchdog_timer);
1845}
1846
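/* Worked example of the split above, assuming INTERNAL_MEM_SIZE is 0x400
 * 16-byte words as in et131x.h: the 9216-byte jumbo case keeps words
 * 0x000-0x1b3 for Rx and words 0x1b4-0x3ff for Tx, i.e. 588 words or
 * 9408 bytes - just enough for one whole jumbo frame on the Tx side.
 */
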
1847/**
1848 * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
1849 * @adapter: pointer to our adapter structure
1850 */
1851static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1852{
1853    struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1854    struct rx_ring *rx_local = &adapter->rx_ring;
1855    struct fbr_desc *fbr_entry;
1856    u32 entry;
1857    u32 psr_num_des;
1858    unsigned long flags;
1859
1860    /* Halt RXDMA to perform the reconfigure. */
1861    et131x_rx_dma_disable(adapter);
1862
1863    /* Load the completion writeback physical address
1864     *
1865     * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
1866     * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
1867     * are ever returned, make sure the high part is retrieved here
1868     * before storing the adjusted address.
1869     */
1870    writel((u32) ((u64)rx_local->rx_status_bus >> 32),
1871           &rx_dma->dma_wb_base_hi);
1872    writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
1873
1874    memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1875
1876    /* Set the address and parameters of the packet status ring into the
1877     * 1310's registers
1878     */
1879    writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
1880           &rx_dma->psr_base_hi);
1881    writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
1882    writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1883    writel(0, &rx_dma->psr_full_offset);
1884
1885    psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
1886    writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1887           &rx_dma->psr_min_des);
1888
1889    spin_lock_irqsave(&adapter->rcv_lock, flags);
1890
1891    /* These local variables track the PSR in the adapter structure */
1892    rx_local->local_psr_full = 0;
1893
1894    /* Now's the best time to initialize FBR1 contents */
1895    fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
1896    for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
1897        fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
1898        fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
1899        fbr_entry->word2 = entry;
1900        fbr_entry++;
1901    }
1902
1903    /* Set the address and parameters of Free buffer ring 1 (and 0 if
1904     * required) into the 1310's registers
1905     */
1906    writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
1907           &rx_dma->fbr1_base_hi);
1908    writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
1909    writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
1910    writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
1911
1912    /* This variable tracks the free buffer ring 1 full position, so it
1913     * has to match the above.
1914     */
1915    rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
1916    writel(
1917       ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1918       &rx_dma->fbr1_min_des);
1919
1920#ifdef USE_FBR0
1921    /* Now's the best time to initialize FBR0 contents */
1922    fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
1923    for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
1924        fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
1925        fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
1926        fbr_entry->word2 = entry;
1927        fbr_entry++;
1928    }
1929
1930    writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
1931           &rx_dma->fbr0_base_hi);
1932    writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
1933    writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
1934    writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
1935
1936    /* This variable tracks the free buffer ring 0 full position, so it
1937     * has to match the above.
1938     */
1939    rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
1940    writel(
1941       ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1942       &rx_dma->fbr0_min_des);
1943#endif
1944
1945    /* Program the number of packets we will receive before generating an
1946     * interrupt.
1947     * For version B silicon, this value gets updated once autoneg is
1948     * complete.
1949     */
1950    writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1951
1952    /* The "time_done" is not working correctly to coalesce interrupts
1953     * after a given time period, but rather is giving us an interrupt
1954     * regardless of whether we have received packets.
1955     * This value gets updated once autoneg is complete.
1956     */
1957    writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1958
1959    spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1960}
1961
1962/**
1963 * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore.
1964 * @adapter: pointer to our private adapter structure
1965 *
1966 * Configure the transmit engine with the ring buffers we have created
1967 * and prepare it for use.
1968 */
1969static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1970{
1971    struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1972
1973    /* Load the hardware with the start of the transmit descriptor ring. */
1974    writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
1975           &txdma->pr_base_hi);
1976    writel((u32) adapter->tx_ring.tx_desc_ring_pa,
1977           &txdma->pr_base_lo);
1978
1979    /* Initialise the transmit DMA engine */
1980    writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1981
1982    /* Load the completion writeback physical address */
1983    writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
1984                        &txdma->dma_wb_base_hi);
1985    writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
1986
1987    *adapter->tx_ring.tx_status = 0;
1988
1989    writel(0, &txdma->service_request);
1990    adapter->tx_ring.send_idx = 0;
1991}
1992
1993/**
1994 * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
1995 * @adapter: pointer to our private adapter structure
1998 */
1999static void et131x_adapter_setup(struct et131x_adapter *adapter)
2000{
2001    /* Configure the JAGCore */
2002    et131x_configure_global_regs(adapter);
2003
2004    et1310_config_mac_regs1(adapter);
2005
2006    /* Configure the MMC registers */
2007    /* All we need to do is initialize the Memory Control Register */
2008    writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
2009
2010    et1310_config_rxmac_regs(adapter);
2011    et1310_config_txmac_regs(adapter);
2012
2013    et131x_config_rx_dma_regs(adapter);
2014    et131x_config_tx_dma_regs(adapter);
2015
2016    et1310_config_macstat_regs(adapter);
2017
2018    et1310_phy_power_down(adapter, 0);
2019    et131x_xcvr_init(adapter);
2020}
2021
2022/**
2023 * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
2024 * @adapter: pointer to our private adapter structure
2025 */
2026static void et131x_soft_reset(struct et131x_adapter *adapter)
2027{
2028    /* Disable MAC Core */
2029    writel(0xc00f0000, &adapter->regs->mac.cfg1);
2030
2031    /* Set everything to a reset value */
2032    writel(0x7F, &adapter->regs->global.sw_reset);
2033    writel(0x000f0000, &adapter->regs->mac.cfg1);
2034    writel(0x00000000, &adapter->regs->mac.cfg1);
2035}
2036
2037/**
2038 * et131x_enable_interrupts - enable interrupt
2039 * @adapter: et131x device
2040 *
2041 * Enable the appropriate interrupts on the ET131x according to our
2042 * configuration
2043 */
2044static void et131x_enable_interrupts(struct et131x_adapter *adapter)
2045{
2046    u32 mask;
2047
2048    /* Enable the appropriate global interrupts, depending on flow control */
2049    if (adapter->flowcontrol == FLOW_TXONLY ||
2050                adapter->flowcontrol == FLOW_BOTH)
2051        mask = INT_MASK_ENABLE;
2052    else
2053        mask = INT_MASK_ENABLE_NO_FLOW;
2054
2055    writel(mask, &adapter->regs->global.int_mask);
2056}
2057
2058/**
2059 * et131x_disable_interrupts - interrupt disable
2060 * @adapter: et131x device
2061 *
2062 * Block all interrupts from the et131x device at the device itself
2063 */
2064static void et131x_disable_interrupts(struct et131x_adapter *adapter)
2065{
2066    /* Disable all global interrupts */
2067    writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2068}
2069
2070/**
2071 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
2072 * @adapter: pointer to our adapter structure
2073 */
2074static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2075{
2076    /* Set up the transmit dma configuration register */
2077    writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
2078                    &adapter->regs->txdma.csr);
2079}
2080
2081/**
2082 * et131x_enable_txrx - Enable tx/rx queues
2083 * @netdev: device to be enabled
2084 */
2085static void et131x_enable_txrx(struct net_device *netdev)
2086{
2087    struct et131x_adapter *adapter = netdev_priv(netdev);
2088
2089    /* Enable the Tx and Rx DMA engines (if not already enabled) */
2090    et131x_rx_dma_enable(adapter);
2091    et131x_tx_dma_enable(adapter);
2092
2093    /* Enable device interrupts */
2094    if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2095        et131x_enable_interrupts(adapter);
2096
2097    /* We're ready to move some data, so start the queue */
2098    netif_start_queue(netdev);
2099}
2100
2101/**
2102 * et131x_disable_txrx - Disable tx/rx queues
2103 * @netdev: device to be disabled
2104 */
2105static void et131x_disable_txrx(struct net_device *netdev)
2106{
2107    struct et131x_adapter *adapter = netdev_priv(netdev);
2108
2109    /* First thing is to stop the queue */
2110    netif_stop_queue(netdev);
2111
2112    /* Stop the Tx and Rx DMA engines */
2113    et131x_rx_dma_disable(adapter);
2114    et131x_tx_dma_disable(adapter);
2115
2116    /* Disable device interrupts */
2117    et131x_disable_interrupts(adapter);
2118}
2119
2120/**
2121 * et131x_init_send - Initialize send data structures
2122 * @adapter: pointer to our private adapter structure
2123 */
2124static void et131x_init_send(struct et131x_adapter *adapter)
2125{
2126    struct tcb *tcb;
2127    u32 ct;
2128    struct tx_ring *tx_ring;
2129
2130    /* Setup some convenience pointers */
2131    tx_ring = &adapter->tx_ring;
2132    tcb = adapter->tx_ring.tcb_ring;
2133
2134    tx_ring->tcb_qhead = tcb;
2135
2136    memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2137
2138    /* Go through and set up each TCB */
2139    for (ct = 0; ct++ < NUM_TCB; tcb++)
2140        /* Set the link pointer in HW TCB to the next TCB in the
2141         * chain
2142         */
2143        tcb->next = tcb + 1;
2144
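    /* Note: the post-increment in the loop condition above runs the body
     * exactly NUM_TCB times and leaves tcb pointing one element past the
     * final TCB, hence the decrement below before terminating the list.
     */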
2145    /* Set the tail pointer */
2146    tcb--;
2147    tx_ring->tcb_qtail = tcb;
2148    tcb->next = NULL;
2149    /* Curr send queue should now be empty */
2150    tx_ring->send_head = NULL;
2151    tx_ring->send_tail = NULL;
2152}
2153
2154/**
2155 * et1310_enable_phy_coma - called when network cable is unplugged
2156 * @adapter: pointer to our adapter structure
2157 *
2158 * Called when the driver receives a phy status change interrupt while in
2159 * D0 and checks that phy_status is down.
2160 *
2161 * -- gate off JAGCore;
2162 * -- set gigE PHY in Coma mode
2163 * -- wake on phy_interrupt; Perform software reset JAGCore,
2164 * re-initialize jagcore and gigE PHY
2165 *
2166 * Add D0-ASPM-PhyLinkDown Support:
2167 * -- while in D0, when there is a phy_interrupt indicating phy link
2168 * down status, call the MPSetPhyComa routine to enter this active
2169 * state power saving mode
2170 * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
2171 * indicating linkup status, call the MPDisablePhyComa routine to
2172 * restore JAGCore and gigE PHY
2173 */
2174static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2175{
2176    unsigned long flags;
2177    u32 pmcsr;
2178
2179    pmcsr = readl(&adapter->regs->global.pm_csr);
2180
2181    /* Save the GbE PHY speed and duplex modes. Need to restore this
2182     * when cable is plugged back in
2183     */
2184    /*
2185     * TODO - when PM is re-enabled, check if we need to
2186     * perform a similar task as this -
2187     * adapter->pdown_speed = adapter->ai_force_speed;
2188     * adapter->pdown_duplex = adapter->ai_force_duplex;
2189     */
2190
2191    /* Stop sending packets. */
2192    spin_lock_irqsave(&adapter->send_hw_lock, flags);
2193    adapter->flags |= fMP_ADAPTER_LOWER_POWER;
2194    spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2195
2196    /* Wait for outstanding Receive packets */
2197
2198    et131x_disable_txrx(adapter->netdev);
2199
2200    /* Gate off JAGCore 3 clock domains */
2201    pmcsr &= ~ET_PMCSR_INIT;
2202    writel(pmcsr, &adapter->regs->global.pm_csr);
2203
2204    /* Program gigE PHY in to Coma mode */
2205    pmcsr |= ET_PM_PHY_SW_COMA;
2206    writel(pmcsr, &adapter->regs->global.pm_csr);
2207}
2208
2209/**
2210 * et1310_disable_phy_coma - Disable the Phy Coma Mode
2211 * @adapter: pointer to our adapter structure
2212 */
2213static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2214{
2215    u32 pmcsr;
2216
2217    pmcsr = readl(&adapter->regs->global.pm_csr);
2218
2219    /* Disable phy_sw_coma register and re-enable JAGCore clocks */
2220    pmcsr |= ET_PMCSR_INIT;
2221    pmcsr &= ~ET_PM_PHY_SW_COMA;
2222    writel(pmcsr, &adapter->regs->global.pm_csr);
2223
2224    /* Restore the GbE PHY speed and duplex modes;
2225     * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
2226     */
2227    /* TODO - when PM is re-enabled, check if we need to
2228     * perform a similar task as this -
2229     * adapter->ai_force_speed = adapter->pdown_speed;
2230     * adapter->ai_force_duplex = adapter->pdown_duplex;
2231     */
2232
2233    /* Re-initialize the send structures */
2234    et131x_init_send(adapter);
2235
2236    /* Bring the device back to the state it was during init prior to
2237     * autonegotiation being complete. This way, when we get the auto-neg
2238     * complete interrupt, we can complete init by calling ConfigMacREGS2.
2239     */
2240    et131x_soft_reset(adapter);
2241
2242    /* Re-run the adapter setup, as at init time */
2243    et131x_adapter_setup(adapter);
2244
2245    /* Allow Tx to restart */
2246    adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
2247
2248    et131x_enable_txrx(adapter->netdev);
2249}
2250
2251static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2252{
2253    u32 tmp_free_buff_ring = *free_buff_ring;
2254    tmp_free_buff_ring++;
2255    /* This works for all cases where limit < 1024. The 1023 case
2256     * works because 1023++ is 1024, which means the if condition is not
2257     * taken but the carry of the bit into the wrap bit toggles the wrap
2258     * value correctly. */
2259    if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2260        tmp_free_buff_ring &= ~ET_DMA10_MASK;
2261        tmp_free_buff_ring ^= ET_DMA10_WRAP;
2262    }
2263    /* For the 1023 case */
2264    tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2265    *free_buff_ring = tmp_free_buff_ring;
2266    return tmp_free_buff_ring;
2267}
2268
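/* Worked example, assuming ET_DMA10_MASK is 0x3ff and ET_DMA10_WRAP is 0x400
 * as in et131x.h: with limit == 511, incrementing 0x1ff gives 0x200, whose
 * masked value 512 exceeds the limit, so the index resets to 0 and the wrap
 * bit is toggled explicitly. With limit == 1023, incrementing 0x3ff gives
 * 0x400 - the masked value 0 is not above the limit, so the if is skipped,
 * but the carry out of the low ten bits has already toggled the wrap bit,
 * which is the special case the comment above describes.
 */
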
2269/**
2270 * et131x_align_allocated_memory - Align allocated memory on a given boundary
2271 * @adapter: pointer to our adapter structure
2272 * @phys_addr: pointer to the physical address to align
2273 * @offset: pointer to the offset variable
2274 * @mask: alignment mask (the desired boundary size minus one)
2275 */
2276static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
2277                      u64 *phys_addr, u64 *offset,
2278                      u64 mask)
2279{
2280    u64 new_addr = *phys_addr & ~mask;
2281
2282    *offset = 0;
2283
2284    if (new_addr != *phys_addr) {
2285        /* Move to next aligned block */
2286        new_addr += mask + 1;
2287        /* Return offset for adjusting virt addr */
2288        *offset = new_addr - *phys_addr;
2289        /* Return new physical address */
2290        *phys_addr = new_addr;
2291    }
2292}
2293
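/* Worked example: with *phys_addr == 0x12345 and mask == 0xfff (a 4k
 * boundary), new_addr starts as 0x12000, which differs from the original, so
 * the block moves up to 0x13000 and *offset becomes 0xcbb; callers advance
 * their virtual pointer by that same offset. This is the usual align-up
 * idiom, equivalent to (addr + mask) & ~mask.
 */
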
2294/**
2295 * et131x_rx_dma_memory_alloc
2296 * @adapter: pointer to our private adapter structure
2297 *
2298 * Returns 0 on success and errno on failure (as defined in errno.h)
2299 *
2300 * Allocates Free Buffer Ring 1 (and Free Buffer Ring 0 if required) and
2301 * the Packet Status Ring.
2302 */
2303static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2304{
2305    u32 i, j;
2306    u32 bufsize;
2307    u32 pktstat_ringsize, fbr_chunksize;
2308    struct rx_ring *rx_ring;
2309
2310    /* Setup some convenience pointers */
2311    rx_ring = &adapter->rx_ring;
2312
2313    /* Alloc memory for the lookup table */
2314#ifdef USE_FBR0
2315    rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
    if (!rx_ring->fbr[1])
        return -ENOMEM;
2316#endif
2317    rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
    if (!rx_ring->fbr[0])
        return -ENOMEM;
2318
2319    /* The first thing we will do is configure the sizes of the buffer
2320     * rings. These will change based on jumbo packet support. Larger
2321     * jumbo packets increases the size of each entry in FBR0, and the
2322     * number of entries in FBR0, while at the same time decreasing the
2323     * number of entries in FBR1.
2324     *
2325     * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
2326     * entries are huge in order to accommodate a "jumbo" frame, then it
2327     * will have fewer entries. Conversely, FBR0 will now be relied upon
2328     * to carry more "normal" frames, thus its entry size also increases
2329     * and the number of entries goes up too (since it now carries
2330     * "small" + "regular" packets).
2331     *
2332     * In this scheme, we try to maintain 512 entries between the two
2333     * rings. Also, FBR1 remains a constant size - when its size doubles
2334     * the number of entries halves. FBR0 increases in size, however.
2335     */
2336
2337    if (adapter->registry_jumbo_packet < 2048) {
2338#ifdef USE_FBR0
2339        rx_ring->fbr[1]->buffsize = 256;
2340        rx_ring->fbr[1]->num_entries = 512;
2341#endif
2342        rx_ring->fbr[0]->buffsize = 2048;
2343        rx_ring->fbr[0]->num_entries = 512;
2344    } else if (adapter->registry_jumbo_packet < 4096) {
2345#ifdef USE_FBR0
2346        rx_ring->fbr[1]->buffsize = 512;
2347        rx_ring->fbr[1]->num_entries = 1024;
2348#endif
2349        rx_ring->fbr[0]->buffsize = 4096;
2350        rx_ring->fbr[0]->num_entries = 512;
2351    } else {
2352#ifdef USE_FBR0
2353        rx_ring->fbr[1]->buffsize = 1024;
2354        rx_ring->fbr[1]->num_entries = 768;
2355#endif
2356        rx_ring->fbr[0]->buffsize = 16384;
2357        rx_ring->fbr[0]->num_entries = 128;
2358    }
2359
2360#ifdef USE_FBR0
2361    adapter->rx_ring.psr_num_entries =
2362                adapter->rx_ring.fbr[1]->num_entries +
2363                adapter->rx_ring.fbr[0]->num_entries;
2364#else
2365    adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
2366#endif
2367
2368    /* Allocate an area of memory for Free Buffer Ring 1 */
2369    bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2370                                    0xfff;
2371    rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2372                    bufsize,
2373                    &rx_ring->fbr[0]->ring_physaddr,
2374                    GFP_KERNEL);
2375    if (!rx_ring->fbr[0]->ring_virtaddr) {
2376        dev_err(&adapter->pdev->dev,
2377              "Cannot alloc memory for Free Buffer Ring 1\n");
2378        return -ENOMEM;
2379    }
2380
2381    /* Save physical address
2382     *
2383     * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2384     * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2385     * are ever returned, make sure the high part is retrieved here
2386     * before storing the adjusted address.
2387     */
2388    rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
2389
2390    /* Align Free Buffer Ring 1 on a 4K boundary */
2391    et131x_align_allocated_memory(adapter,
2392                      &rx_ring->fbr[0]->real_physaddr,
2393                      &rx_ring->fbr[0]->offset, 0x0FFF);
2394
2395    rx_ring->fbr[0]->ring_virtaddr =
2396            (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
2397            rx_ring->fbr[0]->offset);
2398
2399#ifdef USE_FBR0
2400    /* Allocate an area of memory for Free Buffer Ring 0 */
2401    bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2402                                    0xfff;
2403    rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2404                        bufsize,
2405                        &rx_ring->fbr[1]->ring_physaddr,
2406                        GFP_KERNEL);
2407    if (!rx_ring->fbr[1]->ring_virtaddr) {
2408        dev_err(&adapter->pdev->dev,
2409              "Cannot alloc memory for Free Buffer Ring 0\n");
2410        return -ENOMEM;
2411    }
2412
2413    /* Save physical address
2414     *
2415     * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2416     * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2417     * are ever returned, make sure the high part is retrieved here before
2418     * storing the adjusted address.
2419     */
2420    rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2421
2422    /* Align Free Buffer Ring 0 on a 4K boundary */
2423    et131x_align_allocated_memory(adapter,
2424                      &rx_ring->fbr[1]->real_physaddr,
2425                      &rx_ring->fbr[1]->offset, 0x0FFF);
2426
2427    rx_ring->fbr[1]->ring_virtaddr =
2428            (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2429            rx_ring->fbr[1]->offset);
2430#endif
2431    for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2432        u64 fbr1_tmp_physaddr;
2433        u64 fbr1_offset;
2434        u32 fbr1_align;
2435
2436        /* This code allocates an area of memory big enough for N
2437         * free buffers + (buffer_size - 1) so that the buffers can
2438         * be aligned on 4k boundaries. If each buffer were aligned
2439         * to a buffer_size boundary, the effect would be to double
2440         * the size of FBR0. By allocating N buffers at once, we
2441         * reduce this overhead.
2442         */
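        /* For example, with 2048-byte buffers and a hypothetical FBR_CHUNKS
         * of 32, one allocation of 32 * 2048 + 2047 bytes backs 32 ring
         * entries, instead of 32 separate buffer-aligned allocations of up
         * to twice that total size.
         */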
2443        if (rx_ring->fbr[0]->buffsize > 4096)
2444            fbr1_align = 4096;
2445        else
2446            fbr1_align = rx_ring->fbr[0]->buffsize;
2447
2448        fbr_chunksize =
2449            (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2450        rx_ring->fbr[0]->mem_virtaddrs[i] =
2451            dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2452                       &rx_ring->fbr[0]->mem_physaddrs[i],
2453                       GFP_KERNEL);
2454
2455        if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2456            dev_err(&adapter->pdev->dev,
2457                "Could not alloc memory\n");
2458            return -ENOMEM;
2459        }
2460
2461        /* See NOTE in "Save Physical Address" comment above */
2462        fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2463
2464        et131x_align_allocated_memory(adapter,
2465                          &fbr1_tmp_physaddr,
2466                          &fbr1_offset, (fbr1_align - 1));
2467
2468        for (j = 0; j < FBR_CHUNKS; j++) {
2469            u32 index = (i * FBR_CHUNKS) + j;
2470
2471            /* Save the Virtual address of this index for quick
2472             * access later
2473             */
2474            rx_ring->fbr[0]->virt[index] =
2475                (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2476                (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2477
2478            /* now store the physical address in the descriptor
2479             * so the device can access it
2480             */
2481            rx_ring->fbr[0]->bus_high[index] =
2482                (u32) (fbr1_tmp_physaddr >> 32);
2483            rx_ring->fbr[0]->bus_low[index] =
2484                (u32) fbr1_tmp_physaddr;
2485
2486            fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2487
2488            rx_ring->fbr[0]->buffer1[index] =
2489                rx_ring->fbr[0]->virt[index];
2490            rx_ring->fbr[0]->buffer2[index] =
2491                rx_ring->fbr[0]->virt[index] - 4;
2492        }
2493    }
2494
2495#ifdef USE_FBR0
2496    /* Same for FBR0 (if in use) */
2497    for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2498        u64 fbr0_tmp_physaddr;
2499        u64 fbr0_offset;
2500
2501        fbr_chunksize =
2502            ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2503        rx_ring->fbr[1]->mem_virtaddrs[i] =
2504            dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2505                       &rx_ring->fbr[1]->mem_physaddrs[i],
2506                       GFP_KERNEL);
2507
2508        if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2509            dev_err(&adapter->pdev->dev,
2510                "Could not alloc memory\n");
2511            return -ENOMEM;
2512        }
2513
2514        /* See NOTE in "Save Physical Address" comment above */
2515        fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2516
2517        et131x_align_allocated_memory(adapter,
2518                          &fbr0_tmp_physaddr,
2519                          &fbr0_offset,
2520                          rx_ring->fbr[1]->buffsize - 1);
2521
2522        for (j = 0; j < FBR_CHUNKS; j++) {
2523            u32 index = (i * FBR_CHUNKS) + j;
2524
2525            rx_ring->fbr[1]->virt[index] =
2526                (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2527                (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2528
2529            rx_ring->fbr[1]->bus_high[index] =
2530                (u32) (fbr0_tmp_physaddr >> 32);
2531            rx_ring->fbr[1]->bus_low[index] =
2532                (u32) fbr0_tmp_physaddr;
2533
2534            fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2535
2536            rx_ring->fbr[1]->buffer1[index] =
2537                rx_ring->fbr[1]->virt[index];
2538            rx_ring->fbr[1]->buffer2[index] =
2539                rx_ring->fbr[1]->virt[index] - 4;
2540        }
2541    }
2542#endif
2543
2544    /* Allocate an area of memory for FIFO of Packet Status ring entries */
2545    pktstat_ringsize =
2546        sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2547
2548    rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2549                          pktstat_ringsize,
2550                          &rx_ring->ps_ring_physaddr,
2551                          GFP_KERNEL);
2552
2553    if (!rx_ring->ps_ring_virtaddr) {
2554        dev_err(&adapter->pdev->dev,
2555              "Cannot alloc memory for Packet Status Ring\n");
2556        return -ENOMEM;
2557    }
2558    pr_info("Packet Status Ring %llx\n",
2559        (unsigned long long) rx_ring->ps_ring_physaddr);
2560
2561    /*
2562     * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2563     * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2564     * are ever returned, make sure the high part is retrieved here before
2565     * storing the adjusted address.
2566     */
2567
2568    /* Allocate an area of memory for writeback of status information */
2569    rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2570                        sizeof(struct rx_status_block),
2571                        &rx_ring->rx_status_bus,
2572                        GFP_KERNEL);
2573    if (!rx_ring->rx_status_block) {
2574        dev_err(&adapter->pdev->dev,
2575              "Cannot alloc memory for Status Block\n");
2576        return -ENOMEM;
2577    }
2578    rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2579    pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus);
2580
2581    /* kmem_cache_create initializes a lookaside list. After successful
2582     * creation, nonpaged fixed-size blocks can be allocated from and
2583     * freed to the lookaside list. RFDs will be allocated from this pool.
2584     */
2587    rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2588                           sizeof(struct rfd),
2589                           0,
2590                           SLAB_CACHE_DMA |
2591                           SLAB_HWCACHE_ALIGN,
2592                           NULL);
2593
    if (!rx_ring->recv_lookaside) {
        dev_err(&adapter->pdev->dev,
              "Cannot alloc memory for RFD lookaside list\n");
        return -ENOMEM;
    }

2594    adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2595
2596    /* The RFDs are going to be put on lists later on, so initialize the
2597     * lists now.
2598     */
2599    INIT_LIST_HEAD(&rx_ring->recv_list);
2600    return 0;
2601}
2602
2603/**
2604 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
2605 * @adapter: pointer to our private adapter structure
2606 */
2607static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2608{
2609    u32 index;
2610    u32 bufsize;
2611    u32 pktstat_ringsize;
2612    struct rfd *rfd;
2613    struct rx_ring *rx_ring;
2614
2615    /* Setup some convenience pointers */
2616    rx_ring = &adapter->rx_ring;
2617
2618    /* Free RFDs and associated packet descriptors */
2619    WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2620
2621    while (!list_empty(&rx_ring->recv_list)) {
2622        rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2623                struct rfd, list_node);
2624
2625        list_del(&rfd->list_node);
2626        rfd->skb = NULL;
2627        kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2628    }
2629
2630    /* Free Free Buffer Ring 1 */
2631    if (rx_ring->fbr[0]->ring_virtaddr) {
2632        /* First the packet memory */
2633        for (index = 0; index <
2634             (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2635            if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2636                u32 fbr1_align;
2637
2638                if (rx_ring->fbr[0]->buffsize > 4096)
2639                    fbr1_align = 4096;
2640                else
2641                    fbr1_align = rx_ring->fbr[0]->buffsize;
2642
2643                bufsize =
2644                    (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2645                    fbr1_align - 1;
2646
2647                dma_free_coherent(&adapter->pdev->dev,
2648                    bufsize,
2649                    rx_ring->fbr[0]->mem_virtaddrs[index],
2650                    rx_ring->fbr[0]->mem_physaddrs[index]);
2651
2652                rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2653            }
2654        }
2655
2656        /* Now the FIFO itself */
2657        rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2658            rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2659
2660        bufsize =
2661            (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2662                                    0xfff;
2663
2664        dma_free_coherent(&adapter->pdev->dev, bufsize,
2665                    rx_ring->fbr[0]->ring_virtaddr,
2666                    rx_ring->fbr[0]->ring_physaddr);
2667
2668        rx_ring->fbr[0]->ring_virtaddr = NULL;
2669    }
2670
2671#ifdef USE_FBR0
2672    /* Now the same for Free Buffer Ring 0 */
2673    if (rx_ring->fbr[1]->ring_virtaddr) {
2674        /* First the packet memory */
2675        for (index = 0; index <
2676             (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2677            if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2678                bufsize =
2679                    (rx_ring->fbr[1]->buffsize *
2680                     (FBR_CHUNKS + 1)) - 1;
2681
2682                dma_free_coherent(&adapter->pdev->dev,
2683                    bufsize,
2684                    rx_ring->fbr[1]->mem_virtaddrs[index],
2685                    rx_ring->fbr[1]->mem_physaddrs[index]);
2686
2687                rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2688            }
2689        }
2690
2691        /* Now the FIFO itself */
2692        rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2693            rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2694
2695        bufsize =
2696            (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2697                                    0xfff;
2698
2699        dma_free_coherent(&adapter->pdev->dev,
2700                  bufsize,
2701                  rx_ring->fbr[1]->ring_virtaddr,
2702                  rx_ring->fbr[1]->ring_physaddr);
2703
2704        rx_ring->fbr[1]->ring_virtaddr = NULL;
2705    }
2706#endif
2707
2708    /* Free Packet Status Ring */
2709    if (rx_ring->ps_ring_virtaddr) {
2710        pktstat_ringsize =
2711            sizeof(struct pkt_stat_desc) *
2712            adapter->rx_ring.psr_num_entries;
2713
2714        dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2715                    rx_ring->ps_ring_virtaddr,
2716                    rx_ring->ps_ring_physaddr);
2717
2718        rx_ring->ps_ring_virtaddr = NULL;
2719    }
2720
2721    /* Free area of memory for the writeback of status information */
2722    if (rx_ring->rx_status_block) {
2723        dma_free_coherent(&adapter->pdev->dev,
2724            sizeof(struct rx_status_block),
2725            rx_ring->rx_status_block, rx_ring->rx_status_bus);
2726        rx_ring->rx_status_block = NULL;
2727    }
2728
2729    /* Destroy the lookaside (RFD) pool */
2730    if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2731        kmem_cache_destroy(rx_ring->recv_lookaside);
2732        adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2733    }
2734
2735    /* Free the FBR Lookup Table */
2736#ifdef USE_FBR0
2737    kfree(rx_ring->fbr[1]);
2738#endif
2739
2740    kfree(rx_ring->fbr[0]);
2741
2742    /* Reset Counters */
2743    rx_ring->num_ready_recv = 0;
2744}
2745
2746/**
2747 * et131x_init_recv - Initialize receive data structures.
2748 * @adapter: pointer to our private adapter structure
2749 *
2750 * Returns 0 on success and errno on failure (as defined in errno.h)
2751 */
2752static int et131x_init_recv(struct et131x_adapter *adapter)
2753{
2754    int status = -ENOMEM;
2755    struct rfd *rfd = NULL;
2756    u32 rfdct;
2757    u32 numrfd = 0;
2758    struct rx_ring *rx_ring;
2759
2760    /* Setup some convenience pointers */
2761    rx_ring = &adapter->rx_ring;
2762
2763    /* Setup each RFD */
2764    for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2765        rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2766                             GFP_ATOMIC | GFP_DMA);
2767
2768        if (!rfd) {
2769            dev_err(&adapter->pdev->dev,
2770                  "Couldn't alloc RFD out of kmem_cache\n");
2771            status = -ENOMEM;
2772            continue;
2773        }
2774
2775        rfd->skb = NULL;
2776
2777        /* Add this RFD to the recv_list */
2778        list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2779
2780        /* Increment both the number of available RFDs and the total. */
2781        rx_ring->num_ready_recv++;
2782        numrfd++;
2783    }
2784
2785    if (numrfd > NIC_MIN_NUM_RFD)
2786        status = 0;
2787
2788    rx_ring->num_rfd = numrfd;
2789
    /* Do not free the last RFD here on failure: every RFD that was
     * successfully allocated is already linked on recv_list (and rfd may be
     * NULL), so the teardown path releases them instead.
     */
2790    if (status != 0)
2792        dev_err(&adapter->pdev->dev,
2793              "Allocation problems in et131x_init_recv\n");
2795    return status;
2796}
2797
2798/**
2799 * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
2800 * @adapter: pointer to our adapter structure
2801 */
2802static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2803{
2804    struct phy_device *phydev = adapter->phydev;
2805
2806    if (!phydev)
2807        return;
2808
2809    /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2810     * Mbits/s line rates, nor do we enable RxDMA interrupt coalescing.
2811     */
2812    if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2813        writel(0, &adapter->regs->rxdma.max_pkt_time);
2814        writel(1, &adapter->regs->rxdma.num_pkt_done);
2815    }
2816}
2817
2818/**
2819 * nic_return_rfd - Recycle an RFD and put it back onto the receive list
2820 * @adapter: pointer to our adapter
2821 * @rfd: pointer to the RFD
2822 */
2823static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2824{
2825    struct rx_ring *rx_local = &adapter->rx_ring;
2826    struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2827    u16 buff_index = rfd->bufferindex;
2828    u8 ring_index = rfd->ringindex;
2829    unsigned long flags;
2830
2831    /* We don't use any of the OOB data besides status. Otherwise, we
2832     * need to clean up OOB data
2833     */
2834    if (
2835#ifdef USE_FBR0
2836        (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2837#endif
2838        (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2839        spin_lock_irqsave(&adapter->fbr_lock, flags);
2840
2841        if (ring_index == 1) {
2842            struct fbr_desc *next = (struct fbr_desc *)
2843                    (rx_local->fbr[0]->ring_virtaddr) +
2844                    INDEX10(rx_local->fbr[0]->local_full);
2845
2846            /* Handle the Free Buffer Ring advancement here. Write
2847             * the PA / Buffer Index for the returned buffer into
2848             * the oldest (next to be freed) FBR entry
2849             */
2850            next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2851            next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2852            next->word2 = buff_index;
2853
2854            writel(bump_free_buff_ring(
2855                    &rx_local->fbr[0]->local_full,
2856                    rx_local->fbr[0]->num_entries - 1),
2857                    &rx_dma->fbr1_full_offset);
2858        }
2859#ifdef USE_FBR0
2860        else {
2861            struct fbr_desc *next = (struct fbr_desc *)
2862                rx_local->fbr[1]->ring_virtaddr +
2863                    INDEX10(rx_local->fbr[1]->local_full);
2864
2865            /* Handle the Free Buffer Ring advancement here. Write
2866             * the PA / Buffer Index for the returned buffer into
2867             * the oldest (next to be freed) FBR entry
2868             */
2869            next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2870            next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2871            next->word2 = buff_index;
2872
2873            writel(bump_free_buff_ring(
2874                    &rx_local->fbr[1]->local_full,
2875                    rx_local->fbr[1]->num_entries - 1),
2876                   &rx_dma->fbr0_full_offset);
2877        }
2878#endif
2879        spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2880    } else {
2881        dev_err(&adapter->pdev->dev,
2882              "%s illegal Buffer Index returned\n", __func__);
2883    }
2884
2885    /* The processing on this RFD is done, so put it back on the tail of
2886     * our list
2887     */
2888    spin_lock_irqsave(&adapter->rcv_lock, flags);
2889    list_add_tail(&rfd->list_node, &rx_local->recv_list);
2890    rx_local->num_ready_recv++;
2891    spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2892
2893    WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2894}
2895
2896/**
2897 * nic_rx_pkts - Checks the hardware for available packets
2898 * @adapter: pointer to our adapter
2899 *
2900 * Returns rfd, a pointer to our MPRFD.
2901 *
2902 * Checks the hardware for available packets, using the completion ring.
2903 * If packets are available, it gets an RFD from the recv_list, attaches
2904 * the packet to it, puts the RFD in the RecvPendList, and also returns
2905 * the pointer to the RFD.
2906 */
2907static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2908{
2909    struct rx_ring *rx_local = &adapter->rx_ring;
2910    struct rx_status_block *status;
2911    struct pkt_stat_desc *psr;
2912    struct rfd *rfd;
2913    u32 i;
2914    u8 *buf;
2915    unsigned long flags;
2916    struct list_head *element;
2917    u8 ring_index;
2918    u16 buff_index;
2919    u32 len;
2920    u32 word0;
2921    u32 word1;
2922
2923    /* RX Status block is written by the DMA engine prior to every
2924     * interrupt. It contains the next to be used entry in the Packet
2925     * Status Ring, and also the two Free Buffer rings.
2926     */
2927    status = rx_local->rx_status_block;
2928    word1 = status->word1 >> 16; /* Get the useful bits */
2929
2930    /* If the PSR index and wrap bit match our local copy, nothing new */
2931    if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2932        /* Looks like this ring is not updated yet */
2933        return NULL;
2934
2935    /* The packet status ring indicates that data is available. */
2936    psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2937            (rx_local->local_psr_full & 0xFFF);
2938
2939    /* Grab any information that is required once the PSR is
2940     * advanced, since we can no longer rely on the memory being
2941     * accurate
2942     */
2943    len = psr->word1 & 0xFFFF;
2944    ring_index = (psr->word1 >> 26) & 0x03;
2945    buff_index = (psr->word1 >> 16) & 0x3FF;
2946    word0 = psr->word0;
2947
2948    /* Indicate that we have used this PSR entry. */
2949    /* FIXME wrap 12 */
2950    add_12bit(&rx_local->local_psr_full, 1);
2951    if (
2952      (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
2953        /* Clear psr full and toggle the wrap bit */
2954        rx_local->local_psr_full &= ~0xFFF;
2955        rx_local->local_psr_full ^= 0x1000;
2956    }
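
    /* Worked example: with 1024 PSR entries (512 + 512 when FBR0 is in use),
     * advancing from entry 1023 gives 1024, which exceeds num_entries - 1,
     * so the low twelve bits reset to 0 and bit 12 - the wrap bit - toggles.
     * The wrap bit is what lets the comparison at the top of this function
     * tell a full ring from an empty one when the indices match.
     */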
2957
2958    writel(rx_local->local_psr_full,
2959           &adapter->regs->rxdma.psr_full_offset);
2960
2961#ifndef USE_FBR0
2962    if (ring_index != 1)
2963        return NULL;
2964#endif
2965
2966#ifdef USE_FBR0
2967    if (ring_index > 1 ||
2968        (ring_index == 0 &&
2969        buff_index > rx_local->fbr[1]->num_entries - 1) ||
2970        (ring_index == 1 &&
2971        buff_index > rx_local->fbr[0]->num_entries - 1)) {
2972#else
2973    if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1) {
2974#endif
2975        /* Illegal buffer or ring index cannot be used by S/W */
2976        dev_err(&adapter->pdev->dev,
2977              "NICRxPkts PSR Entry %d indicates "
2978              "length of %d and/or bad bi(%d)\n",
2979              rx_local->local_psr_full & 0xFFF,
2980              len, buff_index);
2981        return NULL;
2982    }
2983
2984    /* Get and fill the RFD. */
2985    spin_lock_irqsave(&adapter->rcv_lock, flags);
2986
2987    rfd = NULL;
2988    element = rx_local->recv_list.next;
2989    rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2990
2991    if (rfd == NULL) {
2992        spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2993        return NULL;
2994    }
2995
2996    list_del(&rfd->list_node);
2997    rx_local->num_ready_recv--;
2998
2999    spin_unlock_irqrestore(&adapter->rcv_lock, flags);
3000
3001    rfd->bufferindex = buff_index;
3002    rfd->ringindex = ring_index;
3003
3004    /* In V1 silicon, there is a bug which screws up filtering of
3005     * runt packets. Therefore runt packet filtering is disabled
3006     * in the MAC and the packets are dropped here. They are
3007     * also counted here.
3008     */
3009    if (len < (NIC_MIN_PACKET_SIZE + 4)) {
3010        adapter->stats.rx_other_errs++;
3011        len = 0;
3012    }
3013
3014    if (len) {
3015        /* Determine if this is a multicast packet coming in */
3016        if ((word0 & ALCATEL_MULTICAST_PKT) &&
3017            !(word0 & ALCATEL_BROADCAST_PKT)) {
3018            /* Promiscuous mode and Multicast mode are
3019             * not mutually exclusive as was first
3020             * thought. I guess Promiscuous is just
3021             * considered a super-set of the other
3022             * filters. Generally filter is 0x2b when in
3023             * promiscuous mode.
3024             */
3025            if ((adapter->packet_filter &
3026                    ET131X_PACKET_TYPE_MULTICAST)
3027                && !(adapter->packet_filter &
3028                    ET131X_PACKET_TYPE_PROMISCUOUS)
3029                && !(adapter->packet_filter &
3030                    ET131X_PACKET_TYPE_ALL_MULTICAST)) {
3031                /*
3032                 * Note - ring_index for fbr[] array is reversed
3033                 * 1 for FBR0 etc
3034                 */
3035                buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
3036                        virt[buff_index];
3037
3038                /* Loop through our list to see if the
3039                 * destination address of this packet
3040                 * matches one in our list.
3041                 */
3042                for (i = 0; i < adapter->multicast_addr_count;
3043                     i++) {
3044                    if (buf[0] ==
3045                        adapter->multicast_list[i][0]
3046                        && buf[1] ==
3047                        adapter->multicast_list[i][1]
3048                        && buf[2] ==
3049                        adapter->multicast_list[i][2]
3050                        && buf[3] ==
3051                        adapter->multicast_list[i][3]
3052                        && buf[4] ==
3053                        adapter->multicast_list[i][4]
3054                        && buf[5] ==
3055                        adapter->multicast_list[i][5]) {
3056                        break;
3057                    }
3058                }
3059
3060                /* If our index is equal to the number
3061                 * of Multicast address we have, then
3062                 * this means we did not find this
3063                 * packet's matching address in our
3064                 * list. Set the len to zero,
3065                 * so we free our RFD when we return
3066                 * from this function.
3067                 */
3068                if (i == adapter->multicast_addr_count)
3069                    len = 0;
3070            }
3071
3072            if (len > 0)
3073                adapter->stats.multicast_pkts_rcvd++;
3074        } else if (word0 & ALCATEL_BROADCAST_PKT)
3075            adapter->stats.broadcast_pkts_rcvd++;
3076        else
3077            /* Not sure what this counter measures in
3078             * promiscuous mode. Perhaps we should check
3079             * the MAC address to see if it is directed
3080             * to us in promiscuous mode.
3081             */
3082            adapter->stats.unicast_pkts_rcvd++;
3083    }
3084
3085    if (len > 0) {
3086        struct sk_buff *skb = NULL;
3087
3088        /*rfd->len = len - 4; */
3089        rfd->len = len;
3090
3091        skb = dev_alloc_skb(rfd->len + 2);
3092        if (!skb) {
3093            dev_err(&adapter->pdev->dev,
3094                  "Couldn't alloc an SKB for Rx\n");
            /* Recycle the RFD rather than leaking it */
            nic_return_rfd(adapter, rfd);
3095            return NULL;
3096        }
3097
3098        adapter->net_stats.rx_bytes += rfd->len;
3099
3100        /*
3101         * Note - ring_index for fbr[] array is reversed,
3102         * 1 for FBR0 etc
3103         */
3104        memcpy(skb_put(skb, rfd->len),
3105            rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
3106            rfd->len);
3107
3108        skb->dev = adapter->netdev;
3109        skb->protocol = eth_type_trans(skb, adapter->netdev);
3110        skb->ip_summed = CHECKSUM_NONE;
3111
3112        netif_rx_ni(skb);
3113    } else {
3114        rfd->len = 0;
3115    }
3116
3117    nic_return_rfd(adapter, rfd);
3118    return rfd;
3119}
3120
3121/**
3122 * et131x_handle_recv_interrupt - Interrupt handler for receive processing
3123 * @adapter: pointer to our adapter
3124 *
3125 * Assumption, Rcv spinlock has been acquired.
3126 */
3127static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
3128{
3129    struct rfd *rfd = NULL;
3130    u32 count = 0;
3131    bool done = true;
3132
3133    /* Process up to the available RFDs */
3134    while (count < NUM_PACKETS_HANDLED) {
3135        if (list_empty(&adapter->rx_ring.recv_list)) {
3136            WARN_ON(adapter->rx_ring.num_ready_recv != 0);
3137            done = false;
3138            break;
3139        }
3140
3141        rfd = nic_rx_pkts(adapter);
3142
3143        if (rfd == NULL)
3144            break;
3145
3146        /* Do not receive any packets until a filter has been set.
3147         * Do not receive any packets until we have link.
3148         * If length is zero, return the RFD in order to advance the
3149         * Free buffer ring.
3150         */
3151        if (!adapter->packet_filter ||
3152            !netif_carrier_ok(adapter->netdev) ||
3153            rfd->len == 0)
3154            continue;
3155
3156        /* Increment the number of packets we received */
3157        adapter->net_stats.rx_packets++;
3158
3159        /* Set the status on the packet, either resources or success */
3160        if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
3161            dev_warn(&adapter->pdev->dev,
3162                    "RFD's are running out\n");
3163        }
3164        count++;
3165    }
3166
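        /* Re-arm the hardware watchdog if we stopped early: either the
         * NUM_PACKETS_HANDLED budget was exhausted or the RFD recv
         * list ran dry (done == false), so receive work remains.
         */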
3167    if (count == NUM_PACKETS_HANDLED || !done) {
3168        adapter->rx_ring.unfinished_receives = true;
3169        writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3170               &adapter->regs->global.watchdog_timer);
3171    } else
3172        /* Watchdog timer will disable itself if appropriate. */
3173        adapter->rx_ring.unfinished_receives = false;
3174}
3175
3176/**
3177 * et131x_tx_dma_memory_alloc
3178 * @adapter: pointer to our private adapter structure
3179 *
3180 * Returns 0 on success and errno on failure (as defined in errno.h).
3181 *
3182 * Allocates memory that will be visible both to the device and to the CPU.
3183 * The OS will pass us packets, pointers to which we will insert in the Tx
3184 * Descriptor queue. The device will read this queue to find the packets in
3185 * memory. The device will update the "status" in memory each time it xmits a
3186 * packet.
3187 */
3188static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
3189{
3190    int desc_size = 0;
3191    struct tx_ring *tx_ring = &adapter->tx_ring;
3192
3193    /* Allocate memory for the TCB's (Transmit Control Block) */
3194    adapter->tx_ring.tcb_ring =
3195        kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
3196    if (!adapter->tx_ring.tcb_ring) {
3197        dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
3198        return -ENOMEM;
3199    }
3200
3201    /* Allocate enough memory for the Tx descriptor ring, and allocate
3202     * some extra so that the ring can be aligned on a 4k boundary.
3203     */
3204    desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
3205    tx_ring->tx_desc_ring =
3206        (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
3207                          desc_size,
3208                          &tx_ring->tx_desc_ring_pa,
3209                          GFP_KERNEL);
3210    if (!adapter->tx_ring.tx_desc_ring) {
3211        dev_err(&adapter->pdev->dev,
3212            "Cannot alloc memory for Tx Ring\n");
3213        return -ENOMEM;
3214    }
3215
3216    /* NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
3217     * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
3218     * are ever returned, make sure the high part of the physical
3219     * address is retrieved before storing the adjusted address.
3220     */
3223    /* Allocate memory for the Tx status block */
3224    tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3225                            sizeof(u32),
3226                            &tx_ring->tx_status_pa,
3227                            GFP_KERNEL);
3228    if (!adapter->tx_ring.tx_status) {
3229        dev_err(&adapter->pdev->dev,
3230                  "Cannot alloc memory for Tx status block\n");
3231        return -ENOMEM;
3232    }
3233    return 0;
3234}
3235
3236/**
3237 * et131x_tx_dma_memory_free - Free all memory allocated within this module
3238 * @adapter: pointer to our private adapter structure
3239 *
3240 * Frees the Tx descriptor ring, the Tx status block and the TCB ring.
3241 */
3242static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3243{
3244    int desc_size = 0;
3245
3246    if (adapter->tx_ring.tx_desc_ring) {
3247        /* Free memory relating to Tx rings here */
3248        desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3249                                + 4096 - 1;
3250        dma_free_coherent(&adapter->pdev->dev,
3251                    desc_size,
3252                    adapter->tx_ring.tx_desc_ring,
3253                    adapter->tx_ring.tx_desc_ring_pa);
3254        adapter->tx_ring.tx_desc_ring = NULL;
3255    }
3256
3257    /* Free memory for the Tx status block */
3258    if (adapter->tx_ring.tx_status) {
3259        dma_free_coherent(&adapter->pdev->dev,
3260                    sizeof(u32),
3261                    adapter->tx_ring.tx_status,
3262                    adapter->tx_ring.tx_status_pa);
3263
3264        adapter->tx_ring.tx_status = NULL;
3265    }
3266    /* Free the memory for the tcb structures */
3267    kfree(adapter->tx_ring.tcb_ring);
3268}
3269
3270/**
3271 * nic_send_packet - NIC specific send handler for version B silicon.
3272 * @adapter: pointer to our adapter
3273 * @tcb: pointer to struct tcb
3274 *
3275 * Returns 0 or errno.
3276 */
3277static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3278{
3279    u32 i;
3280    struct tx_desc desc[24]; /* 24 x 16 byte */
3281    u32 frag = 0;
3282    u32 thiscopy, remainder;
3283    struct sk_buff *skb = tcb->skb;
3284    u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3285    struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3286    unsigned long flags;
3287    struct phy_device *phydev = adapter->phydev;
3288
3289    /* Part of the optimizations of this send routine restrict us to
3290     * sending 24 fragments at a pass. In practice we should never see
3291     * more than 5 fragments.
3292     *
3293     * NOTE: The older version of this function (below) can handle any
3294     * number of fragments. If needed, we can call this function,
3295     * although it is less efficient.
3296     */
3297    if (nr_frags > 23)
3298        return -EIO;
3299
3300    memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3301
3302    for (i = 0; i < nr_frags; i++) {
3303        /* If there is something in this element, lets get a
3304         * descriptor from the ring and get the necessary data
3305         */
3306        if (i == 0) {
3307            /* If the fragments are smaller than a standard MTU,
3308             * then map them to a single descriptor in the Tx
3309             * Desc ring. However, if they're larger, as is
3310             * possible with support for jumbo packets, then
3311             * split them each across 2 descriptors.
3312             *
3313             * This will work until we determine why the hardware
3314             * doesn't seem to like large fragments.
3315             */
3316            if ((skb->len - skb->data_len) <= 1514) {
3317                desc[frag].addr_hi = 0;
3318                /* Low 16 bits are length; the high bits are VLAN
3319                 * and currently unused, so zero */
3320                desc[frag].len_vlan =
3321                    skb->len - skb->data_len;
3322
3323                /* NOTE: Here, the dma_addr_t returned from
3324                 * dma_map_single() is implicitly cast as a
3325                 * u32. Although dma_addr_t can be
3326                 * 64-bit, the address returned by
3327                 * dma_map_single() is always 32-bit
3328                 * addressable (as defined by the pci/dma
3329                 * subsystem)
3330                 */
3331                desc[frag++].addr_lo =
3332                    dma_map_single(&adapter->pdev->dev,
3333                           skb->data,
3334                           skb->len -
3335                           skb->data_len,
3336                           DMA_TO_DEVICE);
3337            } else {
3338                desc[frag].addr_hi = 0;
3339                desc[frag].len_vlan =
3340                    (skb->len - skb->data_len) / 2;
3341
3342                /* NOTE: Here, the dma_addr_t returned from
3343                 * dma_map_single() is implicitly cast as a
3344                 * u32. Although dma_addr_t can be
3345                 * 64-bit, the address returned by
3346                 * dma_map_single() is always 32-bit
3347                 * addressable (as defined by the pci/dma
3348                 * subsystem)
3349                 */
3350                desc[frag++].addr_lo =
3351                    dma_map_single(&adapter->pdev->dev,
3352                           skb->data,
3353                           ((skb->len -
3354                             skb->data_len) / 2),
3355                           DMA_TO_DEVICE);
3356                desc[frag].addr_hi = 0;
3357
3358                desc[frag].len_vlan =
3359                    (skb->len - skb->data_len) / 2;
3360
3361                /* NOTE: Here, the dma_addr_t returned from
3362                 * dma_map_single() is implicitly cast as a
3363                 * u32. Although dma_addr_t can be
3364                 * 64-bit, the address returned by
3365                 * dma_map_single() is always 32-bit
3366                 * addressable (as defined by the pci/dma
3367                 * subsystem)
3368                 */
3369                desc[frag++].addr_lo =
3370                    dma_map_single(&adapter->pdev->dev,
3371                           skb->data +
3372                           ((skb->len -
3373                             skb->data_len) / 2),
3374                           ((skb->len -
3375                             skb->data_len) / 2),
3376                           DMA_TO_DEVICE);
3377            }
3378        } else {
3379            desc[frag].addr_hi = 0;
3380            desc[frag].len_vlan =
3381                    frags[i - 1].size;
3382
3383            /* NOTE: Here, the dma_addr_t returned from
3384             * dma_map_page() is implicitly cast as a u32.
3385             * Although dma_addr_t can be 64-bit, the address
3386             * returned by dma_map_page() is always 32-bit
3387             * addressable (as defined by the pci/dma subsystem)
3388             */
3389            desc[frag++].addr_lo = skb_frag_dma_map(
3390                            &adapter->pdev->dev,
3391                            &frags[i - 1],
3392                            0,
3393                            frags[i - 1].size,
3394                            DMA_TO_DEVICE);
3395        }
3396    }
3397
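        /* Descriptor "flags" bits, as used below: 0x1 marks the last
         * element of the packet, 0x2 the first, and 0x4 asks the
         * hardware to raise an interrupt on completion.
         */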
3398    if (phydev && phydev->speed == SPEED_1000) {
3399        if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3400            /* Last element & Interrupt flag */
3401            desc[frag - 1].flags = 0x5;
3402            adapter->tx_ring.since_irq = 0;
3403        } else { /* Last element */
3404            desc[frag - 1].flags = 0x1;
3405        }
3406    } else
3407        desc[frag - 1].flags = 0x5;
3408
3409    desc[0].flags |= 2; /* First element flag */
3410
3411    tcb->index_start = adapter->tx_ring.send_idx;
3412    tcb->stale = 0;
3413
3414    spin_lock_irqsave(&adapter->send_hw_lock, flags);
3415
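        /* send_idx is a 10-bit ring offset plus a wrap-indicator bit
         * that toggles on each pass around the ring, letting producer
         * and consumer tell a full ring from an empty one. A sketch of
         * the helpers, assuming the usual definitions in et131x.h:
         *
         *   #define ET_DMA10_MASK 0x3FF  - low 10 bits, the index
         *   #define ET_DMA10_WRAP 0x400  - wrap indicator bit
         *   #define INDEX10(x) ((x) & ET_DMA10_MASK)
         *
         * add_10bit() advances the low 10 bits while preserving the
         * wrap bit.
         */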
3416    thiscopy = NUM_DESC_PER_RING_TX -
3417                INDEX10(adapter->tx_ring.send_idx);
3418
3419    if (thiscopy >= frag) {
3420        remainder = 0;
3421        thiscopy = frag;
3422    } else {
3423        remainder = frag - thiscopy;
3424    }
3425
3426    memcpy(adapter->tx_ring.tx_desc_ring +
3427           INDEX10(adapter->tx_ring.send_idx), desc,
3428           sizeof(struct tx_desc) * thiscopy);
3429
3430    add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3431
3432    if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3433          INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3434        adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3435        adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3436    }
3437
3438    if (remainder) {
3439        memcpy(adapter->tx_ring.tx_desc_ring,
3440               desc + thiscopy,
3441               sizeof(struct tx_desc) * remainder);
3442
3443        add_10bit(&adapter->tx_ring.send_idx, remainder);
3444    }
3445
3446    if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3447        if (adapter->tx_ring.send_idx)
3448            tcb->index = NUM_DESC_PER_RING_TX - 1;
3449        else
3450            tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
3451    } else
3452        tcb->index = adapter->tx_ring.send_idx - 1;
3453
3454    spin_lock(&adapter->tcb_send_qlock);
3455
3456    if (adapter->tx_ring.send_tail)
3457        adapter->tx_ring.send_tail->next = tcb;
3458    else
3459        adapter->tx_ring.send_head = tcb;
3460
3461    adapter->tx_ring.send_tail = tcb;
3462
3463    WARN_ON(tcb->next != NULL);
3464
3465    adapter->tx_ring.used++;
3466
3467    spin_unlock(&adapter->tcb_send_qlock);
3468
3469    /* Write the new write pointer back to the device. */
3470    writel(adapter->tx_ring.send_idx,
3471           &adapter->regs->txdma.service_request);
3472
3473    /* For Gig only, we use Tx Interrupt coalescing. Enable the software
3474     * timer to wake us up if this packet isn't followed by N more.
3475     */
3476    if (phydev && phydev->speed == SPEED_1000) {
3477        writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3478               &adapter->regs->global.watchdog_timer);
3479    }
3480    spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3481
3482    return 0;
3483}
3484
3485/**
3486 * send_packet - Do the work to send a packet
3487 * @skb: the packet(s) to send
3488 * @adapter: a pointer to the device's private adapter structure
3489 *
3490 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
3491 *
3492 * Assumption: Send spinlock has been acquired
3493 */
3494static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3495{
3496    int status;
3497    struct tcb *tcb = NULL;
3498    u16 *shbufva;
3499    unsigned long flags;
3500
3501    /* All packets must have at least a MAC address and a protocol type */
3502    if (skb->len < ETH_HLEN)
3503        return -EIO;
3504
3505    /* Get a TCB for this packet */
3506    spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3507
3508    tcb = adapter->tx_ring.tcb_qhead;
3509
3510    if (tcb == NULL) {
3511        spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3512        return -ENOMEM;
3513    }
3514
3515    adapter->tx_ring.tcb_qhead = tcb->next;
3516
3517    if (adapter->tx_ring.tcb_qhead == NULL)
3518        adapter->tx_ring.tcb_qtail = NULL;
3519
3520    spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3521
3522    tcb->skb = skb;
3523
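        /* Peek at the destination MAC in the frame header: all-ones
         * means broadcast, and a set group bit (LSB of the first
         * octet) means multicast. This only classifies the TCB for the
         * statistics kept in free_send_packet().
         */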
3524    if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3525        shbufva = (u16 *) skb->data;
3526
3527        if ((shbufva[0] == 0xffff) &&
3528            (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3529            tcb->flags |= fMP_DEST_BROAD;
3530        } else if ((shbufva[0] & 0x3) == 0x0001) {
3531            tcb->flags |= fMP_DEST_MULTI;
3532        }
3533    }
3534
3535    tcb->next = NULL;
3536
3537    /* Call the NIC specific send handler. */
3538    status = nic_send_packet(adapter, tcb);
3539
3540    if (status != 0) {
3541        spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3542
3543        if (adapter->tx_ring.tcb_qtail)
3544            adapter->tx_ring.tcb_qtail->next = tcb;
3545        else
3546            /* Apparently ready Q is empty. */
3547            adapter->tx_ring.tcb_qhead = tcb;
3548
3549        adapter->tx_ring.tcb_qtail = tcb;
3550        spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3551        return status;
3552    }
3553    WARN_ON(adapter->tx_ring.used > NUM_TCB);
3554    return 0;
3555}
3556
3557/**
3558 * et131x_send_packets - This function is called by the OS to send packets
3559 * @skb: the packet(s) to send
3560 * @netdev: device on which to TX the above packet(s)
3561 *
3562 * Return 0 in almost all cases; non-zero value in extreme hard failure only
3563 */
3564static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3565{
3566    int status = 0;
3567    struct et131x_adapter *adapter = netdev_priv(netdev);
3568
3569    /* Send these packets
3570     *
3571     * NOTE: The Linux Tx entry point is only given one packet at a time
3572     * to Tx, so a PacketCount and its associated array make no sense here
3573     */
3574
3575    /* TCB is not available */
3576    if (adapter->tx_ring.used >= NUM_TCB) {
3577        /* NOTE: If there's an error on send, no need to queue the
3578         * packet under Linux; if we just send an error up to the
3579         * netif layer, it will resend the skb to us.
3580         */
3581        status = -ENOMEM;
3582    } else {
3583        /* We need to see if the link is up; if it's not, make the
3584         * netif layer think we're good and drop the packet
3585         */
3586        if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3587                    !netif_carrier_ok(netdev)) {
3588            dev_kfree_skb_any(skb);
3589            skb = NULL;
3590
3591            adapter->net_stats.tx_dropped++;
3592        } else {
3593            status = send_packet(skb, adapter);
3594            if (status != 0 && status != -ENOMEM) {
3595                /* On any other error, make netif think we're
3596                 * OK and drop the packet
3597                 */
3598                dev_kfree_skb_any(skb);
3599                skb = NULL;
3600                adapter->net_stats.tx_dropped++;
3601            }
3602        }
3603    }
3604    return status;
3605}
3606
3607/**
3608 * free_send_packet - Recycle a struct tcb
3609 * @adapter: pointer to our adapter
3610 * @tcb: pointer to struct tcb
3611 *
3612 * Complete the packet if necessary
3613 * Assumption - Send spinlock has been acquired
3614 */
3615static inline void free_send_packet(struct et131x_adapter *adapter,
3616                        struct tcb *tcb)
3617{
3618    unsigned long flags;
3619    struct tx_desc *desc = NULL;
3620    struct net_device_stats *stats = &adapter->net_stats;
3621
3622    if (tcb->flags & fMP_DEST_BROAD)
3623        atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3624    else if (tcb->flags & fMP_DEST_MULTI)
3625        atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3626    else
3627        atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3628
3629    if (tcb->skb) {
3630        stats->tx_bytes += tcb->skb->len;
3631
3632        /* Iterate through the TX descriptors on the ring
3633         * corresponding to this packet and unmap the fragments
3634         * they point to
3635         */
3636        do {
3637            desc = (struct tx_desc *)
3638                    (adapter->tx_ring.tx_desc_ring +
3639                        INDEX10(tcb->index_start));
3640
3641            dma_unmap_single(&adapter->pdev->dev,
3642                     desc->addr_lo,
3643                     desc->len_vlan, DMA_TO_DEVICE);
3644
3645            add_10bit(&tcb->index_start, 1);
3646            if (INDEX10(tcb->index_start) >=
3647                            NUM_DESC_PER_RING_TX) {
3648                tcb->index_start &= ~ET_DMA10_MASK;
3649                tcb->index_start ^= ET_DMA10_WRAP;
3650            }
3651        } while (desc != (adapter->tx_ring.tx_desc_ring +
3652                INDEX10(tcb->index)));
3653
3654        dev_kfree_skb_any(tcb->skb);
3655    }
3656
3657    memset(tcb, 0, sizeof(struct tcb));
3658
3659    /* Add the TCB to the Ready Q */
3660    spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3661
3662    adapter->net_stats.tx_packets++;
3663
3664    if (adapter->tx_ring.tcb_qtail)
3665        adapter->tx_ring.tcb_qtail->next = tcb;
3666    else
3667        /* Apparently ready Q is empty. */
3668        adapter->tx_ring.tcb_qhead = tcb;
3669
3670    adapter->tx_ring.tcb_qtail = tcb;
3671
3672    spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3673    WARN_ON(adapter->tx_ring.used < 0);
3674}
3675
3676/**
3677 * et131x_free_busy_send_packets - Free and complete the stopped active sends
3678 * @adapter: pointer to our adapter
3679 *
3680 * Assumption - Send spinlock has been acquired
3681 */
3682static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3683{
3684    struct tcb *tcb;
3685    unsigned long flags;
3686    u32 freed = 0;
3687
3688    /* Any packets being sent? Check the first TCB on the send list */
3689    spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3690
3691    tcb = adapter->tx_ring.send_head;
3692
3693    while (tcb != NULL && freed < NUM_TCB) {
3694        struct tcb *next = tcb->next;
3695
3696        adapter->tx_ring.send_head = next;
3697
3698        if (next == NULL)
3699            adapter->tx_ring.send_tail = NULL;
3700
3701        adapter->tx_ring.used--;
3702
3703        spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3704
3705        freed++;
3706        free_send_packet(adapter, tcb);
3707
3708        spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3709
3710        tcb = adapter->tx_ring.send_head;
3711    }
3712
3713    WARN_ON(freed == NUM_TCB);
3714
3715    spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3716
3717    adapter->tx_ring.used = 0;
3718}
3719
3720/**
3721 * et131x_handle_send_interrupt - Interrupt handler for sending processing
3722 * @adapter: pointer to our adapter
3723 *
3724 * Re-claim the send resources, complete sends and get more to send from
3725 * the send wait queue.
3726 *
3727 * Assumption - Send spinlock has been acquired
3728 */
3729static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3730{
3731    unsigned long flags;
3732    u32 serviced;
3733    struct tcb *tcb;
3734    u32 index;
3735
3736    serviced = readl(&adapter->regs->txdma.new_service_complete);
3737    index = INDEX10(serviced);
3738
3739    /* Has the ring wrapped? Process any descriptors that do not have
3740     * the same "wrap" indicator as the current completion indicator
3741     */
3742    spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3743
3744    tcb = adapter->tx_ring.send_head;
3745
3746    while (tcb &&
3747           ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3748           index < INDEX10(tcb->index)) {
3749        adapter->tx_ring.used--;
3750        adapter->tx_ring.send_head = tcb->next;
3751        if (tcb->next == NULL)
3752            adapter->tx_ring.send_tail = NULL;
3753
3754        spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3755        free_send_packet(adapter, tcb);
3756        spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3757
3758        /* Go to the next packet */
3759        tcb = adapter->tx_ring.send_head;
3760    }
3761    while (tcb &&
3762           !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3763           && index > (tcb->index & ET_DMA10_MASK)) {
3764        adapter->tx_ring.used--;
3765        adapter->tx_ring.send_head = tcb->next;
3766        if (tcb->next == NULL)
3767            adapter->tx_ring.send_tail = NULL;
3768
3769        spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3770        free_send_packet(adapter, tcb);
3771        spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3772
3773        /* Go to the next packet */
3774        tcb = adapter->tx_ring.send_head;
3775    }
3776
3777    /* Wake up the queue when we hit a low-water mark */
3778    if (adapter->tx_ring.used <= NUM_TCB / 3)
3779        netif_wake_queue(adapter->netdev);
3780
3781    spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3782}
3783
3784static int et131x_get_settings(struct net_device *netdev,
3785                   struct ethtool_cmd *cmd)
3786{
3787    struct et131x_adapter *adapter = netdev_priv(netdev);
3788
3789    return phy_ethtool_gset(adapter->phydev, cmd);
3790}
3791
3792static int et131x_set_settings(struct net_device *netdev,
3793                   struct ethtool_cmd *cmd)
3794{
3795    struct et131x_adapter *adapter = netdev_priv(netdev);
3796
3797    return phy_ethtool_sset(adapter->phydev, cmd);
3798}
3799
3800static int et131x_get_regs_len(struct net_device *netdev)
3801{
3802#define ET131X_REGS_LEN 256
3803    return ET131X_REGS_LEN * sizeof(u32);
3804}
3805
3806static void et131x_get_regs(struct net_device *netdev,
3807                struct ethtool_regs *regs, void *regs_data)
3808{
3809    struct et131x_adapter *adapter = netdev_priv(netdev);
3810    struct address_map __iomem *aregs = adapter->regs;
3811    u32 *regs_buff = regs_data;
3812    u32 num = 0;
3813
3814    memset(regs_data, 0, et131x_get_regs_len(netdev));
3815
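        /* Encode an ethtool regs version: 0x01 in the top byte, then
         * the PCI revision and the PCI device ID.
         */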
3816    regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3817            adapter->pdev->device;
3818
3819    /* PHY regs */
3820    et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3821    et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3822    et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3823    et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3824    et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3825    et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3826    et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3827    /* Autoneg next page transmit reg */
3828    et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3829    /* Link partner next page reg */
3830    et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3831    et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3832    et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3833    et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3834    et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3835    et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3836    et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3837            (u16 *)&regs_buff[num++]);
3838    et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3839            (u16 *)&regs_buff[num++]);
3840    et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3841            (u16 *)&regs_buff[num++]);
3842    et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3843            (u16 *)&regs_buff[num++]);
3844    et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3845    et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3846    et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3847    et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3848            (u16 *)&regs_buff[num++]);
3849    et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3850    et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3851    et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3852
3853    /* Global regs */
3854    regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3855    regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3856    regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3857    regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3858    regs_buff[num++] = readl(&aregs->global.pm_csr);
3859    regs_buff[num++] = adapter->stats.interrupt_status;
3860    regs_buff[num++] = readl(&aregs->global.int_mask);
3861    regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3862    regs_buff[num++] = readl(&aregs->global.int_status_alias);
3863    regs_buff[num++] = readl(&aregs->global.sw_reset);
3864    regs_buff[num++] = readl(&aregs->global.slv_timer);
3865    regs_buff[num++] = readl(&aregs->global.msi_config);
3866    regs_buff[num++] = readl(&aregs->global.loopback);
3867    regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3868
3869    /* TXDMA regs */
3870    regs_buff[num++] = readl(&aregs->txdma.csr);
3871    regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3872    regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3873    regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3874    regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3875    regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3876    regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3877    regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3878    regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3879    regs_buff[num++] = readl(&aregs->txdma.service_request);
3880    regs_buff[num++] = readl(&aregs->txdma.service_complete);
3881    regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3882    regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3883    regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3884    regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3885    regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3886    regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3887    regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3888    regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3889    regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3890    regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3891    regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3892    regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3893    regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3894    regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3895    regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3896
3897    /* RXDMA regs */
3898    regs_buff[num++] = readl(&aregs->rxdma.csr);
3899    regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3900    regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3901    regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3902    regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3903    regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3904    regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3905    regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3906    regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3907    regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3908    regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3909    regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3910    regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3911    regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3912    regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3913    regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3914    regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3915    regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3916    regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3917    regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3918    regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3919    regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3920    regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3921    regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3922    regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3923    regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3924    regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3925    regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3926    regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3927}
3928
3929#define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
3930static void et131x_get_drvinfo(struct net_device *netdev,
3931                   struct ethtool_drvinfo *info)
3932{
3933    struct et131x_adapter *adapter = netdev_priv(netdev);
3934
3935    strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3936    strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3937    strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3938}
3939
3940static struct ethtool_ops et131x_ethtool_ops = {
3941    .get_settings = et131x_get_settings,
3942    .set_settings = et131x_set_settings,
3943    .get_drvinfo = et131x_get_drvinfo,
3944    .get_regs_len = et131x_get_regs_len,
3945    .get_regs = et131x_get_regs,
3946    .get_link = ethtool_op_get_link,
3947};

3948/**
3949 * et131x_hwaddr_init - set up the MAC Address on the ET1310
3950 * @adapter: pointer to our private adapter structure
3951 */
3952static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3953{
3954    /* If we have our default mac from init and no mac address from
3955     * EEPROM then we need to generate the last octet and set it on the
3956     * device
3957     */
3958    if (adapter->rom_addr[0] == 0x00 &&
3959        adapter->rom_addr[1] == 0x00 &&
3960        adapter->rom_addr[2] == 0x00 &&
3961        adapter->rom_addr[3] == 0x00 &&
3962        adapter->rom_addr[4] == 0x00 &&
3963        adapter->rom_addr[5] == 0x00) {
3964        /*
3965         * We need to randomly generate the last octet so we
3966         * decrease our chances of setting the mac address to
3967         * same as another one of our cards in the system
3968         */
3969        get_random_bytes(&adapter->addr[5], 1);
3970        /*
3971         * We have the default value in the register we are
3972         * working with so we need to copy the current
3973         * address into the permanent address
3974         */
3975        memcpy(adapter->rom_addr,
3976            adapter->addr, ETH_ALEN);
3977    } else {
3978        /* We do not have an override address, so set the
3979         * current address to the permanent address and add
3980         * it to the device
3981         */
3982        memcpy(adapter->addr,
3983               adapter->rom_addr, ETH_ALEN);
3984    }
3985}
3986
3987/**
3988 * et131x_pci_init - initial PCI setup
3989 * @adapter: pointer to our private adapter structure
3990 * @pdev: our PCI device
3991 *
3992 * Perform the initial setup of PCI registers and if possible initialise
3993 * the MAC address. At this point the I/O registers have yet to be mapped
3994 */
3995static int et131x_pci_init(struct et131x_adapter *adapter,
3996                        struct pci_dev *pdev)
3997{
3998    int cap = pci_pcie_cap(pdev);
3999    u16 max_payload;
4000    u16 ctl;
4001    int i, rc;
4002
4003    rc = et131x_init_eeprom(adapter);
4004    if (rc < 0)
4005        goto out;
4006
4007    if (!cap) {
4008        dev_err(&pdev->dev, "Missing PCIe capabilities\n");
4009        goto err_out;
4010    }
4011
4012    /* Let's set up the PORT LOGIC Register. First we need to know what
4013     * the max_payload_size is
4014     */
4015    if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
4016        dev_err(&pdev->dev,
4017            "Could not read PCI config space for Max Payload Size\n");
4018        goto err_out;
4019    }
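    /* (Max_Payload_Size lives in the low three bits of the 32-bit
     * DEVCAP register, so the 16-bit read above is sufficient.)
     */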
4020
4021    /* Program the Ack/Nak latency and replay timers */
4022    max_payload &= 0x07;
4023
4024    if (max_payload < 2) {
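            /* Ack/nak and replay timer values for max payload sizes of
             * 128 and 256 bytes respectively (DEVCAP encodings 0 and 1).
             */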
4025        static const u16 acknak[2] = { 0x76, 0xD0 };
4026        static const u16 replay[2] = { 0x1E0, 0x2ED };
4027
4028        if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
4029                           acknak[max_payload])) {
4030            dev_err(&pdev->dev,
4031              "Could not write PCI config space for ACK/NAK\n");
4032            goto err_out;
4033        }
4034        if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
4035                           replay[max_payload])) {
4036            dev_err(&pdev->dev,
4037              "Could not write PCI config space for Replay Timer\n");
4038            goto err_out;
4039        }
4040    }
4041
4042    /* L0s and L1 latency timers. We are using the default values,
4043     * representing 001 for L0s and 010 for L1
4044     */
4045    if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
4046        dev_err(&pdev->dev,
4047          "Could not write PCI config space for Latency Timers\n");
4048        goto err_out;
4049    }
4050
4051    /* Change the max read size to 2k */
4052    if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
4053        dev_err(&pdev->dev,
4054            "Could not read PCI config space for Max read size\n");
4055        goto err_out;
4056    }
4057
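        /* 0x04 in the 3-bit Max_Read_Request_Size field (bits 14:12 of
         * DEVCTL) selects 2048 bytes.
         */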
4058    ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);
4059
4060    if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
4061        dev_err(&pdev->dev,
4062              "Could not write PCI config space for Max read size\n");
4063        goto err_out;
4064    }
4065
4066    /* Get MAC address from config space if an eeprom exists, otherwise
4067     * the MAC address there will not be valid
4068     */
4069    if (!adapter->has_eeprom) {
4070        et131x_hwaddr_init(adapter);
4071        return 0;
4072    }
4073
4074    for (i = 0; i < ETH_ALEN; i++) {
4075        if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
4076                    adapter->rom_addr + i)) {
4077            dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
4078            goto err_out;
4079        }
4080    }
4081    memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
4082out:
4083    return rc;
4084err_out:
4085    rc = -EIO;
4086    goto out;
4087}
4088
4089/**
4090 * et131x_error_timer_handler
4091 * @data: timer-specific variable; here a pointer to our adapter structure
4092 *
4093 * The routine called when the error timer expires, to track the number of
4094 * recurring errors.
4095 */
4096static void et131x_error_timer_handler(unsigned long data)
4097{
4098    struct et131x_adapter *adapter = (struct et131x_adapter *) data;
4099    struct phy_device *phydev = adapter->phydev;
4100
4101    if (et1310_in_phy_coma(adapter)) {
4102        /* Bring the device immediately out of coma to
4103         * prevent it from sleeping indefinitely; this
4104         * mechanism could be improved! */
4105        et1310_disable_phy_coma(adapter);
4106        adapter->boot_coma = 20;
4107    } else {
4108        et1310_update_macstat_host_counters(adapter);
4109    }
4110
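        /* boot_coma counts error-timer ticks spent without link: at 10
         * the PHY is put into coma (power-save) mode below, while 20
         * appears to serve as a "link is up, don't enter coma"
         * sentinel.
         */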
4111    if (!phydev->link && adapter->boot_coma < 11)
4112        adapter->boot_coma++;
4113
4114    if (adapter->boot_coma == 10) {
4115        if (!phydev->link) {
4116            if (!et1310_in_phy_coma(adapter)) {
4117                /* NOTE - This was originally a 'sync with
4118                 * interrupt'. How to do that under Linux?
4119                 */
4120                et131x_enable_interrupts(adapter);
4121                et1310_enable_phy_coma(adapter);
4122            }
4123        }
4124    }
4125
4126    /* This is a periodic timer, so reschedule */
4127    mod_timer(&adapter->error_timer, jiffies +
4128                      TX_ERROR_PERIOD * HZ / 1000);
4129}
4130
4131/**
4132 * et131x_adapter_memory_alloc
4133 * @adapter: pointer to our private adapter structure
4134 *
4135 * Returns 0 on success, errno on failure (as defined in errno.h).
4136 *
4137 * Allocate all the memory blocks for send, receive and others.
4138 */
4139static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4140{
4141    int status;
4142
4143    /* Allocate memory for the Tx Ring */
4144    status = et131x_tx_dma_memory_alloc(adapter);
4145    if (status != 0) {
4146        dev_err(&adapter->pdev->dev,
4147              "et131x_tx_dma_memory_alloc FAILED\n");
4148        return status;
4149    }
4150    /* Receive buffer memory allocation */
4151    status = et131x_rx_dma_memory_alloc(adapter);
4152    if (status != 0) {
4153        dev_err(&adapter->pdev->dev,
4154              "et131x_rx_dma_memory_alloc FAILED\n");
4155        et131x_tx_dma_memory_free(adapter);
4156        return status;
4157    }
4158
4159    /* Init receive data structures */
4160    status = et131x_init_recv(adapter);
4161    if (status != 0) {
4162        dev_err(&adapter->pdev->dev,
4163            "et131x_init_recv FAILED\n");
4164        et131x_tx_dma_memory_free(adapter);
4165        et131x_rx_dma_memory_free(adapter);
4166    }
4167    return status;
4168}
4169
4170/**
4171 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
4172 * @adapter: pointer to our private adapter structure
4173 */
4174static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4175{
4176    /* Free DMA memory */
4177    et131x_tx_dma_memory_free(adapter);
4178    et131x_rx_dma_memory_free(adapter);
4179}
4180
4181static void et131x_adjust_link(struct net_device *netdev)
4182{
4183    struct et131x_adapter *adapter = netdev_priv(netdev);
4184    struct phy_device *phydev = adapter->phydev;
4185
4186    if (netif_carrier_ok(netdev)) {
4187        adapter->boot_coma = 20;
4188
4189        if (phydev && phydev->speed == SPEED_10) {
4190            /*
4191             * NOTE - Is there a way to query this without
4192             * TruePHY?
4193             * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
4194             * EMI_TRUEPHY_A13O) {
4195             */
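                /* Apparently a PHY register tuning sequence for
                 * 10 Mbit operation, carried over from the vendor's
                 * TruePHY code.
                 */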
4196            u16 register18;
4197
4198            et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4199                     &register18);
4200            et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4201                     register18 | 0x4);
4202            et131x_mii_write(adapter, PHY_INDEX_REG,
4203                     register18 | 0x8402);
4204            et131x_mii_write(adapter, PHY_DATA_REG,
4205                     register18 | 511);
4206            et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4207                     register18);
4208        }
4209
4210        et1310_config_flow_control(adapter);
4211
4212        if (phydev && phydev->speed == SPEED_1000 &&
4213                adapter->registry_jumbo_packet > 2048) {
4214            u16 reg;
4215
4216            et131x_mii_read(adapter, PHY_CONFIG, &reg);
4217            reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4218            reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4219            et131x_mii_write(adapter, PHY_CONFIG, reg);
4220        }
4221
4222        et131x_set_rx_dma_timer(adapter);
4223        et1310_config_mac_regs2(adapter);
4224    }
4225
4226    if (phydev && phydev->link != adapter->link) {
4227        /*
4228         * Check to see if we are in coma mode and if
4229         * so, disable it because we will not be able
4230         * to read PHY values until we are out.
4231         */
4232        if (et1310_in_phy_coma(adapter))
4233            et1310_disable_phy_coma(adapter);
4234
4235        if (phydev->link) {
4236            adapter->boot_coma = 20;
4237        } else {
4238            dev_warn(&adapter->pdev->dev,
4239                "Link down - cable problem ?\n");
4240            adapter->boot_coma = 0;
4241
4242            if (phydev->speed == SPEED_10) {
4243                /* NOTE - Is there a way to query this without
4244                 * TruePHY?
4245                 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
4246                 * EMI_TRUEPHY_A13O)
4247                 */
4248                u16 register18;
4249
4250                et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4251                         &register18);
4252                et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4253                         register18 | 0x4);
4254                et131x_mii_write(adapter, PHY_INDEX_REG,
4255                         register18 | 0x8402);
4256                et131x_mii_write(adapter, PHY_DATA_REG,
4257                         register18 | 511);
4258                et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4259                         register18);
4260            }
4261
4262            /* Free the packets being actively sent & stopped */
4263            et131x_free_busy_send_packets(adapter);
4264
4265            /* Re-initialize the send structures */
4266            et131x_init_send(adapter);
4267
4268            /*
4269             * Bring the device back to the state it was during
4270             * init prior to autonegotiation being complete. This
4271             * way, when we get the auto-neg complete interrupt,
4272             * we can complete init by calling config_mac_regs2.
4273             */
4274            et131x_soft_reset(adapter);
4275
4276            /* Setup ET1310 as per the documentation */
4277            et131x_adapter_setup(adapter);
4278
4279            /* perform reset of tx/rx */
4280            et131x_disable_txrx(netdev);
4281            et131x_enable_txrx(netdev);
4282        }
4283
4284        adapter->link = phydev->link;
4285
4286        phy_print_status(phydev);
4287    }
4288}
4289
4290static int et131x_mii_probe(struct net_device *netdev)
4291{
4292    struct et131x_adapter *adapter = netdev_priv(netdev);
4293    struct phy_device *phydev = NULL;
4294
4295    phydev = phy_find_first(adapter->mii_bus);
4296    if (!phydev) {
4297        dev_err(&adapter->pdev->dev, "no PHY found\n");
4298        return -ENODEV;
4299    }
4300
4301    phydev = phy_connect(netdev, dev_name(&phydev->dev),
4302            &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4303
4304    if (IS_ERR(phydev)) {
4305        dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4306        return PTR_ERR(phydev);
4307    }
4308
4309    phydev->supported &= (SUPPORTED_10baseT_Half
4310                | SUPPORTED_10baseT_Full
4311                | SUPPORTED_100baseT_Half
4312                | SUPPORTED_100baseT_Full
4313                | SUPPORTED_Autoneg
4314                | SUPPORTED_MII
4315                | SUPPORTED_TP);
4316
4317    if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4318        phydev->supported |= SUPPORTED_1000baseT_Full;
4319
4320    phydev->advertising = phydev->supported;
4321    adapter->phydev = phydev;
4322
4323    dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
4324         phydev->drv->name, dev_name(&phydev->dev));
4325
4326    return 0;
4327}
4328
4329/**
4330 * et131x_adapter_init
4331 * @adapter: pointer to the private adapter struct
4332 * @pdev: pointer to the PCI device
4333 *
4334 * Initialize the data structures for the et131x_adapter object and link
4335 * them together with the platform provided device structures.
4336 */
4337static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4338        struct pci_dev *pdev)
4339{
4340    static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4341
4342    struct et131x_adapter *adapter;
4343
4344    /* Allocate private adapter struct and copy in relevant information */
4345    adapter = netdev_priv(netdev);
4346    adapter->pdev = pci_dev_get(pdev);
4347    adapter->netdev = netdev;
4348
4349    /* Initialize spinlocks here */
4350    spin_lock_init(&adapter->lock);
4351    spin_lock_init(&adapter->tcb_send_qlock);
4352    spin_lock_init(&adapter->tcb_ready_qlock);
4353    spin_lock_init(&adapter->send_hw_lock);
4354    spin_lock_init(&adapter->rcv_lock);
4355    spin_lock_init(&adapter->rcv_pend_lock);
4356    spin_lock_init(&adapter->fbr_lock);
4357    spin_lock_init(&adapter->phy_lock);
4358
4359    adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
4360
4361    /* Set the MAC address to a default */
4362    memcpy(adapter->addr, default_mac, ETH_ALEN);
4363
4364    return adapter;
4365}
4366
4367/**
4368 * et131x_pci_remove
4369 * @pdev: a pointer to the device's pci_dev structure
4370 *
4371 * Registered in the pci_driver structure, this function is called when the
4372 * PCI subsystem detects that a PCI device which matches the information
4373 * contained in the pci_device_id table has been removed.
4374 */
4375static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4376{
4377    struct net_device *netdev = pci_get_drvdata(pdev);
4378    struct et131x_adapter *adapter = netdev_priv(netdev);
4379
4380    unregister_netdev(netdev);
4381    phy_disconnect(adapter->phydev);
4382    mdiobus_unregister(adapter->mii_bus);
4383    kfree(adapter->mii_bus->irq);
4384    mdiobus_free(adapter->mii_bus);
4385
4386    et131x_adapter_memory_free(adapter);
4387    iounmap(adapter->regs);
4388    pci_dev_put(pdev);
4389
4390    free_netdev(netdev);
4391    pci_release_regions(pdev);
4392    pci_disable_device(pdev);
4393}
4394
4395/**
4396 * et131x_up - Bring up a device for use.
4397 * @netdev: device to be opened
4398 */
4399static void et131x_up(struct net_device *netdev)
4400{
4401    struct et131x_adapter *adapter = netdev_priv(netdev);
4402
4403    et131x_enable_txrx(netdev);
4404    phy_start(adapter->phydev);
4405}
4406
4407/**
4408 * et131x_down - Bring down the device
4409 * @netdev: device to be brought down
4410 */
4411static void et131x_down(struct net_device *netdev)
4412{
4413    struct et131x_adapter *adapter = netdev_priv(netdev);
4414
4415    /* Save the timestamp for the TX watchdog, prevent a timeout */
4416    netdev->trans_start = jiffies;
4417
4418    phy_stop(adapter->phydev);
4419    et131x_disable_txrx(netdev);
4420}
4421
4422#ifdef CONFIG_PM_SLEEP
4423static int et131x_suspend(struct device *dev)
4424{
4425    struct pci_dev *pdev = to_pci_dev(dev);
4426    struct net_device *netdev = pci_get_drvdata(pdev);
4427
4428    if (netif_running(netdev)) {
4429        netif_device_detach(netdev);
4430        et131x_down(netdev);
4431        pci_save_state(pdev);
4432    }
4433
4434    return 0;
4435}
4436
4437static int et131x_resume(struct device *dev)
4438{
4439    struct pci_dev *pdev = to_pci_dev(dev);
4440    struct net_device *netdev = pci_get_drvdata(pdev);
4441
4442    if (netif_running(netdev)) {
4443        pci_restore_state(pdev);
4444        et131x_up(netdev);
4445        netif_device_attach(netdev);
4446    }
4447
4448    return 0;
4449}
4450
4451static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4452#define ET131X_PM_OPS (&et131x_pm_ops)
4453#else
4454#define ET131X_PM_OPS NULL
4455#endif
4456
4457/**
4458 * et131x_isr - The Interrupt Service Routine for the driver.
4459 * @irq: the IRQ on which the interrupt was received.
4460 * @dev_id: device-specific info (here a pointer to a net_device struct)
4461 *
4462 * Returns a value indicating if the interrupt was handled.
4463 */
4464irqreturn_t et131x_isr(int irq, void *dev_id)
4465{
4466    bool handled = true;
4467    struct net_device *netdev = (struct net_device *)dev_id;
4468    struct et131x_adapter *adapter = NULL;
4469    u32 status;
4470
4471    if (!netif_device_present(netdev)) {
4472        handled = false;
4473        goto out;
4474    }
4475
4476    adapter = netdev_priv(netdev);
4477
4478    /* If the adapter is in low power state, then it should not
4479     * recognize any interrupt
4480     */
4481
4482    /* Disable Device Interrupts */
4483    et131x_disable_interrupts(adapter);
4484
4485    /* Get a copy of the value in the interrupt status register
4486     * so we can process the interrupting section
4487     */
4488    status = readl(&adapter->regs->global.int_status);
4489
4490    if (adapter->flowcontrol == FLOW_TXONLY ||
4491        adapter->flowcontrol == FLOW_BOTH) {
4492        status &= ~INT_MASK_ENABLE;
4493    } else {
4494        status &= ~INT_MASK_ENABLE_NO_FLOW;
4495    }
4496
4497    /* Make sure this is our interrupt */
4498    if (!status) {
4499        handled = false;
4500        et131x_enable_interrupts(adapter);
4501        goto out;
4502    }
4503
4504    /* This is our interrupt, so process accordingly */
4505
4506    if (status & ET_INTR_WATCHDOG) {
4507        struct tcb *tcb = adapter->tx_ring.send_head;
4508
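            /* A TCB that has survived more than one watchdog tick
             * without completing suggests a missed Tx completion, so
             * fake a Tx DMA interrupt to reclaim it.
             */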
4509        if (tcb)
4510            if (++tcb->stale > 1)
4511                status |= ET_INTR_TXDMA_ISR;
4512
4513        if (adapter->rx_ring.unfinished_receives)
4514            status |= ET_INTR_RXDMA_XFR_DONE;
4515        else if (tcb == NULL)
4516            writel(0, &adapter->regs->global.watchdog_timer);
4517
4518        status &= ~ET_INTR_WATCHDOG;
4519    }
4520
4521    if (status == 0) {
4522        /* This interrupt has in some way been "handled" by
4523         * the ISR. Either it was a spurious Rx interrupt, or
4524         * it was a Tx interrupt that has been filtered by
4525         * the ISR.
4526         */
4527        et131x_enable_interrupts(adapter);
4528        goto out;
4529    }
4530
4531    /* We need to save the interrupt status value for use in our
4532     * DPC. We will clear the software copy of that in that
4533     * routine.
4534     */
4535    adapter->stats.interrupt_status = status;
4536
4537    /* Schedule the ISR handler to run as a deferred work item on
4538     * the kernel's shared workqueue
4539     */
4541    schedule_work(&adapter->task);
4542out:
4543    return IRQ_RETVAL(handled);
4544}
4545
4546/**
4547 * et131x_isr_handler - The ISR handler
4548 * @work: pointer to the work_struct embedded in our adapter structure
4549 *
4550 * Scheduled by the ISR to run in a deferred context. This is where the
4551 * ISR's work actually gets done.
4552 */
4553static void et131x_isr_handler(struct work_struct *work)
4554{
4555    struct et131x_adapter *adapter =
4556        container_of(work, struct et131x_adapter, task);
4557    u32 status = adapter->stats.interrupt_status;
4558    struct address_map __iomem *iomem = adapter->regs;
4559
4560    /*
4561     * These first two are by far the most common. Once handled, we clear
4562     * their two bits in the status word. If the word is now zero, we
4563     * exit.
4564     */
4565    /* Handle all the completed Transmit interrupts */
4566    if (status & ET_INTR_TXDMA_ISR)
4567        et131x_handle_send_interrupt(adapter);
4568
4569    /* Handle all the completed Receives interrupts */
4570    if (status & ET_INTR_RXDMA_XFR_DONE)
4571        et131x_handle_recv_interrupt(adapter);
4572
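        /* 0xffffffd7 is ~0x28, i.e. ~(ET_INTR_TXDMA_ISR |
         * ET_INTR_RXDMA_XFR_DONE): clear the two bits handled above.
         */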
4573    status &= 0xffffffd7;
4574
4575    if (status) {
4576        /* Handle the TXDMA Error interrupt */
4577        if (status & ET_INTR_TXDMA_ERR) {
4578            u32 txdma_err;
4579
4580            /* Following read also clears the register (COR) */
4581            txdma_err = readl(&iomem->txdma.tx_dma_error);
4582
4583            dev_warn(&adapter->pdev->dev,
4584                    "TXDMA_ERR interrupt, error = %d\n",
4585                    txdma_err);
4586        }
4587
4588        /* Handle Free Buffer Ring 0 and 1 Low interrupt */
4589        if (status &
4590            (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4591            /*
4592             * This indicates the number of unused buffers in
4593             * RXDMA free buffer ring 0 is <= the limit you
4594             * programmed. Free buffer resources need to be
4595             * returned. Free buffers are consumed as packets
4596             * are passed from the network to the host. The host
4597             * becomes aware of the packets from the contents of
4598             * the packet status ring. This ring is queried when
4599             * the packet done interrupt occurs. Packets are then
4600             * passed to the OS. When the OS is done with the
4601             * packets the resources can be returned to the
4602             * ET1310 for re-use. This interrupt is one method of
4603             * returning resources.
4604             */
4605
4606            /* If the user has flow control on, then we will
4607             * send a pause packet, otherwise just exit
4608             */
4609            if (adapter->flowcontrol == FLOW_TXONLY ||
4610                adapter->flowcontrol == FLOW_BOTH) {
4611                u32 pm_csr;
4612
4613                /* Tell the device to send a pause packet via
4614                 * the back pressure register (bp req and
4615                 * bp xon/xoff)
4616                 */
4617                pm_csr = readl(&iomem->global.pm_csr);
4618                if (!et1310_in_phy_coma(adapter))
4619                    writel(3, &iomem->txmac.bp_ctrl);
4620            }
4621        }
4622
4623        /* Handle Packet Status Ring Low Interrupt */
4624        if (status & ET_INTR_RXDMA_STAT_LOW) {
4625
4626            /*
4627             * Same idea as with the two Free Buffer Rings.
4628             * Packets going from the network to the host each
4629             * consume a free buffer resource and a packet status
4630             * resource. These resources are passed to the OS.
4631             * When the OS is done with the resources, they need
4632             * to be returned to the ET1310. This is one method
4633             * of returning the resources.
4634             */
4635        }
4636
4637        /* Handle RXDMA Error Interrupt */
4638        if (status & ET_INTR_RXDMA_ERR) {
4639            /*
4640             * The rxdma_error interrupt is sent when a time-out
4641             * on a request issued by the JAGCore has occurred or
4642             * a completion is returned with an un-successful
4643             * status. In both cases the request is considered
4644             * complete. The JAGCore will automatically re-try the
4645             * request in question. Normally information on events
4646             * like these are sent to the host using the "Advanced
4647             * Error Reporting" capability. This interrupt is
4648             * another way of getting similar information. The
4649             * only thing required is to clear the interrupt by
4650             * reading the ISR in the global resources. The
4651             * JAGCore will do a re-try on the request. Normally
4652             * you should never see this interrupt. If you start
4653             * to see this interrupt occurring frequently then
4654             * something bad has occurred. A reset might be the
4655             * thing to do.
4656             */
4657            /* TRAP();*/
4658
4659            dev_warn(&adapter->pdev->dev,
4660                    "RxDMA_ERR interrupt, error %x\n",
4661                    readl(&iomem->txmac.tx_test));
4662        }
4663
4664        /* Handle the Wake on LAN Event */
4665        if (status & ET_INTR_WOL) {
4666            /*
4667             * This is a secondary interrupt for wake on LAN.
4668             * The driver should never see this, if it does,
4669             * something serious is wrong. We will TRAP the
4670             * message when we are in DBG mode, otherwise we
4671             * will ignore it.
4672             */
4673            dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4674        }
4675
4676        /* Let's move on to the TxMac */
4677        if (status & ET_INTR_TXMAC) {
4678            u32 err = readl(&iomem->txmac.err);
4679
4680            /*
4681             * When any of the errors occur and TXMAC generates
4682             * an interrupt to report these errors, it usually
4683             * means that TXMAC has detected an error in the data
4684             * stream retrieved from the on-chip Tx Q. All of
4685             * these errors are catastrophic and TXMAC won't be
4686             * able to recover data when these errors occur. In
4687             * a nutshell, the whole Tx path will have to be reset
4688             * and re-configured afterwards.
4689             */
4690            dev_warn(&adapter->pdev->dev,
4691                    "TXMAC interrupt, error 0x%08x\n",
4692                    err);
4693
4694            /* If we are debugging, we want to see this error,
4695             * otherwise we just want the device to be reset and
4696             * continue
4697             */
4698        }
4699
4700        /* Handle RXMAC Interrupt */
4701        if (status & ET_INTR_RXMAC) {
4702            /*
4703             * These interrupts are catastrophic to the device,
4704             * what we need to do is disable the interrupts and
4705             * set the flag to cause us to reset so we can solve
4706             * this issue.
4707             */
4708            /* MP_SET_FLAG( adapter,
4709                        fMP_ADAPTER_HARDWARE_ERROR); */
4710
4711            dev_warn(&adapter->pdev->dev,
4712              "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4713                    readl(&iomem->rxmac.err_reg));
4714
4715            dev_warn(&adapter->pdev->dev,
4716                    "Enable 0x%08x, Diag 0x%08x\n",
4717                    readl(&iomem->rxmac.ctrl),
4718                    readl(&iomem->rxmac.rxq_diag));
4719
4720            /*
4721             * If we are debugging, we want to see this error,
4722             * otherwise we just want the device to be reset and
4723             * continue
4724             */
4725        }
4726
4727        /* Handle MAC_STAT Interrupt */
4728        if (status & ET_INTR_MAC_STAT) {
4729            /*
4730             * This means at least one of the un-masked counters
4731             * in the MAC_STAT block has rolled over. Use this
4732             * to maintain the top, software managed bits of the
4733             * counter(s).
4734             */
4735            et1310_handle_macstat_interrupt(adapter);
4736        }
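
		/*
		 * Illustrative sketch (not driver code): the usual way to
		 * widen a rolling 32-bit hardware counter into a 64-bit
		 * software total, which is what the rollover handling above
		 * amounts to. The names sw_total, hw_count and prev_hw_count
		 * are hypothetical; the real logic lives in
		 * et1310_handle_macstat_interrupt().
		 *
		 *	if (hw_count < prev_hw_count)	// counter wrapped
		 *		sw_total += 1ULL << 32;
		 *	sw_total = (sw_total & ~0xffffffffULL) | hw_count;
		 *	prev_hw_count = hw_count;
		 */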

		/* Handle SLV Timeout Interrupt */
		if (status & ET_INTR_SLV_TIMEOUT) {
			/*
			 * This means a timeout has occurred on a read or
			 * write request to one of the JAGCore registers. The
			 * Global Resources block has terminated the request
			 * and on a read request, returned a "fake" value.
			 * The most likely reasons are: Bad Address or the
			 * addressed module is in a power-down state and
			 * can't respond.
			 */
		}
	}
	et131x_enable_interrupts(adapter);
}

/**
 * et131x_stats - Return the current device statistics.
 * @netdev: device whose stats are being queried
 *
 * Returns a pointer to the device's net_device_stats structure
 */
static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct net_device_stats *stats = &adapter->net_stats;
	struct ce_stats *devstat = &adapter->stats;

	stats->rx_errors = devstat->rx_length_errs +
			   devstat->rx_align_errs +
			   devstat->rx_crc_errs +
			   devstat->rx_code_violations +
			   devstat->rx_other_errs;
	stats->tx_errors = devstat->tx_max_pkt_errs;
	stats->multicast = devstat->multicast_pkts_rcvd;
	stats->collisions = devstat->tx_collisions;

	stats->rx_length_errors = devstat->rx_length_errs;
	stats->rx_over_errors = devstat->rx_overflows;
	stats->rx_crc_errors = devstat->rx_crc_errs;

	/* NOTE: These stats don't have corresponding values in CE_STATS,
	 * so we're going to have to update these directly from within the
	 * TX/RX code
	 */
	/* stats->rx_bytes = 20; devstat->; */
	/* stats->tx_bytes = 20; devstat->; */
	/* stats->rx_dropped = devstat->; */
	/* stats->tx_dropped = devstat->; */

	/* NOTE: Not used, can't find analogous statistics */
	/* stats->rx_frame_errors = devstat->; */
	/* stats->rx_fifo_errors = devstat->; */
	/* stats->rx_missed_errors = devstat->; */

	/* stats->tx_aborted_errors = devstat->; */
	/* stats->tx_carrier_errors = devstat->; */
	/* stats->tx_fifo_errors = devstat->; */
	/* stats->tx_heartbeat_errors = devstat->; */
	/* stats->tx_window_errors = devstat->; */
	return stats;
}

/**
 * et131x_open - Open the device for use.
 * @netdev: device to be opened
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_open(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	unsigned int irq = pdev->irq;
	int result;

	/* Start the timer to track NIC errors */
	init_timer(&adapter->error_timer);
	adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
	adapter->error_timer.function = et131x_error_timer_handler;
	adapter->error_timer.data = (unsigned long)adapter;
	add_timer(&adapter->error_timer);
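
	/*
	 * Illustrative note: as the arithmetic above implies,
	 * TX_ERROR_PERIOD is in milliseconds, so the expiry computation is
	 * the open-coded equivalent of the standard helper, which avoids
	 * the manual HZ scaling:
	 *
	 *	adapter->error_timer.expires =
	 *		jiffies + msecs_to_jiffies(TX_ERROR_PERIOD);
	 */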

	result = request_irq(irq, et131x_isr,
			     IRQF_SHARED, netdev->name, netdev);
	if (result) {
		dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
		return result;
	}

	adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;

	et131x_up(netdev);

	return result;
}

/**
 * et131x_close - Close the device
 * @netdev: device to be closed
 *
 * Returns the result of del_timer_sync() on the error timer: 1 if the
 * timer was still pending, otherwise 0
 */
static int et131x_close(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_down(netdev);

	adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
	free_irq(adapter->pdev->irq, netdev);

	/* Stop the error timer */
	return del_timer_sync(&adapter->error_timer);
}

/**
 * et131x_ioctl - The I/O Control handler for the driver
 * @netdev: device on which the control request is being made
 * @reqbuf: a pointer to the IOCTL request buffer
 * @cmd: the IOCTL command code
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
			int cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	if (!adapter->phydev)
		return -EINVAL;

	return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
}
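
/*
 * Note: delegating to phy_mii_ioctl() gives userspace MII access through
 * the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG requests (as issued by
 * tools such as mii-tool), with phylib handling the register access.
 */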

/**
 * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
 * @adapter: pointer to our private adapter structure
 *
 * FIXME: lot of dups with MAC code
 *
 * Returns 0 on success, errno on failure
 */
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
	int filter = adapter->packet_filter;
	int status = 0;
	u32 ctrl;
	u32 pf_ctrl;

	ctrl = readl(&adapter->regs->rxmac.ctrl);
	pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);

	/* Default to disabled packet filtering (bit 2 of the rxmac control
	 * register). Enable it in the individual cases below that require
	 * the device to filter something
	 */
	ctrl |= 0x04;

	/* Set us to be in promiscuous mode so we receive everything; this
	 * is also true when we get a packet filter of 0
	 */
	if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
		pf_ctrl &= ~7; /* Clear filter bits */
	else {
		/*
		 * Set us up with Multicast packet filtering. Three cases are
		 * possible - (1) we have a multicast list, (2) we receive ALL
		 * multicast entries or (3) we receive none.
		 */
		if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
			pf_ctrl &= ~2; /* Multicast filter bit */
		else {
			et1310_setup_device_for_multicast(adapter);
			pf_ctrl |= 2;
			ctrl &= ~0x04;
		}

		/* Set us up with Unicast packet filtering */
		if (filter & ET131X_PACKET_TYPE_DIRECTED) {
			et1310_setup_device_for_unicast(adapter);
			pf_ctrl |= 4;
			ctrl &= ~0x04;
		}

		/* Set us up with Broadcast packet filtering */
		if (filter & ET131X_PACKET_TYPE_BROADCAST) {
			pf_ctrl |= 1; /* Broadcast filter bit */
			ctrl &= ~0x04;
		} else
			pf_ctrl &= ~1;

		/* Setup the receive mac configuration registers - Packet
		 * Filter control + the enable / disable for packet filter
		 * in the control reg.
		 */
		writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
		writel(ctrl, &adapter->regs->rxmac.ctrl);
	}
	return status;
}

/**
 * et131x_multicast - The handler to configure multicasting on the interface
 * @netdev: a pointer to a net_device struct representing the device
 */
static void et131x_multicast(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	int packet_filter;
	unsigned long flags;
	struct netdev_hw_addr *ha;
	int i;

	spin_lock_irqsave(&adapter->lock, flags);

	/* Before we modify the platform-independent filter flags, store them
	 * locally. This allows us to determine if anything's changed and if
	 * we even need to bother the hardware
	 */
	packet_filter = adapter->packet_filter;

	/* Clear the 'multicast' flag locally; because we only have a single
	 * flag to check multicast, and multiple multicast addresses can be
	 * set, this is the easiest way to determine if more than one
	 * multicast address is being set.
	 */
	packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;

	/* Check the net_device flags and set the device independent flags
	 * accordingly
	 */

	if (netdev->flags & IFF_PROMISC)
		adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
	else
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;

	if (netdev->flags & IFF_ALLMULTI)
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
		adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;

	if (netdev_mc_count(netdev) < 1) {
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
		adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
	} else
		adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;

	/* Set values in the private adapter struct */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == NIC_MAX_MCAST_LIST)
			break;
		memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
	}
	adapter->multicast_addr_count = i;

	/* Are the new flags different from the previous ones? If not, then no
	 * action is required
	 *
	 * NOTE - This block will always update the multicast_list with the
	 * hardware, even if the addresses aren't the same.
	 */
	if (packet_filter != adapter->packet_filter) {
		/* Call the device's filter function */
		et131x_set_packet_filter(adapter);
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
}

/**
 * et131x_tx - The handler to tx a packet on the device
 * @skb: data to be Tx'd
 * @netdev: device on which data is to be Tx'd
 *
 * Returns NETDEV_TX_OK if the packet was handled (sent or dropped), or
 * NETDEV_TX_BUSY if the hardware had no resources and the core should
 * requeue the packet
 */
static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* stop the queue if it's getting full */
	if (adapter->tx_ring.used >= NUM_TCB - 1 &&
	    !netif_queue_stopped(netdev))
		netif_stop_queue(netdev);

	/* Save the timestamp for the TX timeout watchdog */
	netdev->trans_start = jiffies;

	/* Call the device-specific data Tx routine */
	status = et131x_send_packets(skb, netdev);

	/* Check status and manage the netif queue if necessary: -ENOMEM
	 * means the TCB ring is full, so ask the core to retry later;
	 * any other error means the packet was dropped, so report OK
	 */
	if (status != 0) {
		if (status == -ENOMEM)
			status = NETDEV_TX_BUSY;
		else
			status = NETDEV_TX_OK;
	}
	return status;
}

/**
 * et131x_tx_timeout - Timeout handler
 * @netdev: a pointer to a net_device struct representing the device
 *
 * The handler called when a Tx request times out. The timeout period is
 * specified by the 'watchdog_timeo' element in the net_device structure
 * (see et131x_pci_setup() to see how this value is set).
 */
static void et131x_tx_timeout(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct tcb *tcb;
	unsigned long flags;

	/* If the device is closed, ignore the timeout */
	if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
		return;

	/* Any nonrecoverable hardware error?
	 * Checks adapter->flags for any failure in phy reading
	 */
	if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
		return;

	/* Hardware failure? */
	if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
		dev_err(&adapter->pdev->dev, "hardware error - reset\n");
		return;
	}

	/* Is send stuck? */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	if (tcb != NULL) {
		tcb->count++;

		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
			spin_unlock_irqrestore(&adapter->tcb_send_qlock,
					       flags);

			dev_warn(&adapter->pdev->dev,
				"Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
				tcb->index,
				tcb->flags);

			adapter->net_stats.tx_errors++;

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
			return;
		}
	}

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}

/**
 * et131x_change_mtu - The handler called to change the MTU for the device
 * @netdev: device whose MTU is to be changed
 * @new_mtu: the desired MTU
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Make sure the requested MTU is valid */
	if (new_mtu < 64 || new_mtu > 9216)
		return -EINVAL;

	et131x_disable_txrx(netdev);
	et131x_handle_send_interrupt(adapter);
	et131x_handle_recv_interrupt(adapter);

	/* Set the new MTU */
	netdev->mtu = new_mtu;

	/* Free Rx DMA memory */
	et131x_adapter_memory_free(adapter);

	/* Set the config parameter for Jumbo Packet support: the frame
	 * size is the MTU plus the 14-byte Ethernet header
	 */
	adapter->registry_jumbo_packet = new_mtu + 14;
	et131x_soft_reset(adapter);

	/* Alloc and init Rx DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_warn(&adapter->pdev->dev,
			"Change MTU failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);

	et131x_hwaddr_init(adapter);
	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);

	et131x_enable_txrx(netdev);

	return result;
}
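
/*
 * Note: the core invokes this handler through dev_set_mtu(), e.g. when an
 * administrator runs "ip link set ethX mtu 9000"; the 64..9216 range check
 * above is the driver's own, covering standard and jumbo frames.
 */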

/**
 * et131x_set_mac_addr - handler to change the MAC address for the device
 * @netdev: device whose MAC is to be changed
 * @new_mac: the desired MAC address
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 *
 * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
 */
static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
{
	int result = 0;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *address = new_mac;

	if (adapter == NULL)
		return -ENODEV;

	/* Make sure the requested MAC is valid */
	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	et131x_disable_txrx(netdev);
	et131x_handle_send_interrupt(adapter);
	et131x_handle_recv_interrupt(adapter);

	/* Set the new MAC address */
	memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);

	netdev_info(netdev, "Setting MAC address to %pM\n",
		    netdev->dev_addr);

	/* Free Rx DMA memory */
	et131x_adapter_memory_free(adapter);

	et131x_soft_reset(adapter);

	/* Alloc and init Rx DMA memory */
	result = et131x_adapter_memory_alloc(adapter);
	if (result != 0) {
		dev_err(&adapter->pdev->dev,
			"Change MAC failed; couldn't re-alloc DMA memory\n");
		return result;
	}

	et131x_init_send(adapter);

	et131x_hwaddr_init(adapter);

	/* Init the device with the new settings */
	et131x_adapter_setup(adapter);

	et131x_enable_txrx(netdev);

	return result;
}

static const struct net_device_ops et131x_netdev_ops = {
	.ndo_open = et131x_open,
	.ndo_stop = et131x_close,
	.ndo_start_xmit = et131x_tx,
	.ndo_set_rx_mode = et131x_multicast,
	.ndo_tx_timeout = et131x_tx_timeout,
	.ndo_change_mtu = et131x_change_mtu,
	.ndo_set_mac_address = et131x_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_get_stats = et131x_stats,
	.ndo_do_ioctl = et131x_ioctl,
};

/**
 * et131x_pci_setup - Perform device initialization
 * @pdev: a pointer to the device's pci_dev structure
 * @ent: this device's entry in the pci_device_id table
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 *
 * Registered in the pci_driver structure, this function is called when the
 * PCI subsystem finds a new PCI device which matches the information
 * contained in the pci_device_id table. This routine is the equivalent to
 * a device insertion routine.
 */
static int __devinit et131x_pci_setup(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct et131x_adapter *adapter;
	int rc;
	int ii;

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "pci_enable_device() failed\n");
		goto out;
	}

	/* Perform some basic PCI checks */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Can't find PCI device's base address\n");
		rc = -ENODEV;
		goto err_disable;
	}

	rc = pci_request_regions(pdev, DRIVER_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't get PCI resources\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	/* Check the DMA addressing support of this device: try 64-bit
	 * first, then fall back to 32-bit. Note that dma_set_mask()
	 * returns 0 on success, hence the negated tests.
	 */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_err(&pdev->dev,
			  "Unable to obtain 64 bit DMA for consistent allocations\n");
			goto err_release_res;
		}
	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc < 0) {
			dev_err(&pdev->dev,
			  "Unable to obtain 32 bit DMA for consistent allocations\n");
			goto err_release_res;
		}
	} else {
		dev_err(&pdev->dev, "No usable DMA addressing method\n");
		rc = -EIO;
		goto err_release_res;
	}
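
	/*
	 * Illustrative note: on kernels that provide it, the mask/coherent
	 * call pairs above can each be collapsed with
	 * dma_set_mask_and_coherent(), e.g.:
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
	 *		dev_err(&pdev->dev, "No usable DMA configuration\n");
	 *		rc = -EIO;
	 *		goto err_release_res;
	 *	}
	 */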

	/* Allocate netdev and private adapter structs */
	netdev = alloc_etherdev(sizeof(struct et131x_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
		rc = -ENOMEM;
		goto err_release_res;
	}

	netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
	netdev->netdev_ops = &et131x_netdev_ops;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);

	adapter = et131x_adapter_init(netdev, pdev);

	rc = et131x_pci_init(adapter, pdev);
	if (rc < 0)
		goto err_free_dev;

	/* Map the bus-relative registers to system virtual memory */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "Cannot map device registers\n");
		rc = -ENOMEM;
		goto err_free_dev;
	}

	/* If Phy COMA mode was enabled when we went down, disable it here. */
	writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);

	/* Issue a global reset to the et1310 */
	et131x_soft_reset(adapter);

	/* Disable all interrupts (paranoid) */
	et131x_disable_interrupts(adapter);

	/* Allocate DMA memory */
	rc = et131x_adapter_memory_alloc(adapter);
	if (rc < 0) {
		dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
		goto err_iounmap;
	}

	/* Init send data structures */
	et131x_init_send(adapter);

	/* Set up the task structure for the ISR's deferred handler */
	INIT_WORK(&adapter->task, et131x_isr_handler);

	/* Copy address into the net_device struct */
	memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);

	/* Init variable for counting how long we do not have link status */
	adapter->boot_coma = 0;
	et1310_disable_phy_coma(adapter);

	rc = -ENOMEM;

	/* Setup the mii_bus struct */
	adapter->mii_bus = mdiobus_alloc();
	if (!adapter->mii_bus) {
		dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
		goto err_mem_free;
	}

	adapter->mii_bus->name = "et131x_eth_mii";
	snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
	adapter->mii_bus->priv = netdev;
	adapter->mii_bus->read = et131x_mdio_read;
	adapter->mii_bus->write = et131x_mdio_write;
	adapter->mii_bus->reset = et131x_mdio_reset;
	adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!adapter->mii_bus->irq) {
		dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
		goto err_mdio_free;
	}

	for (ii = 0; ii < PHY_MAX_ADDR; ii++)
		adapter->mii_bus->irq[ii] = PHY_POLL;

	rc = mdiobus_register(adapter->mii_bus);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_mdio_free_irq;
	}

	rc = et131x_mii_probe(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to probe MII bus\n");
		goto err_mdio_unregister;
	}

	/* Setup et1310 as per the documentation */
	et131x_adapter_setup(adapter);

	/* We can enable interrupts now
	 *
	 * NOTE - Because registration of interrupt handler is done in the
	 * device's open(), defer enabling device interrupts to that
	 * point
	 */

	/* Register the net_device struct with the Linux network layer */
	rc = register_netdev(netdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "register_netdev() failed\n");
		goto err_phy_disconnect;
	}

	/* Stash a pointer to the net_device in the PCI device's driver
	 * data, so it can be retrieved later (e.g. on remove or suspend).
	 */
	pci_set_drvdata(pdev, netdev);
out:
	return rc;

err_phy_disconnect:
	phy_disconnect(adapter->phydev);
err_mdio_unregister:
	mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
	kfree(adapter->mii_bus->irq);
err_mdio_free:
	mdiobus_free(adapter->mii_bus);
err_mem_free:
	et131x_adapter_memory_free(adapter);
err_iounmap:
	iounmap(adapter->regs);
err_free_dev:
	pci_dev_put(pdev);
	free_netdev(netdev);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	goto out;
}

static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
	{ PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
	{0,}
};
MODULE_DEVICE_TABLE(pci, et131x_pci_table);

static struct pci_driver et131x_driver = {
	.name = DRIVER_NAME,
	.id_table = et131x_pci_table,
	.probe = et131x_pci_setup,
	.remove = __devexit_p(et131x_pci_remove),
	.driver.pm = ET131X_PM_OPS,
};

module_pci_driver(et131x_driver);
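
/*
 * Illustrative note: module_pci_driver() generates the module init/exit
 * boilerplate, roughly equivalent to the following (the function names
 * here are hypothetical):
 *
 *	static int __init et131x_init_module(void)
 *	{
 *		return pci_register_driver(&et131x_driver);
 *	}
 *	module_init(et131x_init_module);
 *
 *	static void __exit et131x_exit_module(void)
 *	{
 *		pci_unregister_driver(&et131x_driver);
 *	}
 *	module_exit(et131x_exit_module);
 */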