package/system/ep80579-drivers/patches/001-igbe_update.patch

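Update Intel's iegbe/gcu drivers for the EP80579 embedded gigabit MAC to
release Embedded.Release.Patch.L.1.0.7-5 and port them to current kernel
APIs. As the hunks below show, this bumps the copyright years and version
strings, replaces pci_module_init() with pci_register_driver() and
pci_find_device() with pci_get_device(), drops the struct pt_regs argument
from interrupt handlers, converts SA_SHIRQ to IRQF_SHARED, moves the RX
polling path to struct napi_struct, and splits num_queues into separate
num_tx_queues/num_rx_queues counters.
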
--- a/Embedded/src/GbE/gcu.h
+++ b/Embedded/src/GbE/gcu.h
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
--- a/Embedded/src/GbE/gcu_if.c
+++ b/Embedded/src/GbE/gcu_if.c
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
@@ -330,10 +330,17 @@ gcu_write_verify(uint32_t phy_num, uint3
  */
 void gcu_iegbe_resume(struct pci_dev *pdev)
 {
+#if ( ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) ) && \
+      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) )
+    struct net_device *netdev = pci_get_drvdata(pdev);
+    struct gcu_adapter *adapter = netdev_priv(netdev);
+#endif
+
     GCU_DBG("%s\n", __func__);
 
     pci_restore_state(pdev);
-    pci_enable_device(pdev);
+    if(pci_enable_device(pdev))
+        GCU_DBG("pci_enable_device failed!\n");
 
     return;
 }
@@ -348,6 +355,12 @@ EXPORT_SYMBOL(gcu_iegbe_resume);
  */
 int gcu_iegbe_suspend(struct pci_dev *pdev, uint32_t state)
 {
+#if ( ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) ) && \
+      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) )
+    struct net_device *netdev = pci_get_drvdata(pdev);
+    struct gcu_adapter *adapter = netdev_priv(netdev);
+#endif
+
     GCU_DBG("%s\n", __func__);
 
     pci_save_state(pdev);
--- a/Embedded/src/GbE/gcu_if.h
+++ b/Embedded/src/GbE/gcu_if.h
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
--- a/Embedded/src/GbE/gcu_main.c
+++ b/Embedded/src/GbE/gcu_main.c
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
@@ -94,6 +94,7 @@ static struct pci_driver gcu_driver = {
 
 static struct gcu_adapter *global_adapter = 0;
 static spinlock_t global_adapter_spinlock = SPIN_LOCK_UNLOCKED;
+static unsigned long g_intflags = 0;
 
 MODULE_AUTHOR("Intel(R) Corporation");
 MODULE_DESCRIPTION("Global Configuration Unit Driver");
@@ -124,7 +125,7 @@ gcu_init_module(void)
 
     printk(KERN_INFO "%s\n", gcu_copyright);
 
-    ret = pci_module_init(&gcu_driver);
+    ret = pci_register_driver(&gcu_driver);
     if(ret >= 0) {
         register_reboot_notifier(&gcu_notifier_reboot);
     }
@@ -199,8 +200,6 @@ gcu_probe(struct pci_dev *pdev,
         return -ENOMEM;
     }
 
-    SET_MODULE_OWNER(adapter);
-
    pci_set_drvdata(pdev, adapter);
 
    adapter->pdev = pdev;
@@ -238,7 +237,6 @@ gcu_probe(struct pci_dev *pdev,
    return 0;
 }
 
-
 /**
  * gcu_probe_err - gcu_probe error handler
  * @err: gcu_err_type
@@ -295,7 +293,7 @@ gcu_notify_reboot(struct notifier_block
    case SYS_DOWN:
    case SYS_HALT:
    case SYS_POWER_OFF:
-        while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+        while((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
            if(pci_dev_driver(pdev) == &gcu_driver){
                gcu_suspend(pdev, 0x3);
            }
@@ -318,6 +316,11 @@ static int
 gcu_suspend(struct pci_dev *pdev, uint32_t state)
 {
    /*struct gcu_adapter *adapter = pci_get_drvdata(pdev); */
+#if ( ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,6) ) && \
+      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) )
+    struct net_device *netdev = pci_get_drvdata(pdev);
+    struct gcu_adapter *adapter = netdev_priv(netdev);
+#endif
 
    GCU_DBG("%s\n", __func__);
 
@@ -338,7 +341,6 @@ gcu_suspend(struct pci_dev *pdev, uint32
    return state;
 }
 
-
 /**
  * alloc_gcu_adapter
  *
@@ -412,7 +414,7 @@ gcu_get_adapter(void)
        return NULL;
    }
 
-    spin_lock(&global_adapter_spinlock);
+    spin_lock_irqsave(&global_adapter_spinlock, g_intflags);
 
    return global_adapter;
 }
@@ -437,7 +439,7 @@ gcu_release_adapter(const struct gcu_ada
        *adapter = 0;
    }
 
-    spin_unlock(&global_adapter_spinlock);
+    spin_unlock_irqrestore(&global_adapter_spinlock, g_intflags);
 
    return;
 }
--- a/Embedded/src/GbE/gcu_reg.h
+++ b/Embedded/src/GbE/gcu_reg.h
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
--- a/Embedded/src/GbE/iegbe.7
+++ b/Embedded/src/GbE/iegbe.7
@@ -1,7 +1,7 @@
 
 .\" GPL LICENSE SUMMARY
 .\"
-.\" Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+.\" Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 .\"
 .\" This program is free software; you can redistribute it and/or modify
 .\" it under the terms of version 2 of the GNU General Public License as
@@ -21,7 +21,7 @@
 .\" Contact Information:
 .\" Intel Corporation
 .\"
-.\" version: Embedded.L.1.0.34
+.\" version: Embedded.Release.Patch.L.1.0.7-5
 
 .\" LICENSE
 .\"
--- a/Embedded/src/GbE/iegbe_ethtool.c
+++ b/Embedded/src/GbE/iegbe_ethtool.c
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
@@ -132,22 +132,6 @@ static const struct iegbe_stats iegbe_gs
    { "cpp_master", E1000_STAT(icr_cpp_master) },
    { "stat", E1000_STAT(icr_stat) },
 #endif
-#ifdef IEGBE_GBE_WORKAROUND
-    { "txqec", E1000_STAT(stats.txqec) },
-    { "tx_next_to_clean", E1000_STAT(stats.tx_next_to_clean) },
-    { "tx_next_to_use", E1000_STAT(stats.tx_next_to_use) },
-    { "num_tx_queues", E1000_STAT(stats.num_tx_queues) },
-
-    { "num_rx_buf_alloc", E1000_STAT(stats.num_rx_buf_alloc) },
-    { "rx_next_to_clean", E1000_STAT(stats.rx_next_to_clean) },
-    { "rx_next_to_use", E1000_STAT(stats.rx_next_to_use) },
-    { "cc_gt_num_rx", E1000_STAT(stats.cc_gt_num_rx) },
-    { "tx_hnet", E1000_STAT(stats.tx_hnet) },
-    { "tx_hnentu", E1000_STAT(stats.tx_hnentu) },
-    { "RUC", E1000_STAT(stats.ruc) },
-    { "RFC", E1000_STAT(stats.rfc) },
-
-#endif
 };
 #define E1000_STATS_LEN \
    sizeof(iegbe_gstrings_stats) / sizeof(struct iegbe_stats)
@@ -158,7 +142,7 @@ static const char iegbe_gstrings_test[][
    "Interrupt test (offline)", "Loopback test (offline)",
    "Link test (on/offline)"
 };
-#define E1000_TEST_LEN (sizeof(iegbe_gstrings_test) / (ETH_GSTRING_LEN))
+#define E1000_TEST_LEN (sizeof(iegbe_gstrings_test) / ETH_GSTRING_LEN)
 #endif /* ETHTOOL_TEST */
 
 #define E1000_REGS_LEN 0x20
@@ -176,9 +160,7 @@ iegbe_get_settings(struct net_device *ne
                           SUPPORTED_10baseT_Full |
                           SUPPORTED_100baseT_Half |
                           SUPPORTED_100baseT_Full |
-#ifndef IEGBE_10_100_ONLY
                           SUPPORTED_1000baseT_Full|
-#endif
                           SUPPORTED_Autoneg |
                           SUPPORTED_TP);
 
@@ -259,21 +241,13 @@ iegbe_set_settings(struct net_device *ne
                          ADVERTISED_10baseT_Full |
                          ADVERTISED_100baseT_Half |
                          ADVERTISED_100baseT_Full |
-#ifndef IEGBE_10_100_ONLY
                          ADVERTISED_1000baseT_Full|
-#endif
-
                          ADVERTISED_Autoneg |
                          ADVERTISED_TP;
                          ecmd->advertising = hw->autoneg_advertised;
         }
-    } else {
-        uint16_t duplex;
-
-        // ethtool uses DUPLEX_FULL/DUPLEX_HALF
-        // the driver needs FULL_DUPLEX/HALF_DUPLEX
-        duplex = (ecmd->duplex == DUPLEX_FULL) ? FULL_DUPLEX : HALF_DUPLEX;
-        if(iegbe_set_spd_dplx(adapter, ecmd->speed + duplex))
+    } else
+        if(iegbe_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)){
            return -EINVAL;
    }
    /* reset the link */
@@ -728,8 +702,8 @@ iegbe_set_ringparam(struct net_device *n
    struct iegbe_rx_ring *rxdr, *rx_old, *rx_new;
    int i, err, tx_ring_size, rx_ring_size;
 
-    tx_ring_size = sizeof(struct iegbe_tx_ring) * adapter->num_queues;
-    rx_ring_size = sizeof(struct iegbe_rx_ring) * adapter->num_queues;
+    tx_ring_size = sizeof(struct iegbe_tx_ring) * adapter->num_tx_queues;
+    rx_ring_size = sizeof(struct iegbe_rx_ring) * adapter->num_rx_queues;
 
    if (netif_running(adapter->netdev)){
        iegbe_down(adapter);
@@ -768,10 +742,10 @@ iegbe_set_ringparam(struct net_device *n
        E1000_MAX_TXD : E1000_MAX_82544_TXD));
    E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
-    for (i = 0; i < adapter->num_queues; i++) {
-        txdr[i].count = txdr->count;
-        rxdr[i].count = rxdr->count;
-    }
+    for (i = 0; i < adapter->num_tx_queues; i++)
+        txdr[i].count = txdr->count;
+    for (i = 0; i < adapter->num_rx_queues; i++)
+        rxdr[i].count = rxdr->count;
 
    if(netif_running(adapter->netdev)) {
        /* Try to get new resources before deleting old */
@@ -950,8 +924,7 @@ iegbe_eeprom_test(struct iegbe_adapter *
 
 static irqreturn_t
 iegbe_test_intr(int irq,
-                void *data,
-                struct pt_regs *regs)
+                void *data)
 {
    struct net_device *netdev = (struct net_device *) data;
    struct iegbe_adapter *adapter = netdev_priv(netdev);
@@ -973,7 +946,7 @@ iegbe_intr_test(struct iegbe_adapter *ad
    /* Hook up test interrupt handler just for this test */
     if(!request_irq(irq, &iegbe_test_intr, 0, netdev->name, netdev)) {
         shared_int = FALSE;
-    } else if(request_irq(irq, &iegbe_test_intr, SA_SHIRQ,
+    } else if(request_irq(irq, &iegbe_test_intr, IRQF_SHARED,
                  netdev->name, netdev)){
        *data = 1;
        return -1;
@@ -1393,7 +1366,7 @@ iegbe_set_phy_loopback(struct iegbe_adap
             * attempt this 10 times.
             */
            while(iegbe_nonintegrated_phy_loopback(adapter) &&
-                  count++ < 0xa) { };
+                  count++ < 0xa);
            if(count < 0xb) {
                return 0;
            }
--- a/Embedded/src/GbE/iegbe.h
+++ b/Embedded/src/GbE/iegbe.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -21,7 +21,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
@@ -127,9 +127,12 @@ struct iegbe_adapter;
 #define E1000_MIN_RXD 80
 #define E1000_MAX_82544_RXD 4096
 
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 /* Supported Rx Buffer Sizes */
 #define E1000_RXBUFFER_128 128 /* Used for packet split */
 #define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
 #define E1000_RXBUFFER_2048 2048
 #define E1000_RXBUFFER_4096 4096
 #define E1000_RXBUFFER_8192 8192
@@ -164,11 +167,9 @@ struct iegbe_adapter;
 #define E1000_MASTER_SLAVE iegbe_ms_hw_default
 #endif
 
-#ifdef NETIF_F_HW_VLAN_TX
-#define E1000_MNG_VLAN_NONE -1
-#endif
+#define E1000_MNG_VLAN_NONE (-1)
 /* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
 
 /* only works for sizes that are powers of 2 */
 #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
@@ -206,6 +207,7 @@ struct iegbe_tx_ring {
    spinlock_t tx_lock;
    uint16_t tdh;
    uint16_t tdt;
+    boolean_t last_tx_tso;
    uint64_t pkt;
 };
 
@@ -228,6 +230,9 @@ struct iegbe_rx_ring {
    struct iegbe_ps_page *ps_page;
    struct iegbe_ps_page_dma *ps_page_dma;
 
+    /* cpu for rx queue */
+    int cpu;
+
    uint16_t rdh;
    uint16_t rdt;
    uint64_t pkt;
@@ -252,10 +257,8 @@ struct iegbe_adapter {
    struct timer_list tx_fifo_stall_timer;
    struct timer_list watchdog_timer;
    struct timer_list phy_info_timer;
-#ifdef NETIF_F_HW_VLAN_TX
    struct vlan_group *vlgrp;
        uint16_t mng_vlan_id;
-#endif
    uint32_t bd_number;
    uint32_t rx_buffer_len;
    uint32_t part_num;
@@ -265,8 +268,18 @@ struct iegbe_adapter {
    uint16_t link_speed;
    uint16_t link_duplex;
    spinlock_t stats_lock;
-    atomic_t irq_sem;
-    struct work_struct tx_timeout_task;
+    spinlock_t tx_queue_lock;
+    unsigned int total_tx_bytes;
+    unsigned int total_tx_packets;
+    unsigned int total_rx_bytes;
+    unsigned int total_rx_packets;
+    /* Interrupt Throttle Rate */
+    uint32_t itr;
+    uint32_t itr_setting;
+    uint16_t tx_itr;
+    uint16_t rx_itr;
+
+    struct work_struct reset_task;
    uint8_t fc_autoneg;
 
 #ifdef ETHTOOL_PHYS_ID
@@ -276,9 +289,8 @@ struct iegbe_adapter {
 
    /* TX */
    struct iegbe_tx_ring *tx_ring; /* One per active queue */
-#ifdef CONFIG_E1000_MQ
-    struct iegbe_tx_ring **cpu_tx_ring; /* per-cpu */
-#endif
+    unsigned int restart_queue;
+    unsigned long tx_queue_len;
    uint32_t txd_cmd;
    uint32_t tx_int_delay;
    uint32_t tx_abs_int_delay;
@@ -286,46 +298,33 @@ struct iegbe_adapter {
    uint64_t gotcl_old;
    uint64_t tpt_old;
    uint64_t colc_old;
+    uint32_t tx_timeout_count;
    uint32_t tx_fifo_head;
    uint32_t tx_head_addr;
    uint32_t tx_fifo_size;
+    uint8_t tx_timeout_factor;
    atomic_t tx_fifo_stall;
    boolean_t pcix_82544;
    boolean_t detect_tx_hung;
 
    /* RX */
-#ifdef CONFIG_E1000_NAPI
-    boolean_t (*clean_rx) (struct iegbe_adapter *adapter,
+    bool (*clean_rx)(struct iegbe_adapter *adapter,
                   struct iegbe_rx_ring *rx_ring,
                   int *work_done, int work_to_do);
-#else
-    boolean_t (*clean_rx) (struct iegbe_adapter *adapter,
-                           struct iegbe_rx_ring *rx_ring);
-#endif
-
-#ifdef IEGBE_GBE_WORKAROUND
    void (*alloc_rx_buf) (struct iegbe_adapter *adapter,
-                          struct iegbe_rx_ring *rx_ring,
-                          int cleaned_count);
-#else
-    void (*alloc_rx_buf) (struct iegbe_adapter *adapter,
-                          struct iegbe_rx_ring *rx_ring);
-#endif
-
+                          struct iegbe_rx_ring *rx_ring,
+                          int cleaned_count);
    struct iegbe_rx_ring *rx_ring; /* One per active queue */
-#ifdef CONFIG_E1000_NAPI
+    struct napi_struct napi;
    struct net_device *polling_netdev; /* One per active queue */
-#endif
-#ifdef CONFIG_E1000_MQ
-    struct net_device **cpu_netdev; /* per-cpu */
-    struct call_async_data_struct rx_sched_call_data;
-    int cpu_for_queue[4];
-#endif
-    int num_queues;
+
+    int num_tx_queues;
+    int num_rx_queues;
 
    uint64_t hw_csum_err;
    uint64_t hw_csum_good;
    uint64_t rx_hdr_split;
+    uint32_t alloc_rx_buff_failed;
    uint32_t rx_int_delay;
    uint32_t rx_abs_int_delay;
    boolean_t rx_csum;
@@ -334,8 +333,6 @@ struct iegbe_adapter {
    uint64_t gorcl_old;
    uint16_t rx_ps_bsize0;
 
-    /* Interrupt Throttle Rate */
-    uint32_t itr;
 
    /* OS defined structs */
    struct net_device *netdev;
@@ -378,7 +375,21 @@ struct iegbe_adapter {
 #ifdef CONFIG_PCI_MSI
    boolean_t have_msi;
 #endif
-#define IEGBE_INTD_DISABLE 0x0400
+    /* to not mess up cache alignment, always add to the bottom */
+    boolean_t tso_force;
+    boolean_t smart_power_down; /* phy smart power down */
+    boolean_t quad_port_a;
+    unsigned long flags;
+    uint32_t eeprom_wol;
+    int bars;
+    int need_ioport;
 };
+
+enum iegbe_state_t {
+    __E1000_TESTING,
+    __E1000_RESETTING,
+    __E1000_DOWN
+};
+#define IEGBE_INTD_DISABLE 0x0400
 #endif /* _IEGBE_H_ */
 
--- a/Embedded/src/GbE/iegbe_hw.c
+++ b/Embedded/src/GbE/iegbe_hw.c
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
@@ -2115,7 +2115,7 @@ iegbe_config_mac_to_phy(struct iegbe_hw
 
        ret_val = iegbe_oem_set_trans_gasket(hw);
        if(ret_val){
-        return ret_val;
+            return ret_val;
        }
        ret_val = iegbe_oem_phy_is_full_duplex(
            hw, (int *) &is_FullDuplex);
@@ -2164,7 +2164,7 @@ iegbe_config_mac_to_phy(struct iegbe_hw
    }
    /* Write the configured values back to the Device Control Reg. */
    E1000_WRITE_REG(hw, CTRL, ctrl);
-    return E1000_SUCCESS;
+    return ret_val;
 }
 
 /*****************************************************************************
@@ -2684,7 +2684,7 @@ iegbe_check_for_link(struct iegbe_hw *hw
 
            if(hw->autoneg_failed == 0) {
                hw->autoneg_failed = 1;
-                return 0;
+                return E1000_SUCCESS;
            }
            DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\r\n");
 
@@ -5875,7 +5875,7 @@ iegbe_get_cable_length(struct iegbe_hw *
                        max_agc = cur_agc;
                }
            }
-            
+
            /* This is to fix a Klockwork defect, that the array index might
             * be out of bounds. 113 is table size */
            if (cur_agc < 0x71){
--- a/Embedded/src/GbE/iegbe_hw.h
+++ b/Embedded/src/GbE/iegbe_hw.h
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
@@ -299,7 +299,7 @@ void iegbe_set_media_type(struct iegbe_h
 /* Link Configuration */
 int32_t iegbe_setup_link(struct iegbe_hw *hw);
 int32_t iegbe_phy_setup_autoneg(struct iegbe_hw *hw);
-void iegbe_config_collision_dist(struct iegbe_hw *hw); 
+void iegbe_config_collision_dist(struct iegbe_hw *hw);
 int32_t iegbe_config_fc_after_link_up(struct iegbe_hw *hw);
 int32_t iegbe_check_for_link(struct iegbe_hw *hw);
 int32_t iegbe_get_speed_and_duplex(struct iegbe_hw *hw, uint16_t * speed, uint16_t * duplex);
@@ -588,14 +588,6 @@ uint8_t iegbe_arc_subsystem_valid(struct
  * o LSC = Link Status Change
  */
 
-#ifdef IEGBE_GBE_WORKAROUND
-#define IMS_ENABLE_MASK ( \
-    E1000_IMS_RXT0 | \
-    E1000_IMS_TXQE | \
-    E1000_IMS_RXDMT0 | \
-    E1000_IMS_RXSEQ | \
-    E1000_IMS_LSC)
-#else
 #define IMS_ENABLE_MASK ( \
    E1000_IMS_RXT0 | \
    E1000_IMS_TXDW | \
@@ -606,8 +598,7 @@ uint8_t iegbe_arc_subsystem_valid(struct
    E1000_ICR_PB | \
    E1000_ICR_CPP_TARGET | \
    E1000_ICR_CPP_MASTER | \
-    E1000_IMS_LSC)
-#endif
+    E1000_ICR_LSC)
 
 /* Number of high/low register pairs in the RAR. The RAR (Receive Address
  * Registers) holds the directed and multicast addresses that we monitor. We
@@ -923,10 +914,15 @@ struct iegbe_ffvt_entry {
 #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
 #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
 #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
-// Register conflict, does not exist for ICP_xxxx hardware
-// #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
 #define E1000_CTRL_AUX 0x000E0 /* Aux Control -RW */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
 #define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
 #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
 #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
 #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
@@ -1282,8 +1278,6 @@ struct iegbe_ffvt_entry {
 #define E1000_82542_FFMT E1000_FFMT
 #define E1000_82542_FFVT E1000_FFVT
 #define E1000_82542_HOST_IF E1000_HOST_IF
-// Register conflict with ICP_xxxx hardware, no IAM
-// #define E1000_82542_IAM E1000_IAM
 #define E1000_82542_EEMNGCTL E1000_EEMNGCTL
 #define E1000_82542_PSRCTL E1000_PSRCTL
 #define E1000_82542_RAID E1000_RAID
@@ -1329,6 +1323,7 @@ struct iegbe_hw_stats {
    uint64_t algnerrc;
    uint64_t symerrs;
    uint64_t rxerrc;
+    uint64_t txerrc;
    uint64_t mpc;
    uint64_t scc;
    uint64_t ecol;
@@ -1363,6 +1358,7 @@ struct iegbe_hw_stats {
    uint64_t ruc;
    uint64_t rfc;
    uint64_t roc;
+    uint64_t rlerrc;
    uint64_t rjc;
    uint64_t mgprc;
    uint64_t mgpdc;
@@ -1392,19 +1388,6 @@ struct iegbe_hw_stats {
    uint64_t ictxqmtc;
    uint64_t icrxdmtc;
    uint64_t icrxoc;
-#ifdef IEGBE_GBE_WORKAROUND
-    u64 txqec;
-    u64 tx_next_to_clean;
-    u64 tx_next_to_use;
-    u64 cc_gt_num_rx;
-    u64 tx_hnet;
-    u64 tx_hnentu;
-    u64 num_tx_queues;
-
-    u64 num_rx_buf_alloc;
-    u64 rx_next_to_clean;
-    u64 rx_next_to_use;
-#endif
 };
 
 /* Structure containing variables used by the shared code (iegbe_hw.c) */
@@ -1484,6 +1467,7 @@ struct iegbe_hw {
    boolean_t ifs_params_forced;
    boolean_t in_ifs_mode;
    boolean_t mng_reg_access_disabled;
+    boolean_t rx_needs_kicking;
    boolean_t icp_xxxx_is_link_up;
 };
 
@@ -2358,17 +2342,23 @@ struct iegbe_host_command_info {
 #define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF
 #define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
 
 /* PBA constants */
+#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
 #define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
 #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
 #define E1000_PBA_22K 0x0016
 #define E1000_PBA_24K 0x0018
 #define E1000_PBA_30K 0x001E
 #define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
 #define E1000_PBA_38K 0x0026
 #define E1000_PBA_40K 0x0028
 #define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
+#define E1000_PBS_16K E1000_PBA_16K
 
 /* Flow Control Constants */
 #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
@@ -2899,7 +2889,7 @@ struct iegbe_host_command_info {
 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
 #define M88E1011_I_REV_4 0x04
 #define M88E1111_I_PHY_ID 0x01410CC2
-#define M88E1141_E_PHY_ID 0x01410CD4
+#define M88E1141_E_PHY_ID 0x01410CD0
 #define L1LXT971A_PHY_ID 0x001378E0
 
 /* Miscellaneous PHY bit definitions. */
--- a/Embedded/src/GbE/iegbe_main.c
+++ b/Embedded/src/GbE/iegbe_main.c
@@ -2,7 +2,7 @@
 
 GPL LICENSE SUMMARY
 
-  Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+  Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of version 2 of the GNU General Public License as
@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
   Contact Information:
   Intel Corporation
 
-  version: Embedded.L.1.0.34
+  version: Embedded.Release.Patch.L.1.0.7-5
 
   Contact Information:
 
@@ -42,103 +42,15 @@ GPL LICENSE SUMMARY
 
 #include "iegbe.h"
 #include "gcu_if.h"
-
-/* Change Log
- * 6.0.58 4/20/05
- *   o iegbe_set_spd_dplx tests for compatible speed/duplex specification
- *     for fiber adapters
- * 6.0.57 4/19/05
- *   o Added code to fix register test failure for devices >= 82571
- *
- * 6.0.52 3/15/05
- *   o Added stats_lock around iegbe_read_phy_reg commands to avoid concurrent
- *     calls, one from mii_ioctl and other from within update_stats while
- *     processing MIIREG ioctl.
- *
- * 6.1.2 4/13/05
- *   o Fixed ethtool diagnostics
- *   o Enabled flow control to take default eeprom settings
- *   o Added stats_lock around iegbe_read_phy_reg commands to avoid concurrent
- *     calls, one from mii_ioctl and other from within update_stats while processing
- *     MIIREG ioctl.
- * 6.0.55 3/23/05
- *   o Support for MODULE_VERSION
- *   o Fix APM setting for 82544 based adapters
- * 6.0.54 3/26/05
- *   o Added a timer to expire packets that were deferred for cleanup
- * 6.0.52 3/15/05
- *   o Added stats_lock around iegbe_read_phy_reg commands to avoid concurrent
- *     calls, one from mii_ioctl and other from within update_stats while
- *     processing MIIREG ioctl.
- * 6.0.47 3/2/05
- *   o Added enhanced functionality to the loopback diags to wrap the
- *     descriptor rings
- *   o Added manageability vlan filtering workaround.
- *
- * 6.0.44+ 2/15/05
- *   o Added code to handle raw packet based DHCP packets
- *   o Added code to fix the errata 10 buffer overflow issue
- *   o Sync up with WR01-05
- *   o applied Anton's patch to resolve tx hang in hardware
- *   o iegbe timeouts with early writeback patch
- *   o Removed Queensport IDs
- *   o fixed driver panic if MAC receives a bad large packets when packet
- *     split is enabled
- *   o Applied Andrew Mortons patch - iegbe stops working after resume
- * 5.2.29 12/24/03
- *   o Bug fix: Endianess issue causing ethtool diags to fail on ppc.
- *   o Bug fix: Use pdev->irq instead of netdev->irq for MSI support.
- *   o Report driver message on user override of InterruptThrottleRate module
- *     parameter.
- *   o Bug fix: Change I/O address storage from uint32_t to unsigned long.
- *   o Feature: Added ethtool RINGPARAM support.
- *   o Feature: Added netpoll support.
- *   o Bug fix: Race between Tx queue and Tx clean fixed with a spin lock.
- *   o Bug fix: Allow 1000/Full setting for autoneg param for fiber connections.
- *     Jon D Mason [jonmason@us.ibm.com].
- *
- * 5.2.22 10/15/03
- *   o Bug fix: SERDES devices might be connected to a back-plane switch that
- *     doesn't support auto-neg, so add the capability to force 1000/Full.
- *     Also, since forcing 1000/Full, sample RxSynchronize bit to detect link
- *     state.
- *   o Bug fix: Flow control settings for hi/lo watermark didn't consider
- *     changes in the RX FIFO size, which could occur with Jumbo Frames or with
- *     the reduced FIFO in 82547.
- *   o Bug fix: Better propagation of error codes.
- *     [Janice Girouard (janiceg -a-t- us.ibm.com)]
- *   o Bug fix: hang under heavy Tx stress when running out of Tx descriptors;
- *     wasn't clearing context descriptor when backing out of send because of
- *     no-resource condition.
- *   o Bug fix: check netif_running in dev->poll so we don't have to hang in
- *     dev->close until all polls are finished. [Rober Olsson
- *     (robert.olsson@data.slu.se)].
- *   o Revert TxDescriptor ring size back to 256 since change to 1024 wasn't
- *     accepted into the kernel.
- *
- * 5.2.16 8/8/03
- */
-
-#ifdef IEGBE_GBE_WORKAROUND
-#define IEGBE_GBE_WORKAROUND_NUM_RX_DESCRIPTORS 1
-#endif
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 
 char iegbe_driver_name[] = "iegbe";
 char iegbe_driver_string[] = "Gigabit Ethernet Controller Driver";
-#ifndef CONFIG_E1000_NAPI
-#define DRIVERNAPI
-#else
-#define DRIVERNAPI "-NAPI"
-#endif
-#define DRV_VERSION "0.8.0"DRIVERNAPI
+#define DRV_VERSION "1.0.0-K28-NAPI"
 char iegbe_driver_version[] = DRV_VERSION;
-char iegbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
+char iegbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
 
-#define E1000_FIFO_HDR 0x10
-#define E1000_82547_PAD_LEN 0x3E0
-#define MINIMUM_DHCP_PACKET_SIZE 282
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
-#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
 
 /* iegbe_pci_tbl - PCI Device ID Table
  *
@@ -148,95 +60,48 @@ char iegbe_copyright[] = "Copyright (c)
  * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  */
 static struct pci_device_id iegbe_pci_tbl[] = {
-/* INTEL_E1000_ETHERNET_DEVICE(0x1000), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1001), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1004), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1008), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1009), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x100C), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x100D), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x100E), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x100F), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1010), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1011), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1012), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1013), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1014), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1015), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1016), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1017), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1018), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1019), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x101A), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x101D), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x101E), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1026), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1027), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1028), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x105E), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x105F), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1060), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1075), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1076), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1077), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1078), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x1079), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x107A), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x107B), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x107C), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x107D), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x107E), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x107F), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x108A), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x108B), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x108C), */
-/* INTEL_E1000_ETHERNET_DEVICE(0x109A), */
- INTEL_E1000_ETHERNET_DEVICE(0x5040),
- INTEL_E1000_ETHERNET_DEVICE(0x5041),
- INTEL_E1000_ETHERNET_DEVICE(0x5042),
- INTEL_E1000_ETHERNET_DEVICE(0x5043),
- INTEL_E1000_ETHERNET_DEVICE(0x5044),
- INTEL_E1000_ETHERNET_DEVICE(0x5045),
- INTEL_E1000_ETHERNET_DEVICE(0x5046),
- INTEL_E1000_ETHERNET_DEVICE(0x5047),
- INTEL_E1000_ETHERNET_DEVICE(0x5048),
- INTEL_E1000_ETHERNET_DEVICE(0x5049),
- INTEL_E1000_ETHERNET_DEVICE(0x504A),
- INTEL_E1000_ETHERNET_DEVICE(0x504B),
- /* required last entry */
+    INTEL_E1000_ETHERNET_DEVICE(0x5040),
+    INTEL_E1000_ETHERNET_DEVICE(0x5041),
+    INTEL_E1000_ETHERNET_DEVICE(0x5042),
+    INTEL_E1000_ETHERNET_DEVICE(0x5043),
+    INTEL_E1000_ETHERNET_DEVICE(0x5044),
+    INTEL_E1000_ETHERNET_DEVICE(0x5045),
+    INTEL_E1000_ETHERNET_DEVICE(0x5046),
+    INTEL_E1000_ETHERNET_DEVICE(0x5047),
+    INTEL_E1000_ETHERNET_DEVICE(0x5048),
+    INTEL_E1000_ETHERNET_DEVICE(0x5049),
+    INTEL_E1000_ETHERNET_DEVICE(0x504A),
+    INTEL_E1000_ETHERNET_DEVICE(0x504B),
+    /* required last entry */
    {0,}
 };
 
 MODULE_DEVICE_TABLE(pci, iegbe_pci_tbl);
 
-DEFINE_SPINLOCK(print_lock);
 
 int iegbe_up(struct iegbe_adapter *adapter);
 void iegbe_down(struct iegbe_adapter *adapter);
+void iegbe_reinit_locked(struct iegbe_adapter *adapter);
 void iegbe_reset(struct iegbe_adapter *adapter);
 int iegbe_set_spd_dplx(struct iegbe_adapter *adapter, uint16_t spddplx);
 int iegbe_setup_all_tx_resources(struct iegbe_adapter *adapter);
 int iegbe_setup_all_rx_resources(struct iegbe_adapter *adapter);
 void iegbe_free_all_tx_resources(struct iegbe_adapter *adapter);
 void iegbe_free_all_rx_resources(struct iegbe_adapter *adapter);
-int iegbe_setup_tx_resources(struct iegbe_adapter *adapter,
+static int iegbe_setup_tx_resources(struct iegbe_adapter *adapter,
                             struct iegbe_tx_ring *txdr);
-int iegbe_setup_rx_resources(struct iegbe_adapter *adapter,
+static int iegbe_setup_rx_resources(struct iegbe_adapter *adapter,
                             struct iegbe_rx_ring *rxdr);
-void iegbe_free_tx_resources(struct iegbe_adapter *adapter,
+static void iegbe_free_tx_resources(struct iegbe_adapter *adapter,
                             struct iegbe_tx_ring *tx_ring);
-void iegbe_free_rx_resources(struct iegbe_adapter *adapter,
+static void iegbe_free_rx_resources(struct iegbe_adapter *adapter,
                             struct iegbe_rx_ring *rx_ring);
 void iegbe_update_stats(struct iegbe_adapter *adapter);
-
 static int iegbe_init_module(void);
 static void iegbe_exit_module(void);
 static int iegbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void __devexit iegbe_remove(struct pci_dev *pdev);
 static int iegbe_alloc_queues(struct iegbe_adapter *adapter);
-#ifdef CONFIG_E1000_MQ
-static void iegbe_setup_queue_mapping(struct iegbe_adapter *adapter);
-#endif
 static int iegbe_sw_init(struct iegbe_adapter *adapter);
 static int iegbe_open(struct net_device *netdev);
 static int iegbe_close(struct net_device *netdev);
@@ -249,7 +114,8 @@ static void iegbe_clean_tx_ring(struct i
                                struct iegbe_tx_ring *tx_ring);
 static void iegbe_clean_rx_ring(struct iegbe_adapter *adapter,
                                struct iegbe_rx_ring *rx_ring);
-static void iegbe_set_multi(struct net_device *netdev);
+
+static void iegbe_set_rx_mode(struct net_device *netdev);
 static void iegbe_update_phy_info(unsigned long data);
 static void iegbe_watchdog(unsigned long data);
 static void iegbe_82547_tx_fifo_stall(unsigned long data);
@@ -257,66 +123,46 @@ static int iegbe_xmit_frame(struct sk_bu
 static struct net_device_stats * iegbe_get_stats(struct net_device *netdev);
 static int iegbe_change_mtu(struct net_device *netdev, int new_mtu);
 static int iegbe_set_mac(struct net_device *netdev, void *p);
-static irqreturn_t iegbe_intr(int irq, void *data, struct pt_regs *regs);
+static irqreturn_t iegbe_intr(int irq, void *data);
 
-void iegbe_tasklet(unsigned long);
+static irqreturn_t iegbe_intr_msi(int irq, void *data);
 
-#ifndef IEGBE_GBE_WORKAROUND
-static boolean_t iegbe_clean_tx_irq(struct iegbe_adapter *adapter,
+static bool iegbe_clean_tx_irq(struct iegbe_adapter *adapter,
                                    struct iegbe_tx_ring *tx_ring);
-#endif
-
-#ifdef CONFIG_E1000_NAPI
-static int iegbe_clean(struct net_device *poll_dev, int *budget);
-static boolean_t iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
+static int iegbe_clean(struct napi_struct *napi, int budget);
+static bool iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
                                    struct iegbe_rx_ring *rx_ring,
                                    int *work_done, int work_to_do);
-static boolean_t iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
+static bool iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
                                       struct iegbe_rx_ring *rx_ring,
                                       int *work_done, int work_to_do);
-#else
-static boolean_t iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
-                                    struct iegbe_rx_ring *rx_ring);
-static boolean_t iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
-                                       struct iegbe_rx_ring *rx_ring);
-#endif
 
-#ifdef IEGBE_GBE_WORKAROUND
+
 static void iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
                                   struct iegbe_rx_ring *rx_ring,
                                   int cleaned_count);
 static void iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
                                      struct iegbe_rx_ring *rx_ring,
                                      int cleaned_count);
-#else
-static void iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
-                                   struct iegbe_rx_ring *rx_ring);
-static void iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
-                                      struct iegbe_rx_ring *rx_ring);
-#endif
+
 
 static int iegbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
-#ifdef SIOCGMIIPHY
 static int iegbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
- int cmd);
-#endif
+                           int cmd);
 void set_ethtool_ops(struct net_device *netdev);
 extern int ethtool_ioctl(struct ifreq *ifr);
 static void iegbe_enter_82542_rst(struct iegbe_adapter *adapter);
 static void iegbe_leave_82542_rst(struct iegbe_adapter *adapter);
 static void iegbe_tx_timeout(struct net_device *dev);
-static void iegbe_tx_timeout_task(struct net_device *dev);
+static void iegbe_reset_task(struct work_struct *work);
 static void iegbe_smartspeed(struct iegbe_adapter *adapter);
 static inline int iegbe_82547_fifo_workaround(struct iegbe_adapter *adapter,
- struct sk_buff *skb);
+                                              struct sk_buff *skb);
 
-#ifdef NETIF_F_HW_VLAN_TX
-static void iegbe_vlan_rx_register(struct net_device *netdev,
-                                   struct vlan_group *grp);
+static void iegbe_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
 static void iegbe_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
 static void iegbe_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
 static void iegbe_restore_vlan(struct iegbe_adapter *adapter);
-#endif
 
 static int iegbe_notify_reboot(struct notifier_block *,
                               unsigned long event,
@@ -331,15 +177,17 @@ static int iegbe_resume(struct pci_dev *
 static void iegbe_netpoll (struct net_device *netdev);
 #endif
 
-#ifdef CONFIG_E1000_MQ
-/* for multiple Rx queues */
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+    "Maximum size of packet that is copied to a new buffer on receive");
 void iegbe_rx_schedule(void *data);
-#endif
 
 struct notifier_block iegbe_notifier_reboot = {
- .notifier_call = iegbe_notify_reboot,
- .next = NULL,
- .priority = 0
+    .notifier_call = iegbe_notify_reboot,
+    .next = NULL,
+    .priority = 0
 };
 
 /* Exported from other modules */
@@ -347,14 +195,14 @@ struct notifier_block iegbe_notifier_reb
 extern void iegbe_check_options(struct iegbe_adapter *adapter);
 
 static struct pci_driver iegbe_driver = {
- .name = iegbe_driver_name,
- .id_table = iegbe_pci_tbl,
- .probe = iegbe_probe,
- .remove = __devexit_p(iegbe_remove),
- /* Power Managment Hooks */
+    .name = iegbe_driver_name,
+    .id_table = iegbe_pci_tbl,
+    .probe = iegbe_probe,
+    .remove = __devexit_p(iegbe_remove),
+    /* Power Managment Hooks */
 #ifdef CONFIG_PM
- .suspend = iegbe_suspend,
- .resume = iegbe_resume
+    .suspend = iegbe_suspend,
+    .resume = iegbe_resume
 #endif
 };
 
@@ -364,46 +212,17 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
-module_param(debug, int, 0);
+module_param(debug, int, 0x0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static uint8_t gcu_suspend = 0;
-static uint8_t gcu_resume = 0;
+static uint8_t gcu_suspend = 0x0;
+static uint8_t gcu_resume = 0x0;
 struct pci_dev *gcu = NULL;
 
-unsigned long tasklet_data;
-DECLARE_TASKLET(iegbe_reset_tasklet, iegbe_tasklet, (unsigned long) &tasklet_data);
 
 /**
  * iegbe_iegbe_tasklet -*
  **/
-void iegbe_tasklet(unsigned long data)
-{
-    char* err_msg = "TEST";
-    uint32_t *icr = (uint32_t*) data;
-    uint32_t gbe = *icr & 0x000000FF;
-    if( *icr & E1000_ICR_RX_DESC_FIFO_PAR) { /* 21 */
-        err_msg = "DMA Transmit Descriptor 2-bit ECC Error!";
-    }
-    if( *icr & E1000_ICR_TX_DESC_FIFO_PAR) { /* 20 */
-        err_msg = "DMA Receive Descriptor 2-bit ECC Error!";
-    }
-    if( *icr & E1000_ICR_PB) { /* 23 */
-        err_msg = "DMA Packet Buffer 2-bit ECC Error!";
-    }
-    if( *icr & E1000_ICR_CPP_TARGET) { /* 27 */
-        err_msg = "Statistic Register ECC Error!";
-    }
-    if( *icr & E1000_ICR_CPP_MASTER) {
-        err_msg = "CPP Error!";
-    }
-    spin_lock(&print_lock);
-    printk("IEGBE%d: System Reset due to: %s\n", gbe, err_msg);
-    dump_stack();
-    spin_unlock(&print_lock);
-    panic(err_msg);
-    return;
-}
 /**
  * iegbe_init_module - Driver Registration Routine
  *
@@ -411,21 +230,24 @@ void iegbe_tasklet(unsigned long data)
  * loaded. All it does is register with the PCI subsystem.
  **/
 
-static int __init
-iegbe_init_module(void)
+static int __init iegbe_init_module(void)
 {
- int ret;
+    int ret;
 
    printk(KERN_INFO "%s - version %s\n",
- iegbe_driver_string, iegbe_driver_version);
+           iegbe_driver_string, iegbe_driver_version);
 
- printk(KERN_INFO "%s\n", iegbe_copyright);
+    printk(KERN_INFO "%s\n", iegbe_copyright);
 
- ret = pci_module_init(&iegbe_driver);
- if(ret >= 0) {
- register_reboot_notifier(&iegbe_notifier_reboot);
- }
- return ret;
+    ret = pci_register_driver(&iegbe_driver);
+    if (copybreak != COPYBREAK_DEFAULT) {
+        if (copybreak == 0)
+            printk(KERN_INFO "iegbe: copybreak disabled\n");
+        else
+            printk(KERN_INFO "iegbe: copybreak enabled for "
+                   "packets <= %u bytes\n", copybreak);
+    }
+    return ret;
 }
 
 module_init(iegbe_init_module);
@@ -437,29 +259,51 @@ module_init(iegbe_init_module);
  * from memory.
 **/
 
-static void __exit
-iegbe_exit_module(void)
+static void __exit iegbe_exit_module(void)
 {
-
-    unregister_reboot_notifier(&iegbe_notifier_reboot);
- pci_unregister_driver(&iegbe_driver);
+    pci_unregister_driver(&iegbe_driver);
 }
 
 module_exit(iegbe_exit_module);
 
+static int iegbe_request_irq(struct iegbe_adapter *adapter)
+{
+    struct net_device *netdev = adapter->netdev;
+    irq_handler_t handler = iegbe_intr;
+    int irq_flags = IRQF_SHARED;
+    int err;
+    adapter->have_msi = !pci_enable_msi(adapter->pdev);
+    if (adapter->have_msi) {
+        handler = iegbe_intr_msi;
+        irq_flags = 0;
+    }
+    err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
+                      netdev);
+    if (err) {
+        if (adapter->have_msi)
+            pci_disable_msi(adapter->pdev);
+        DPRINTK(PROBE, ERR,
+                "Unable to allocate interrupt Error: %d\n", err);
+    }
+    return err;
+}
+static void iegbe_free_irq(struct iegbe_adapter *adapter)
+{
+    struct net_device *netdev = adapter->netdev;
+    free_irq(adapter->pdev->irq, netdev);
+    if (adapter->have_msi)
+        pci_disable_msi(adapter->pdev);
+}
 /**
  * iegbe_irq_disable - Mask off interrupt generation on the NIC
  * @adapter: board private structure
 **/
 
-static inline void
-iegbe_irq_disable(struct iegbe_adapter *adapter)
+static void iegbe_irq_disable(struct iegbe_adapter *adapter)
 {
-
-    atomic_inc(&adapter->irq_sem);
- E1000_WRITE_REG(&adapter->hw, IMC, ~0);
- E1000_WRITE_FLUSH(&adapter->hw);
- synchronize_irq(adapter->pdev->irq);
+    E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+    E1000_WRITE_FLUSH(&adapter->hw);
+    synchronize_irq(adapter->pdev->irq);
 }
 
 /**
1312@@ -470,244 +314,414 @@ iegbe_irq_disable(struct iegbe_adapter *
1313 static inline void
1314 iegbe_irq_enable(struct iegbe_adapter *adapter)
1315 {
1316-
1317- if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
1318- E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
1319- E1000_WRITE_FLUSH(&adapter->hw);
1320- }
1321+ E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
1322+ E1000_WRITE_FLUSH(&adapter->hw);
1323 }
1324-#ifdef NETIF_F_HW_VLAN_TX
1325-void
1326-iegbe_update_mng_vlan(struct iegbe_adapter *adapter)
1327-{
1328- struct net_device *netdev = adapter->netdev;
1329- uint16_t vid = adapter->hw.mng_cookie.vlan_id;
1330- uint16_t old_vid = adapter->mng_vlan_id;
1331 
1332- if(adapter->vlgrp) {
1333- if(!adapter->vlgrp->vlan_devices[vid]) {
1334- if(adapter->hw.mng_cookie.status &
1335- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
1336- iegbe_vlan_rx_add_vid(netdev, vid);
1337- adapter->mng_vlan_id = vid;
1338- } else {
1339- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1340- }
1341- if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
1342- (vid != old_vid) &&
1343- !adapter->vlgrp->vlan_devices[old_vid]) {
1344- iegbe_vlan_rx_kill_vid(netdev, old_vid);
1345- }
1346- }
1347-}
1348+static void iegbe_update_mng_vlan(struct iegbe_adapter *adapter)
1349+{
1350+ struct iegbe_hw *hw = &adapter->hw;
1351+ struct net_device *netdev = adapter->netdev;
1352+ u16 vid = hw->mng_cookie.vlan_id;
1353+ u16 old_vid = adapter->mng_vlan_id;
1354+ if (adapter->vlgrp) {
1355+ if (!vlan_group_get_device(adapter->vlgrp, vid)) {
1356+ if (hw->mng_cookie.status &
1357+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
1358+ iegbe_vlan_rx_add_vid(netdev, vid);
1359+ adapter->mng_vlan_id = vid;
1360+ } else
1361+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1362+
1363+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
1364+ (vid != old_vid) &&
1365+ !vlan_group_get_device(adapter->vlgrp, old_vid))
1366+ iegbe_vlan_rx_kill_vid(netdev, old_vid);
1367+ } else
1368+ adapter->mng_vlan_id = vid;
1369+ }
1370 }
1371-#endif
1372 
1373-int
1374-iegbe_up(struct iegbe_adapter *adapter)
1375+/**
1376+ * iegbe_configure - configure the hardware for RX and TX
1377+ * @adapter = private board structure
1378+ **/
1379+static void iegbe_configure(struct iegbe_adapter *adapter)
1380 {
1381     struct net_device *netdev = adapter->netdev;
1382- int i, err;
1383- uint16_t pci_cmd;
1384-
1385- /* hardware has been reset, we need to reload some things */
1386-
1387- /* Reset the PHY if it was previously powered down */
1388- if(adapter->hw.media_type == iegbe_media_type_copper
1389- || (adapter->hw.media_type == iegbe_media_type_oem
1390- && iegbe_oem_phy_is_copper(&adapter->hw))) {
1391- uint16_t mii_reg;
1392- iegbe_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
1393- if(mii_reg & MII_CR_POWER_DOWN){
1394- iegbe_phy_reset(&adapter->hw);
1395- }
1396- }
1397+ int i;
1398 
1399- iegbe_set_multi(netdev);
1400+ iegbe_set_rx_mode(netdev);
1401 
1402-#ifdef NETIF_F_HW_VLAN_TX
1403     iegbe_restore_vlan(adapter);
1404-#endif
1405 
1406     iegbe_configure_tx(adapter);
1407     iegbe_setup_rctl(adapter);
1408     iegbe_configure_rx(adapter);
1409+ /* call E1000_DESC_UNUSED which always leaves
1410+ * at least 1 descriptor unused to make sure
1411+ * next_to_use != next_to_clean */
1412+ for (i = 0; i < adapter->num_rx_queues; i++) {
1413+ struct iegbe_rx_ring *ring = &adapter->rx_ring[i];
1414+ adapter->alloc_rx_buf(adapter, ring,
1415+ E1000_DESC_UNUSED(ring));
1416+ }
1417 
1418-#ifdef IEGBE_GBE_WORKAROUND
1419- for (i = 0; i < adapter->num_queues; i++)
1420- adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i],
1421- IEGBE_GBE_WORKAROUND_NUM_RX_DESCRIPTORS + 1);
1422-#else
1423- for (i = 0; i < adapter->num_queues; i++)
1424- adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
1425-#endif
1426+ adapter->tx_queue_len = netdev->tx_queue_len;
1427+}
1428 
1429-#ifdef CONFIG_PCI_MSI
1430- if(adapter->hw.mac_type > iegbe_82547_rev_2
1431- || adapter->hw.mac_type == iegbe_icp_xxxx) {
1432- adapter->have_msi = TRUE;
1433- if((err = pci_enable_msi(adapter->pdev))) {
1434- DPRINTK(PROBE, ERR,
1435- "Unable to allocate MSI interrupt Error: %d\n", err);
1436- adapter->have_msi = FALSE;
1437- }
1438- }
1439- pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
1440- pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd | IEGBE_INTD_DISABLE);
1441+int iegbe_up(struct iegbe_adapter *adapter)
1442+{
1443+ /* hardware has been reset, we need to reload some things */
1444+ iegbe_configure(adapter);
1445 
1446-#endif
1447- if((err = request_irq(adapter->pdev->irq, &iegbe_intr,
1448- SA_SHIRQ | SA_SAMPLE_RANDOM,
1449- netdev->name, netdev))) {
1450- DPRINTK(PROBE, ERR,
1451- "Unable to allocate interrupt Error: %d\n", err);
1452- return err;
1453- }
1454+ clear_bit(__E1000_DOWN, &adapter->flags);
1455 
1456- mod_timer(&adapter->watchdog_timer, jiffies);
1457+ napi_enable(&adapter->napi);
1458 
1459-#ifdef CONFIG_E1000_NAPI
1460- netif_poll_enable(netdev);
1461-#endif
1462     iegbe_irq_enable(adapter);
1463 
1464+ adapter->hw.get_link_status = 0x1;
1465     return 0;
1466 }
1467 
1468-void
1469-iegbe_down(struct iegbe_adapter *adapter)
1470-{
1471- struct net_device *netdev = adapter->netdev;
1472-
1473- iegbe_irq_disable(adapter);
1474-#ifdef CONFIG_E1000_MQ
1475- while (atomic_read(&adapter->rx_sched_call_data.count) != 0) { };
1476-#endif
1477- free_irq(adapter->pdev->irq, netdev);
1478-#ifdef CONFIG_PCI_MSI
1479- if((adapter->hw.mac_type > iegbe_82547_rev_2
1480- || adapter->hw.mac_type == iegbe_icp_xxxx)
1481- && adapter->have_msi == TRUE) {
1482- pci_disable_msi(adapter->pdev);
1483- }
1484-#endif
1485- del_timer_sync(&adapter->tx_fifo_stall_timer);
1486- del_timer_sync(&adapter->watchdog_timer);
1487- del_timer_sync(&adapter->phy_info_timer);
1488+/**
1489+ * iegbe_power_up_phy - restore link in case the phy was powered down
1490+ * @adapter: address of board private structure
1491+ *
1492+ * The phy may be powered down to save power and turn off link when the
1493+ * driver is unloaded and wake on lan is not enabled (among others)
1494+ * *** this routine MUST be followed by a call to iegbe_reset ***
1495+ *
1496+ **/
1497 
1498-#ifdef CONFIG_E1000_NAPI
1499- netif_poll_disable(netdev);
1500-#endif
1501- adapter->link_speed = 0;
1502- adapter->link_duplex = 0;
1503- netif_carrier_off(netdev);
1504- netif_stop_queue(netdev);
1505+void iegbe_power_up_phy(struct iegbe_adapter *adapter)
1506+{
1507+ struct iegbe_hw *hw = &adapter->hw;
1508+ u16 mii_reg = 0;
1509 
1510- iegbe_reset(adapter);
1511- iegbe_clean_all_tx_rings(adapter);
1512- iegbe_clean_all_rx_rings(adapter);
1513+ /* Just clear the power down bit to wake the phy back up */
1514+ if (hw->media_type == iegbe_media_type_copper) {
1515+ /* according to the manual, the phy will retain its
1516+ * settings across a power-down/up cycle */
1517+ iegbe_read_phy_reg(hw, PHY_CTRL, &mii_reg);
1518+ mii_reg &= ~MII_CR_POWER_DOWN;
1519+ iegbe_write_phy_reg(hw, PHY_CTRL, mii_reg);
1520+ }
1521+}
1522 
1523- /* If WoL is not enabled and management mode is not IAMT
1524- * or if WoL is not enabled and OEM PHY is copper based,
1525- * power down the PHY so no link is implied when interface is down */
1526- if(!adapter->wol
1527- && ((adapter->hw.mac_type >= iegbe_82540
1528- && adapter->hw.media_type == iegbe_media_type_copper
1529- && !iegbe_check_mng_mode(&adapter->hw)
1530- && !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN))
1531- || (adapter->hw.media_type == iegbe_media_type_oem
1532- && iegbe_oem_phy_is_copper(&adapter->hw)))){
1533+static void iegbe_power_down_phy(struct iegbe_adapter *adapter)
1534+{
1535+ struct iegbe_hw *hw = &adapter->hw;
1536 
1537- uint16_t mii_reg;
1538- iegbe_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
1539+ /* Power down the PHY so no link is implied when interface is down *
1540+ * The PHY cannot be powered down if any of the following is true *
1541+ * (a) WoL is enabled
1542+ * (b) AMT is active
1543+ * (c) SoL/IDER session is active */
1544+ if (!adapter->wol && hw->mac_type >= iegbe_82540 &&
1545+ hw->media_type == iegbe_media_type_copper) {
1546+ u16 mii_reg = 0;
1547+
1548+ switch (hw->mac_type) {
1549+ case iegbe_82540:
1550+ case iegbe_82545:
1551+ case iegbe_82545_rev_3:
1552+ case iegbe_82546:
1553+ case iegbe_82546_rev_3:
1554+ case iegbe_82541:
1555+ case iegbe_82541_rev_2:
1556+ case iegbe_82547:
1557+ case iegbe_82547_rev_2:
1558+ if (E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)
1559+ goto out;
1560+ break;
1561+ case iegbe_82571:
1562+ case iegbe_82572:
1563+ case iegbe_82573:
1564+ if (iegbe_check_mng_mode(hw) ||
1565+ iegbe_check_phy_reset_block(hw))
1566+ goto out;
1567+ break;
1568+ default:
1569+ goto out;
1570+ }
1571+ iegbe_read_phy_reg(hw, PHY_CTRL, &mii_reg);
1572         mii_reg |= MII_CR_POWER_DOWN;
1573- iegbe_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
1574+ iegbe_write_phy_reg(hw, PHY_CTRL, mii_reg);
1575         mdelay(1);
1576     }
1577+out:
1578+ return;
1579 }
1580 
1581-void
1582-iegbe_reset(struct iegbe_adapter *adapter)
1583+void iegbe_down(struct iegbe_adapter *adapter)
1584 {
1585- struct net_device *netdev = adapter->netdev;
1586- uint32_t pba, manc;
1587- uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
1588- uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
1589+ struct net_device *netdev = adapter->netdev;
1590 
1591+ /* signal that we're down so the interrupt handler does not
1592+ * reschedule our watchdog timer */
1593+ set_bit(__E1000_DOWN, &adapter->flags);
1594 
1595- /* Repartition Pba for greater than 9k mtu
1596- * To take effect CTRL.RST is required.
1597- */
1598+ napi_disable(&adapter->napi);
1599 
1600- switch (adapter->hw.mac_type) {
1601- case iegbe_82547:
1602- case iegbe_82547_rev_2:
1603- pba = E1000_PBA_30K;
1604- break;
1605- case iegbe_82571:
1606- case iegbe_82572:
1607- pba = E1000_PBA_38K;
1608- break;
1609- case iegbe_82573:
1610- pba = E1000_PBA_12K;
1611+ iegbe_irq_disable(adapter);
1612+
1613+ del_timer_sync(&adapter->tx_fifo_stall_timer);
1614+ del_timer_sync(&adapter->watchdog_timer);
1615+ del_timer_sync(&adapter->phy_info_timer);
1616+
1617+ netdev->tx_queue_len = adapter->tx_queue_len;
1618+ adapter->link_speed = 0;
1619+ adapter->link_duplex = 0;
1620+ netif_carrier_off(netdev);
1621+ netif_stop_queue(netdev);
1622+
1623+ iegbe_reset(adapter);
1624+ iegbe_clean_all_tx_rings(adapter);
1625+ iegbe_clean_all_rx_rings(adapter);
1626+}
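Note the ordering in iegbe_down(): __E1000_DOWN is published first, and only then are NAPI, interrupts and the timers torn down, so any watchdog or interrupt work racing with the teardown can test the flag and decline to re-arm itself. A minimal userspace model of that handshake; the flag and the callback bodies are illustrative, not the driver's code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool down_flag = false;   /* models __E1000_DOWN in adapter->flags */

    /* models the watchdog callback: it must not re-arm once down_flag is set */
    static void watchdog_fires(void)
    {
        if (atomic_load(&down_flag))
            return;                         /* iegbe_down() already started */
        printf("watchdog: link still supervised, re-arming\n");
    }

    static void bring_down(void)
    {
        atomic_store(&down_flag, true);     /* step 1: publish "down"... */
        /* ...only then disable NAPI, IRQs and delete timers, as iegbe_down() does */
    }

    int main(void)
    {
        watchdog_fires();   /* re-arms */
        bring_down();
        watchdog_fires();   /* sees the flag and bails out */
        return 0;
    }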
1627+void iegbe_reinit_locked(struct iegbe_adapter *adapter)
1628+{
1629+ WARN_ON(in_interrupt());
1630+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
1631+ msleep(1);
1632+ iegbe_down(adapter);
1633+ iegbe_up(adapter);
1634+ clear_bit(__E1000_RESETTING, &adapter->flags);
1635+}
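iegbe_reinit_locked() serializes overlapping resets with a test-and-set loop on __E1000_RESETTING instead of a mutex. A compilable userspace model of the idiom, where C11's atomic_flag stands in for test_and_set_bit() and nanosleep() for msleep(1):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_flag resetting = ATOMIC_FLAG_INIT; /* models __E1000_RESETTING */

    static void down(void) { puts("down"); }
    static void up(void)   { puts("up");   }

    /* mirrors iegbe_reinit_locked(): spin politely until we own the reset */
    static void reinit_locked(void)
    {
        while (atomic_flag_test_and_set(&resetting)) {
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 }; /* ~1 ms */
            nanosleep(&ts, NULL);
        }
        down();
        up();
        atomic_flag_clear(&resetting);      /* clear_bit(__E1000_RESETTING, ...) */
    }

    int main(void) { reinit_locked(); return 0; }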
1636+
1637+void iegbe_reset(struct iegbe_adapter *adapter)
1638+{
1639+ struct iegbe_hw *hw = &adapter->hw;
1640+ u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1641+ u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
1642+ bool legacy_pba_adjust = false;
1643+
1644+ /* Repartition Pba for greater than 9k mtu
1645+ * To take effect CTRL.RST is required.
1646+ */
1647+
1648+ switch (hw->mac_type) {
1649+ case iegbe_82542_rev2_0:
1650+ case iegbe_82542_rev2_1:
1651+ case iegbe_82543:
1652+ case iegbe_82544:
1653+ case iegbe_82540:
1654+ case iegbe_82541:
1655+ case iegbe_82541_rev_2:
1656+ case iegbe_icp_xxxx:
1657+ legacy_pba_adjust = true;
1658+ pba = E1000_PBA_48K;
1659         break;
1660- default:
1661+ case iegbe_82545:
1662+ case iegbe_82545_rev_3:
1663+ case iegbe_82546:
1664+ case iegbe_82546_rev_3:
1665         pba = E1000_PBA_48K;
1666         break;
1667- }
1668+ case iegbe_82547:
1669+ case iegbe_82573:
1670+ case iegbe_82547_rev_2:
1671+ legacy_pba_adjust = true;
1672+ pba = E1000_PBA_30K;
1673+ break;
1674+ case iegbe_82571:
1675+ case iegbe_82572:
1676+ case iegbe_undefined:
1677+ case iegbe_num_macs:
1678+ break;
1679+ }
1680+
1681+ if (legacy_pba_adjust) {
1682+ if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
1683+ pba -= 8; /* allocate more FIFO for Tx */
1684+ /* send an XOFF when there is enough space in the
1685+ * Rx FIFO to hold one extra full size Rx packet
1686+ */
1687 
1688- if((adapter->hw.mac_type != iegbe_82573) &&
1689- (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
1690- pba -= 0x8; /* allocate more FIFO for Tx */
1691- /* send an XOFF when there is enough space in the
1692- * Rx FIFO to hold one extra full size Rx packet
1693- */
1694- fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
1695- ETHERNET_FCS_SIZE + 0x1;
1696- fc_low_water_mark = fc_high_water_mark + 0x8;
1697- }
1698 
1699+ if (hw->mac_type == iegbe_82547) {
1700+ adapter->tx_fifo_head = 0;
1701+ adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
1702+ adapter->tx_fifo_size =
1703+ (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
1704+ atomic_set(&adapter->tx_fifo_stall, 0);
1705+ }
1706+ } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
1707+ E1000_WRITE_REG(&adapter->hw, PBA, pba);
1708+
1709+ /* To maintain wire speed transmits, the Tx FIFO should be
1710+ * large enough to accommodate two full transmit packets,
1711+ * rounded up to the next 1KB and expressed in KB. Likewise,
1712+ * the Rx FIFO should be large enough to accommodate at least
1713+ * one full receive packet and is similarly rounded up and
1714+ * expressed in KB. */
1715+ pba = E1000_READ_REG(&adapter->hw, PBA);
1716+ /* upper 16 bits has Tx packet buffer allocation size in KB */
1717+ tx_space = pba >> 16;
1718+ /* lower 16 bits has Rx packet buffer allocation size in KB */
1719+ pba &= 0xffff;
1720+ /* don't include ethernet FCS because hardware appends/strips */
1721+ min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
1722+ VLAN_TAG_SIZE;
1723+ min_tx_space = min_rx_space;
1724+ min_tx_space *= 2;
1725+ min_tx_space = ALIGN(min_tx_space, 1024);
1726+ min_tx_space >>= 10;
1727+ min_rx_space = ALIGN(min_rx_space, 1024);
1728+ min_rx_space >>= 10;
1729+
1730+ /* If current Tx allocation is less than the min Tx FIFO size,
1731+ * and the min Tx FIFO size is less than the current Rx FIFO
1732+ * allocation, take space away from current Rx allocation */
1733+ if (tx_space < min_tx_space &&
1734+ ((min_tx_space - tx_space) < pba)) {
1735+ pba = pba - (min_tx_space - tx_space);
1736+
1737+ /* PCI/PCIx hardware has PBA alignment constraints */
1738+ switch (hw->mac_type) {
1739+ case iegbe_82545 ... iegbe_82546_rev_3:
1740+ pba &= ~(E1000_PBA_8K - 1);
1741+ break;
1742+ default:
1743+ break;
1744+ }
1745 
1746- if(adapter->hw.mac_type == iegbe_82547) {
1747- adapter->tx_fifo_head = 0;
1748- adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
1749- adapter->tx_fifo_size =
1750- (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
1751- atomic_set(&adapter->tx_fifo_stall, 0);
1752+ /* if short on rx space, rx wins and must trump tx
1753+ * adjustment or use Early Receive if available */
1754+ if (pba < min_rx_space) {
1755+ switch (hw->mac_type) {
1756+ case iegbe_82573:
1757+ /* ERT enabled in iegbe_configure_rx */
1758+ break;
1759+ default:
1760+ pba = min_rx_space;
1761+ break;
1762+ }
1763+ }
1764+ }
1765     }
1766 
1767     E1000_WRITE_REG(&adapter->hw, PBA, pba);
1768 
1769     /* flow control settings */
1770- adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
1771- fc_high_water_mark;
1772- adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
1773- fc_low_water_mark;
1774- adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
1775- adapter->hw.fc_send_xon = 1;
1776- adapter->hw.fc = adapter->hw.original_fc;
1777+ /* Set the FC high water mark to 90% of the FIFO size.
1778+ * The hardware requires the low 3 bits to be clear. */
1779+ fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
1780+ /* We can't use 90% on small FIFOs because the remainder
1781+ * would be less than 1 full frame. In this case, we size
1782+ * it to allow at least a full frame above the high water
1783+ * mark. */
1784+ if (pba < E1000_PBA_16K)
1785+ fc_high_water_mark = (pba * 1024) - 1600;
1786+
1787+ hw->fc_high_water = fc_high_water_mark;
1788+ hw->fc_low_water = fc_high_water_mark - 8;
1789+ hw->fc_pause_time = E1000_FC_PAUSE_TIME;
1790+ hw->fc_send_xon = 1;
1791+ hw->fc = hw->original_fc;
1792 
1793     /* Allow time for pending master requests to run */
1794- iegbe_reset_hw(&adapter->hw);
1795- if(adapter->hw.mac_type >= iegbe_82544){
1796+ iegbe_reset_hw(hw);
1797+ if (hw->mac_type >= iegbe_82544)
1798         E1000_WRITE_REG(&adapter->hw, WUC, 0);
1799- }
1800- if(iegbe_init_hw(&adapter->hw)) {
1801+
1802+ if (iegbe_init_hw(hw))
1803         DPRINTK(PROBE, ERR, "Hardware Error\n");
1804- }
1805-#ifdef NETIF_F_HW_VLAN_TX
1806     iegbe_update_mng_vlan(adapter);
1807-#endif
1808+
1809+ /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
1810+ if (hw->mac_type >= iegbe_82544 &&
1811+ hw->mac_type <= iegbe_82547_rev_2 &&
1812+ hw->autoneg == 1 &&
1813+ hw->autoneg_advertised == ADVERTISE_1000_FULL) {
1814+ u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
1815+ /* clear phy power management bit if we are in gig only mode,
1816+ * which if enabled will attempt negotiation to 100Mb, which
1817+ * can cause a loss of link at power off or driver unload */
1818+ ctrl &= ~E1000_CTRL_SWDPIN3;
1819+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
1820+ }
1821+
1822     /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1823     E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
1824 
1825- iegbe_reset_adaptive(&adapter->hw);
1826- iegbe_phy_get_info(&adapter->hw, &adapter->phy_info);
1827- if(adapter->en_mng_pt) {
1828- manc = E1000_READ_REG(&adapter->hw, MANC);
1829- manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
1830- E1000_WRITE_REG(&adapter->hw, MANC, manc);
1831+ iegbe_reset_adaptive(hw);
1832+ iegbe_phy_get_info(hw, &adapter->phy_info);
1833+
1834+ if (!adapter->smart_power_down &&
1835+ (hw->mac_type == iegbe_82571 ||
1836+ hw->mac_type == iegbe_82572)) {
1837+ u16 phy_data = 0;
1838+ /* speed up time to link by disabling smart power down, ignore
1839+ * the return value of this function because there is nothing
1840+ * different we would do if it failed */
1841+ iegbe_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1842+ &phy_data);
1843+ phy_data &= ~IGP02E1000_PM_SPD;
1844+ iegbe_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1845+ phy_data);
1846+ }
1847+
1848+}
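For the min_tx_space/min_rx_space computation above, a worked example helps: with a 1500-byte MTU, and assuming the usual e1000 values ENET_HEADER_SIZE = 14 and VLAN_TAG_SIZE = 4, min_rx_space starts at 1500 + 14 + 4 = 1518 bytes; doubling gives 3036, which ALIGN rounds up to 3072 and the shift turns into 3 KB, while 1518 rounds to 2048 and becomes 2 KB. A one-file check of that arithmetic:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1)) /* kernel-style power-of-two round-up */

    int main(void)
    {
        unsigned mtu = 1500, hdr = 14, vlan = 4;         /* assumed header/VLAN sizes */
        unsigned min_rx = mtu + hdr + vlan;              /* 1518 bytes */
        unsigned min_tx = ALIGN(2 * min_rx, 1024) >> 10; /* 3036 -> 3072 -> 3 KB */
        min_rx = ALIGN(min_rx, 1024) >> 10;              /* 1518 -> 2048 -> 2 KB */
        printf("min_tx_space=%u KB min_rx_space=%u KB\n", min_tx, min_rx);
        return 0;
    }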
1849+
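The flow-control arithmetic is also easy to sanity-check: pba is in KB, so (pba * 9216) / 10 is 90% of the FIFO in bytes, and the & 0xFFF8 mask clears the three low bits the hardware needs clear. For the 48 KB case that is 44236, masked to 44232, with the low-water mark sitting 8 bytes below at 44224. A small check, including the small-FIFO branch; the 16 KB threshold here is assumed to match E1000_PBA_16K:

    #include <stdio.h>

    int main(void)
    {
        unsigned pba = 48;                          /* packet buffer size in KB */
        unsigned hi = ((pba * 9216) / 10) & 0xFFF8; /* 90% of FIFO, low 3 bits clear */
        if (pba < 16)                               /* models pba < E1000_PBA_16K */
            hi = pba * 1024 - 1600;                 /* leave room for one full frame */
        printf("fc_high_water=%u fc_low_water=%u\n", hi, hi - 8);
        return 0;
    }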
1850+/**
1851+ * Dump the eeprom for users having checksum issues
1852+ **/
1853+static void iegbe_dump_eeprom(struct iegbe_adapter *adapter)
1854+{
1855+ struct net_device *netdev = adapter->netdev;
1856+ struct ethtool_eeprom eeprom;
1857+ const struct ethtool_ops *ops = netdev->ethtool_ops;
1858+ u8 *data;
1859+ int i;
1860+ u16 csum_old, csum_new = 0;
1861+
1862+ eeprom.len = ops->get_eeprom_len(netdev);
1863+ eeprom.offset = 0;
1864+
1865+ data = kmalloc(eeprom.len, GFP_KERNEL);
1866+ if (!data) {
1867+ printk(KERN_ERR "Unable to allocate memory to dump EEPROM"
1868+ " data\n");
1869+ return;
1870     }
1871+
1872+ ops->get_eeprom(netdev, &eeprom, data);
1873+
1874+ csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
1875+ (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
1876+ for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
1877+ csum_new += data[i] + (data[i + 1] << 8);
1878+ csum_new = EEPROM_SUM - csum_new;
1879+
1880+ printk(KERN_ERR "/*********************/\n");
1881+ printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old);
1882+ printk(KERN_ERR "Calculated : 0x%04x\n", csum_new);
1883+
1884+ printk(KERN_ERR "Offset Values\n");
1885+ printk(KERN_ERR "======== ======\n");
1886+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
1887+
1888+ printk(KERN_ERR "Include this output when contacting your support "
1889+ "provider.\n");
1890+ printk(KERN_ERR "This is not a software error! Something bad "
1891+ "happened to your hardware or\n");
1892+ printk(KERN_ERR "EEPROM image. Ignoring this "
1893+ "problem could result in further problems,\n");
1894+ printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n");
1895+ printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, "
1896+ "which is invalid\n");
1897+ printk(KERN_ERR "and requires you to set the proper MAC "
1898+ "address manually before continuing\n");
1899+ printk(KERN_ERR "to enable this network device.\n");
1900+ printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
1901+ "to your hardware vendor\n");
1902+ printk(KERN_ERR "or Intel Customer Support.\n");
1903+ printk(KERN_ERR "/*********************/\n");
1904+
1905+ kfree(data);
1906 }
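The checksum convention this dump is debugging is simple: the 16-bit little-endian words from offset 0 up to the checksum word at EEPROM_CHECKSUM_REG must sum, modulo 2^16, to EEPROM_SUM, so the stored checksum word is EEPROM_SUM minus the sum of the preceding words. A self-contained recomputation over a raw byte image, mirroring the loop above; the register offset 0x3F and the constant 0xBABA are the usual e1000-family values:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define EEPROM_CHECKSUM_REG 0x3F   /* word offset of the checksum (e1000-family layout) */
    #define EEPROM_SUM          0xBABA /* all words 0..0x3F must sum to this */

    /* recompute the checksum word from a little-endian EEPROM byte image */
    static uint16_t eeprom_checksum(const uint8_t *data)
    {
        uint16_t sum = 0;
        for (int i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
            sum += data[i] | (data[i + 1] << 8);
        return (uint16_t)(EEPROM_SUM - sum);
    }

    int main(void)
    {
        uint8_t img[EEPROM_CHECKSUM_REG * 2 + 2];
        memset(img, 0, sizeof(img));                 /* a blank image... */
        uint16_t c = eeprom_checksum(img);           /* ...checksums to 0xBABA */
        img[EEPROM_CHECKSUM_REG * 2] = c & 0xFF;
        img[EEPROM_CHECKSUM_REG * 2 + 1] = c >> 8;
        printf("stored checksum word: 0x%04x\n", c);
        return 0;
    }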
1907 
1908 /**
1909@@ -721,184 +735,166 @@ iegbe_reset(struct iegbe_adapter *adapte
1910  * The OS initialization, configuring of the adapter private structure,
1911  * and a hardware reset occur.
1912  **/
1913-
1914-static int __devinit
1915-iegbe_probe(struct pci_dev *pdev,
1916+static int __devinit iegbe_probe(struct pci_dev *pdev,
1917             const struct pci_device_id *ent)
1918 {
1919- struct net_device *netdev;
1920- struct iegbe_adapter *adapter;
1921- unsigned long mmio_start, mmio_len;
1922- uint32_t ctrl_ext;
1923- uint32_t swsm;
1924+ struct net_device *netdev;
1925+ struct iegbe_adapter *adapter;
1926+ struct iegbe_hw *hw;
1927 
1928     static int cards_found = 0;
1929+ int i, err, pci_using_dac;
1930+ u16 eeprom_data = 0;
1931+ u16 eeprom_apme_mask = E1000_EEPROM_APME;
1932+ int bars;
1933+ DECLARE_MAC_BUF(mac);
1934 
1935- int i, err, pci_using_dac;
1936- uint16_t eeprom_data = 0;
1937- uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
1938+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
1939+ err = pci_enable_device(pdev);
1940 
1941+ if (err)
1942+ return err;
1943 
1944- if((err = pci_enable_device(pdev))) {
1945- return err;
1946- }
1947- if(!(err = pci_set_dma_mask(pdev, PCI_DMA_64BIT))) {
1948+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1949+ !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1950         pci_using_dac = 1;
1951- } else {
1952- if((err = pci_set_dma_mask(pdev, PCI_DMA_32BIT))) {
1953- E1000_ERR("No usable DMA configuration, aborting\n");
1954- return err;
1955- }
1956+ } else {
1957+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1958+ if (err) {
1959+ err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1960+ if (err) {
1961+ E1000_ERR("No usable DMA configuration, "
1962+ "aborting\n");
1963+ goto err_dma;
1964+ }
1965+ }
1966         pci_using_dac = 0;
1967- }
1968-
1969- if((err = pci_request_regions(pdev, iegbe_driver_name))) {
1970- return err;
1971     }
1972- pci_set_master(pdev);
1973 
1974- netdev = alloc_etherdev(sizeof(struct iegbe_adapter));
1975- if(!netdev) {
1976- err = -ENOMEM;
1977- goto err_alloc_etherdev;
1978- }
1979+ err = pci_request_selected_regions(pdev, bars, iegbe_driver_name);
1980+ if (err)
1981+ goto err_pci_reg;
1982+
1983+ pci_set_master(pdev);
1984+
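The reworked DMA setup tries the 64-bit mask for both streaming and consistent mappings and only then walks down to 32 bits; pci_using_dac records which side won so NETIF_F_HIGHDMA can be advertised later. A stripped-down model of the decision ladder, with set_mask() standing in for pci_set_dma_mask()/pci_set_consistent_dma_mask() on a platform that only accepts 32-bit masks:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in: pretend the platform only supports 32-bit DMA */
    static bool set_mask(unsigned bits) { return bits <= 32; }

    int main(void)
    {
        int using_dac;
        if (set_mask(64) && set_mask(64)) {        /* streaming + consistent, as in probe */
            using_dac = 1;
        } else if (set_mask(32) || set_mask(32)) { /* fall back, as the patch does */
            using_dac = 0;
        } else {
            fprintf(stderr, "No usable DMA configuration, aborting\n");
            return 1;
        }
        printf("pci_using_dac=%d\n", using_dac);
        return 0;
    }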
1985+ err = -ENOMEM;
1986+ netdev = alloc_etherdev(sizeof(struct iegbe_adapter));
1987+ if (!netdev)
1988+ goto err_alloc_etherdev;
1989 
1990- SET_MODULE_OWNER(netdev);
1991     SET_NETDEV_DEV(netdev, &pdev->dev);
1992 
1993- pci_set_drvdata(pdev, netdev);
1994- adapter = netdev_priv(netdev);
1995- adapter->netdev = netdev;
1996- adapter->pdev = pdev;
1997- adapter->hw.back = adapter;
1998- adapter->msg_enable = (0x1 << debug) - 0x1;
1999-
2000- mmio_start = pci_resource_start(pdev, BAR_0);
2001- mmio_len = pci_resource_len(pdev, BAR_0);
2002-
2003- adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
2004- if(!adapter->hw.hw_addr) {
2005- err = -EIO;
2006- goto err_ioremap;
2007- }
2008-
2009- for(i = BAR_1; i <= BAR_5; i++) {
2010- if(pci_resource_len(pdev, i) == 0) {
2011- continue;
2012- }
2013- if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2014- adapter->hw.io_base = pci_resource_start(pdev, i);
2015- break;
2016- }
2017- }
2018-
2019- netdev->open = &iegbe_open;
2020- netdev->stop = &iegbe_close;
2021- netdev->hard_start_xmit = &iegbe_xmit_frame;
2022- netdev->get_stats = &iegbe_get_stats;
2023- netdev->set_multicast_list = &iegbe_set_multi;
2024+ pci_set_drvdata(pdev, netdev);
2025+ adapter = netdev_priv(netdev);
2026+ adapter->netdev = netdev;
2027+ adapter->pdev = pdev;
2028+ adapter->msg_enable = (1 << debug) - 1;
2029+ adapter->bars = bars;
2030+
2031+ hw = &adapter->hw;
2032+ hw->back = adapter;
2033+
2034+ err = -EIO;
2035+ hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
2036+ pci_resource_len(pdev, BAR_0));
2037+ if (!hw->hw_addr)
2038+ goto err_ioremap;
2039+
2040+ netdev->open = &iegbe_open;
2041+ netdev->stop = &iegbe_close;
2042+ netdev->hard_start_xmit = &iegbe_xmit_frame;
2043+ netdev->get_stats = &iegbe_get_stats;
2044+ netdev->set_rx_mode = &iegbe_set_rx_mode;
2045     netdev->set_mac_address = &iegbe_set_mac;
2046- netdev->change_mtu = &iegbe_change_mtu;
2047- netdev->do_ioctl = &iegbe_ioctl;
2048+ netdev->change_mtu = &iegbe_change_mtu;
2049+ netdev->do_ioctl = &iegbe_ioctl;
2050     set_ethtool_ops(netdev);
2051-#ifdef HAVE_TX_TIMEOUT
2052- netdev->tx_timeout = &iegbe_tx_timeout;
2053- netdev->watchdog_timeo = 0x5 * HZ;
2054-#endif
2055-#ifdef CONFIG_E1000_NAPI
2056- netdev->poll = &iegbe_clean;
2057- netdev->weight = 0x40;
2058-#endif
2059-#ifdef NETIF_F_HW_VLAN_TX
2060- netdev->vlan_rx_register = iegbe_vlan_rx_register;
2061- netdev->vlan_rx_add_vid = iegbe_vlan_rx_add_vid;
2062- netdev->vlan_rx_kill_vid = iegbe_vlan_rx_kill_vid;
2063-#endif
2064+ netdev->tx_timeout = &iegbe_tx_timeout;
2065+ netdev->watchdog_timeo = 5 * HZ;
2066+ netif_napi_add(netdev, &adapter->napi, iegbe_clean, 64);
2067+ netdev->vlan_rx_register = iegbe_vlan_rx_register;
2068+ netdev->vlan_rx_add_vid = iegbe_vlan_rx_add_vid;
2069+ netdev->vlan_rx_kill_vid = iegbe_vlan_rx_kill_vid;
2070 #ifdef CONFIG_NET_POLL_CONTROLLER
2071- netdev->poll_controller = iegbe_netpoll;
2072+ netdev->poll_controller = iegbe_netpoll;
2073 #endif
2074- strcpy(netdev->name, pci_name(pdev));
2075+ strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2076 
2077- netdev->mem_start = mmio_start;
2078- netdev->mem_end = mmio_start + mmio_len;
2079- netdev->base_addr = adapter->hw.io_base;
2080 
2081- adapter->bd_number = cards_found;
2082+ adapter->bd_number = cards_found;
2083 
2084- /* setup the private structure */
2085+ /* setup the private structure */
2086 
2087- if((err = iegbe_sw_init(adapter))) {
2088- goto err_sw_init;
2089- }
2090- if((err = iegbe_check_phy_reset_block(&adapter->hw))) {
2091- DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
2092- }
2093-#ifdef MAX_SKB_FRAGS
2094- if(adapter->hw.mac_type >= iegbe_82543) {
2095-#ifdef NETIF_F_HW_VLAN_TX
2096- netdev->features = NETIF_F_SG |
2097- NETIF_F_HW_CSUM |
2098- NETIF_F_HW_VLAN_TX |
2099- NETIF_F_HW_VLAN_RX |
2100- NETIF_F_HW_VLAN_FILTER;
2101-#else
2102- netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
2103-#endif
2104- }
2105+ err = iegbe_sw_init(adapter);
2106+ if (err)
2107+ goto err_sw_init;
2108+ err = -EIO;
2109+ if (iegbe_check_phy_reset_block(hw))
2110+ DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
2111 
2112-#ifdef NETIF_F_TSO
2113- if((adapter->hw.mac_type >= iegbe_82544) &&
2114- (adapter->hw.mac_type != iegbe_82547)) {
2115- netdev->features |= NETIF_F_TSO;
2116- }
2117-#ifdef NETIF_F_TSO_IPV6
2118- if(adapter->hw.mac_type > iegbe_82547_rev_2) {
2119- netdev->features |= NETIF_F_TSO_IPV6;
2120- }
2121-#endif
2122-#endif
2123- if(pci_using_dac) {
2124- netdev->features |= NETIF_F_HIGHDMA;
2125+ if (hw->mac_type >= iegbe_82543) {
2126+ netdev->features = NETIF_F_SG |
2127+ NETIF_F_HW_CSUM |
2128+ NETIF_F_HW_VLAN_TX |
2129+ NETIF_F_HW_VLAN_RX |
2130+ NETIF_F_HW_VLAN_FILTER;
2131     }
2132-#endif
2133-#ifdef NETIF_F_LLTX
2134- netdev->features |= NETIF_F_LLTX;
2135-#endif
2136 
2137- adapter->en_mng_pt = iegbe_enable_mng_pass_thru(&adapter->hw);
2138+ if ((hw->mac_type >= iegbe_82544) &&
2139+ (hw->mac_type != iegbe_82547))
2140+ netdev->features |= NETIF_F_TSO;
2141 
2142- /* before reading the EEPROM, reset the controller to
2143- * put the device in a known good starting state */
2144+ if (hw->mac_type > iegbe_82547_rev_2)
2145+ netdev->features |= NETIF_F_TSO6;
2146+ if (pci_using_dac)
2147+ netdev->features |= NETIF_F_HIGHDMA;
2148+
2149+ netdev->features |= NETIF_F_LLTX;
2150 
2151- iegbe_reset_hw(&adapter->hw);
2152+ adapter->en_mng_pt = iegbe_enable_mng_pass_thru(hw);
2153 
2154- /* make sure the EEPROM is good */
2155- if(iegbe_validate_eeprom_checksum(&adapter->hw) < 0) {
2156- DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
2157- err = -EIO;
2158+ /* initialize eeprom parameters */
2159+
2160+ if (iegbe_init_eeprom_params(hw)) {
2161+ E1000_ERR("EEPROM initialization failed\n");
2162         goto err_eeprom;
2163     }
2164 
2165- /* copy the MAC address out of the EEPROM */
2166+ /* before reading the EEPROM, reset the controller to
2167+ * put the device in a known good starting state */
2168 
2169- if(iegbe_read_mac_addr(&adapter->hw)) {
2170- DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
2171- }
2172- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2173+ iegbe_reset_hw(hw);
2174 
2175- if(!is_valid_ether_addr(netdev->dev_addr)) {
2176- DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
2177- err = -EIO;
2178- goto err_eeprom;
2179- }
2180+ /* make sure the EEPROM is good */
2181+ if (iegbe_validate_eeprom_checksum(hw) < 0) {
2182+ DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
2183+ iegbe_dump_eeprom(adapter);
2184+ /*
2185+ * set MAC address to all zeroes to invalidate and temporarily
2186+ * disable this device for the user. This blocks regular
2187+ * traffic while still permitting ethtool ioctls from reaching
2188+ * the hardware as well as allowing the user to run the
2189+ * interface after manually setting a hw addr using
2190+ * `ip set address`
2191+ */
2192+ memset(hw->mac_addr, 0, netdev->addr_len);
2193+ } else {
2194+ /* copy the MAC address out of the EEPROM */
2195+ if (iegbe_read_mac_addr(hw))
2196+ DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
2197+ }
2198+ /* don't block initialization here due to bad MAC address */
2199+ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
2200+ memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
2201 
2202- iegbe_read_part_num(&adapter->hw, &(adapter->part_num));
2203+ if (!is_valid_ether_addr(netdev->perm_addr))
2204+ DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
2205 
2206- iegbe_get_bus_info(&adapter->hw);
2207+ iegbe_get_bus_info(hw);
2208 
2209     init_timer(&adapter->tx_fifo_stall_timer);
2210     adapter->tx_fifo_stall_timer.function = &iegbe_82547_tx_fifo_stall;
2211- adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
2212+ adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
2213 
2214     init_timer(&adapter->watchdog_timer);
2215     adapter->watchdog_timer.function = &iegbe_watchdog;
2216@@ -906,75 +902,50 @@ iegbe_probe(struct pci_dev *pdev,
2217 
2218     init_timer(&adapter->phy_info_timer);
2219     adapter->phy_info_timer.function = &iegbe_update_phy_info;
2220- adapter->phy_info_timer.data = (unsigned long) adapter;
2221-
2222- INIT_WORK(&adapter->tx_timeout_task,
2223- (void (*)(void *))iegbe_tx_timeout_task, netdev);
2224+ adapter->phy_info_timer.data = (unsigned long)adapter;
2225 
2226- /* we're going to reset, so assume we have no link for now */
2227-
2228- netif_carrier_off(netdev);
2229- netif_stop_queue(netdev);
2230+ INIT_WORK(&adapter->reset_task, iegbe_reset_task);
2231 
2232- iegbe_check_options(adapter);
2233+ iegbe_check_options(adapter);
2234 
2235- /* Initial Wake on LAN setting
2236- * If APM wake is enabled in the EEPROM,
2237- * enable the ACPI Magic Packet filter
2238- */
2239+ /* Initial Wake on LAN setting
2240+ * If APM wake is enabled in the EEPROM,
2241+ * enable the ACPI Magic Packet filter
2242+ */
2243 
2244- switch(adapter->hw.mac_type) {
2245- case iegbe_82542_rev2_0:
2246- case iegbe_82542_rev2_1:
2247- case iegbe_82543:
2248- break;
2249- case iegbe_82544:
2250- iegbe_read_eeprom(&adapter->hw,
2251- EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
2252- eeprom_apme_mask = E1000_EEPROM_82544_APM;
2253- break;
2254+ switch(adapter->hw.mac_type) {
2255+ case iegbe_82542_rev2_0:
2256+ case iegbe_82542_rev2_1:
2257+ case iegbe_82543:
2258+ break;
2259+ case iegbe_82544:
2260+ iegbe_read_eeprom(&adapter->hw,
2261+ EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
2262+ eeprom_apme_mask = E1000_EEPROM_82544_APM;
2263+ break;
2264     case iegbe_icp_xxxx:
2265- iegbe_read_eeprom(&adapter->hw,
2266- EEPROM_INIT_CONTROL3_ICP_xxxx(adapter->bd_number),
2267- 1, &eeprom_data);
2268- eeprom_apme_mask = EEPROM_CTRL3_APME_ICP_xxxx;
2269- break;
2270- case iegbe_82546:
2271- case iegbe_82546_rev_3:
2272- if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
2273- && (adapter->hw.media_type == iegbe_media_type_copper)) {
2274- iegbe_read_eeprom(&adapter->hw,
2275- EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2276- break;
2277- }
2278- /* Fall Through */
2279- default:
2280- iegbe_read_eeprom(&adapter->hw,
2281- EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
2282- break;
2283- }
2284+ iegbe_read_eeprom(&adapter->hw,
2285+ EEPROM_INIT_CONTROL3_ICP_xxxx(adapter->bd_number),
2286+ 1, &eeprom_data);
2287+ eeprom_apme_mask = EEPROM_CTRL3_APME_ICP_xxxx;
2288+ break;
2289+ case iegbe_82546:
2290+ case iegbe_82546_rev_3:
2291+ if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
2292+ && (adapter->hw.media_type == iegbe_media_type_copper)) {
2293+ iegbe_read_eeprom(&adapter->hw,
2294+ EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2295+ break;
2296+ }
2297+ /* Fall Through */
2298+ default:
2299+ iegbe_read_eeprom(&adapter->hw,
2300+ EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
2301+ break;
2302+ }
2303     if(eeprom_data & eeprom_apme_mask) {
2304- adapter->wol |= E1000_WUFC_MAG;
2305+ adapter->wol |= E1000_WUFC_MAG;
2306     }
2307- /* reset the hardware with the new settings */
2308- iegbe_reset(adapter);
2309-
2310- /* Let firmware know the driver has taken over */
2311- switch(adapter->hw.mac_type) {
2312- case iegbe_82571:
2313- case iegbe_82572:
2314- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
2315- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
2316- ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2317- break;
2318- case iegbe_82573:
2319- swsm = E1000_READ_REG(&adapter->hw, SWSM);
2320- E1000_WRITE_REG(&adapter->hw, SWSM,
2321- swsm | E1000_SWSM_DRV_LOAD);
2322- break;
2323- default:
2324- break;
2325- }
2326 
2327     /* The ICP_xxxx device has multiple, duplicate interrupt
2328      * registers, so disable all but the first one
2329@@ -987,24 +958,40 @@ iegbe_probe(struct pci_dev *pdev,
2330         E1000_WRITE_REG(&adapter->hw, IMC2, ~0UL);
2331     }
2332 
2333- strcpy(netdev->name, "eth%d");
2334- if((err = register_netdev(netdev))) {
2335- goto err_register;
2336- }
2337+ iegbe_reset(adapter);
2338+ netif_carrier_off(netdev);
2339+ netif_stop_queue(netdev);
2340+ strcpy(netdev->name, "eth%d");
2341+ err = register_netdev(netdev);
2342+ if (err)
2343+ goto err_register;
2344+
2345     DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
2346 
2347- cards_found++;
2348- return 0;
2349+ cards_found++;
2350+ return 0;
2351 
2352 err_register:
2353-err_sw_init:
2354 err_eeprom:
2355- iounmap(adapter->hw.hw_addr);
2356+ if (!iegbe_check_phy_reset_block(hw))
2357+ iegbe_phy_hw_reset(hw);
2358+ if (hw->flash_address)
2359+ iounmap(hw->flash_address);
2360+ for (i = 0; i < adapter->num_rx_queues; i++)
2361+ dev_put(&adapter->polling_netdev[i]);
2362+ kfree(adapter->tx_ring);
2363+ kfree(adapter->rx_ring);
2364+ kfree(adapter->polling_netdev);
2365+err_sw_init:
2366+ iounmap(hw->hw_addr);
2367 err_ioremap:
2368- free_netdev(netdev);
2369+ free_netdev(netdev);
2370 err_alloc_etherdev:
2371- pci_release_regions(pdev);
2372- return err;
2373+ pci_release_selected_regions(pdev, bars);
2374+err_pci_reg:
2375+err_dma:
2376+ pci_disable_device(pdev);
2377+ return err;
2378 }
2379 
2380 /**
2381@@ -1020,64 +1007,36 @@ err_alloc_etherdev:
2382 static void __devexit
2383 iegbe_remove(struct pci_dev *pdev)
2384 {
2385- struct net_device *netdev = pci_get_drvdata(pdev);
2386- struct iegbe_adapter *adapter = netdev_priv(netdev);
2387- uint32_t ctrl_ext;
2388- uint32_t manc, swsm;
2389-#ifdef CONFIG_E1000_NAPI
2390- int i;
2391-#endif
2392-
2393- if(adapter->hw.mac_type >= iegbe_82540
2394- && adapter->hw.mac_type != iegbe_icp_xxxx
2395- && adapter->hw.media_type == iegbe_media_type_copper) {
2396- manc = E1000_READ_REG(&adapter->hw, MANC);
2397- if(manc & E1000_MANC_SMBUS_EN) {
2398- manc |= E1000_MANC_ARP_EN;
2399- E1000_WRITE_REG(&adapter->hw, MANC, manc);
2400- }
2401- }
2402-
2403- switch(adapter->hw.mac_type) {
2404- case iegbe_82571:
2405- case iegbe_82572:
2406- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
2407- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
2408- ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2409- break;
2410- case iegbe_82573:
2411- swsm = E1000_READ_REG(&adapter->hw, SWSM);
2412- E1000_WRITE_REG(&adapter->hw, SWSM,
2413- swsm & ~E1000_SWSM_DRV_LOAD);
2414- break;
2415-
2416- default:
2417- break;
2418- }
2419+ struct net_device *netdev = pci_get_drvdata(pdev);
2420+ struct iegbe_adapter *adapter = netdev_priv(netdev);
2421+ uint32_t manc;
2422+ int i;
2423+
2424+ if(adapter->hw.mac_type >= iegbe_82540
2425+ && adapter->hw.mac_type != iegbe_icp_xxxx
2426+ && adapter->hw.media_type == iegbe_media_type_copper) {
2427+ manc = E1000_READ_REG(&adapter->hw, MANC);
2428+ if(manc & E1000_MANC_SMBUS_EN) {
2429+ manc |= E1000_MANC_ARP_EN;
2430+ E1000_WRITE_REG(&adapter->hw, MANC, manc);
2431+ }
2432+ }
2433 
2434- unregister_netdev(netdev);
2435-#ifdef CONFIG_E1000_NAPI
2436- for (i = 0; i < adapter->num_queues; i++)
2437+ unregister_netdev(netdev);
2438+ for (i = 0x0; i < adapter->num_rx_queues; i++)
2439         dev_put(&adapter->polling_netdev[i]);
2440-#endif
2441 
2442     if(!iegbe_check_phy_reset_block(&adapter->hw)) {
2443- iegbe_phy_hw_reset(&adapter->hw);
2444+ iegbe_phy_hw_reset(&adapter->hw);
2445     }
2446- kfree(adapter->tx_ring);
2447- kfree(adapter->rx_ring);
2448-#ifdef CONFIG_E1000_NAPI
2449- kfree(adapter->polling_netdev);
2450-#endif
2451+ kfree(adapter->tx_ring);
2452+ kfree(adapter->rx_ring);
2453+ kfree(adapter->polling_netdev);
2454 
2455- iounmap(adapter->hw.hw_addr);
2456- pci_release_regions(pdev);
2457+ iounmap(adapter->hw.hw_addr);
2458+ pci_release_regions(pdev);
2459 
2460-#ifdef CONFIG_E1000_MQ
2461- free_percpu(adapter->cpu_netdev);
2462- free_percpu(adapter->cpu_tx_ring);
2463-#endif
2464- free_netdev(netdev);
2465+ free_netdev(netdev);
2466 }
2467 
2468 /**
2469@@ -1092,118 +1051,78 @@ iegbe_remove(struct pci_dev *pdev)
2470 static int __devinit
2471 iegbe_sw_init(struct iegbe_adapter *adapter)
2472 {
2473- struct iegbe_hw *hw = &adapter->hw;
2474- struct net_device *netdev = adapter->netdev;
2475- struct pci_dev *pdev = adapter->pdev;
2476-#ifdef CONFIG_E1000_NAPI
2477- int i;
2478-#endif
2479+ struct iegbe_hw *hw = &adapter->hw;
2480+ struct net_device *netdev = adapter->netdev;
2481+ struct pci_dev *pdev = adapter->pdev;
2482+ int i;
2483 
2484- /* PCI config space info */
2485+ /* PCI config space info */
2486 
2487- hw->vendor_id = pdev->vendor;
2488- hw->device_id = pdev->device;
2489- hw->subsystem_vendor_id = pdev->subsystem_vendor;
2490- hw->subsystem_id = pdev->subsystem_device;
2491+ hw->vendor_id = pdev->vendor;
2492+ hw->device_id = pdev->device;
2493+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
2494+ hw->subsystem_id = pdev->subsystem_device;
2495 
2496- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2497+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
2498 
2499- pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
2500+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
2501 
2502- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2503- adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
2504- hw->max_frame_size = netdev->mtu +
2505- ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
2506- hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
2507+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2508+ adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
2509+ hw->max_frame_size = netdev->mtu +
2510+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
2511+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
2512 
2513- /* identify the MAC */
2514+ /* identify the MAC */
2515 
2516- if(iegbe_set_mac_type(hw)) {
2517+ if (iegbe_set_mac_type(hw)) {
2518         DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
2519         return -EIO;
2520     }
2521 
2522- /* initialize eeprom parameters */
2523-
2524- if(iegbe_init_eeprom_params(hw)) {
2525- E1000_ERR("EEPROM initialization failed\n");
2526- return -EIO;
2527- }
2528-
2529- switch(hw->mac_type) {
2530- default:
2531- break;
2532- case iegbe_82541:
2533- case iegbe_82547:
2534- case iegbe_82541_rev_2:
2535- case iegbe_82547_rev_2:
2536- hw->phy_init_script = 0x1;
2537- break;
2538- }
2539-
2540- iegbe_set_media_type(hw);
2541+ iegbe_set_media_type(hw);
2542 
2543- hw->wait_autoneg_complete = FALSE;
2544- hw->tbi_compatibility_en = TRUE;
2545- hw->adaptive_ifs = TRUE;
2546+ hw->wait_autoneg_complete = FALSE;
2547+ hw->tbi_compatibility_en = TRUE;
2548+ hw->adaptive_ifs = TRUE;
2549 
2550- /* Copper options */
2551+ /* Copper options */
2552 
2553- if(hw->media_type == iegbe_media_type_copper
2554+ if(hw->media_type == iegbe_media_type_copper
2555         || (hw->media_type == iegbe_media_type_oem
2556             && iegbe_oem_phy_is_copper(&adapter->hw))) {
2557- hw->mdix = AUTO_ALL_MODES;
2558- hw->disable_polarity_correction = FALSE;
2559- hw->master_slave = E1000_MASTER_SLAVE;
2560- }
2561+ hw->mdix = AUTO_ALL_MODES;
2562+ hw->disable_polarity_correction = FALSE;
2563+ hw->master_slave = E1000_MASTER_SLAVE;
2564+ }
2565 
2566-#ifdef CONFIG_E1000_MQ
2567- /* Number of supported queues */
2568- switch (hw->mac_type) {
2569- case iegbe_82571:
2570- case iegbe_82572:
2571- adapter->num_queues = 0x2;
2572- break;
2573- default:
2574- adapter->num_queues = 0x1;
2575- break;
2576- }
2577- adapter->num_queues = min(adapter->num_queues, num_online_cpus());
2578-#else
2579- adapter->num_queues = 0x1;
2580-#endif
2581+ adapter->num_tx_queues = 0x1;
2582+ adapter->num_rx_queues = 0x1;
2583 
2584     if (iegbe_alloc_queues(adapter)) {
2585         DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
2586         return -ENOMEM;
2587     }
2588 
2589-#ifdef CONFIG_E1000_NAPI
2590- for (i = 0; i < adapter->num_queues; i++) {
2591+ for (i = 0; i < adapter->num_rx_queues; i++) {
2592         adapter->polling_netdev[i].priv = adapter;
2593- adapter->polling_netdev[i].poll = &iegbe_clean;
2594- adapter->polling_netdev[i].weight = 0x40;
2595         dev_hold(&adapter->polling_netdev[i]);
2596         set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
2597     }
2598-#endif
2599-
2600-#ifdef CONFIG_E1000_MQ
2601- iegbe_setup_queue_mapping(adapter);
2602-#endif
2603+ spin_lock_init(&adapter->tx_queue_lock);
2604 
2605         /*
2606- * for ICP_XXXX style controllers, it is necessary to keep
2607- * track of the last known state of the link to determine if
2608- * the link experienced a change in state when iegbe_watchdog
2609- * fires
2610- */
2611- adapter->hw.icp_xxxx_is_link_up = FALSE;
2612+ * for ICP_XXXX style controllers, it is necessary to keep
2613+ * track of the last known state of the link to determine if
2614+ * the link experienced a change in state when iegbe_watchdog
2615+ * fires
2616+ */
2617+ adapter->hw.icp_xxxx_is_link_up = FALSE;
2618 
2619- atomic_set(&adapter->irq_sem, 1);
2620- spin_lock_init(&adapter->stats_lock);
2621+ spin_lock_init(&adapter->stats_lock);
2622 
2623- return 0;
2624+ set_bit(__E1000_DOWN, &adapter->flags);
2625+ return 0x0;
2626 }
2627 
2628 /**
2629@@ -1218,71 +1137,31 @@ iegbe_sw_init(struct iegbe_adapter *adap
2630 static int __devinit
2631 iegbe_alloc_queues(struct iegbe_adapter *adapter)
2632 {
2633- int size;
2634 
2635- size = sizeof(struct iegbe_tx_ring) * adapter->num_queues;
2636- adapter->tx_ring = kmalloc(size, GFP_KERNEL);
2637- if (!adapter->tx_ring){
2638+
2639+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2640+ sizeof(struct iegbe_tx_ring), GFP_KERNEL);
2641+ if (!adapter->tx_ring)
2642         return -ENOMEM;
2643- }
2644- memset(adapter->tx_ring, 0, size);
2645 
2646- size = sizeof(struct iegbe_rx_ring) * adapter->num_queues;
2647- adapter->rx_ring = kmalloc(size, GFP_KERNEL);
2648+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2649+ sizeof(struct iegbe_rx_ring), GFP_KERNEL);
2650     if (!adapter->rx_ring) {
2651         kfree(adapter->tx_ring);
2652         return -ENOMEM;
2653     }
2654- memset(adapter->rx_ring, 0, size);
2655 
2656-#ifdef CONFIG_E1000_NAPI
2657- size = sizeof(struct net_device) * adapter->num_queues;
2658- adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
2659+ adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
2660+ sizeof(struct net_device),
2661+ GFP_KERNEL);
2662     if (!adapter->polling_netdev) {
2663         kfree(adapter->tx_ring);
2664         kfree(adapter->rx_ring);
2665         return -ENOMEM;
2666     }
2667- memset(adapter->polling_netdev, 0, size);
2668-#endif
2669-
2670- return E1000_SUCCESS;
2671-}
2672 
2673-#ifdef CONFIG_E1000_MQ
2674-static void __devinit
2675-iegbe_setup_queue_mapping(struct iegbe_adapter *adapter)
2676-{
2677- int i, cpu;
2678-
2679- adapter->rx_sched_call_data.func = iegbe_rx_schedule;
2680- adapter->rx_sched_call_data.info = adapter->netdev;
2681- cpus_clear(adapter->rx_sched_call_data.cpumask);
2682-
2683- adapter->cpu_netdev = alloc_percpu(struct net_device *);
2684- adapter->cpu_tx_ring = alloc_percpu(struct iegbe_tx_ring *);
2685-
2686- lock_cpu_hotplug();
2687- i = 0;
2688- for_each_online_cpu(cpu) {
2689- *per_cpu_ptr(adapter->cpu_tx_ring, cpu) =
2690- &adapter->tx_ring[i % adapter->num_queues];
2691- /* This is incomplete because we'd like to assign separate
2692- * physical cpus to these netdev polling structures and
2693- * avoid saturating a subset of cpus.
2694- */
2695- if (i < adapter->num_queues) {
2696- *per_cpu_ptr(adapter->cpu_netdev, cpu) =
2697- &adapter->polling_netdev[i];
2698- adapter->cpu_for_queue[i] = cpu;
2699- } else {
2700- *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
2701- }
2702- i++;
2703- }
2704- unlock_cpu_hotplug();
2705+ return E1000_SUCCESS;
2706 }
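Replacing kmalloc() + memset() with kcalloc() above keeps the behavior (a zeroed array) and adds an overflow check on count * size for free. A userspace illustration with calloc(), which makes the same guarantee on mainstream allocators:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ring { char pad[64]; };

    int main(void)
    {
        size_t huge = SIZE_MAX / sizeof(struct ring) + 1;   /* count * size would wrap */
        struct ring *r = calloc(huge, sizeof(struct ring)); /* calloc detects the overflow */
        printf("overflowing request: %s\n", r ? "allocated?!" : "refused (NULL)");
        free(r);

        r = calloc(4, sizeof(struct ring));                 /* normal path: zeroed memory */
        printf("r[0].pad[0] == %d\n", r ? r[0].pad[0] : -1);
        free(r);
        return 0;
    }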
2707-#endif
2708 
2709 /**
2710  * iegbe_open - Called when a network interface is made active
2711@@ -1300,40 +1179,62 @@ iegbe_setup_queue_mapping(struct iegbe_a
2712 static int
2713 iegbe_open(struct net_device *netdev)
2714 {
2715- struct iegbe_adapter *adapter = netdev_priv(netdev);
2716- int err;
2717+ struct iegbe_adapter *adapter = netdev_priv(netdev);
2718+ struct iegbe_hw *hw = &adapter->hw;
2719+ int err;
2720+
2721 
2722+ /* disallow open during test */
2723+ if (test_bit(__E1000_TESTING, &adapter->flags))
2724+ return -EBUSY;
2725 
2726- /* allocate receive descriptors */
2727+ /* allocate transmit descriptors */
2728+ err = iegbe_setup_all_tx_resources(adapter);
2729+ if (err)
2730+ goto err_setup_tx;
2731 
2732- if ((err = iegbe_setup_all_rx_resources(adapter))) {
2733+ err = iegbe_setup_all_rx_resources(adapter);
2734+ if (err)
2735         goto err_setup_rx;
2736- }
2737- /* allocate transmit descriptors */
2738- if ((err = iegbe_setup_all_tx_resources(adapter))) {
2739- goto err_setup_tx;
2740- }
2741- if ((err = iegbe_up(adapter))) {
2742- goto err_up;
2743- }
2744-#ifdef NETIF_F_HW_VLAN_TX
2745- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2746- if ((adapter->hw.mng_cookie.status &
2747- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
2748- iegbe_update_mng_vlan(adapter);
2749- }
2750-#endif
2751 
2752- return E1000_SUCCESS;
2753+ iegbe_power_up_phy(adapter);
2754+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2755+ if ((hw->mng_cookie.status &
2756+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
2757+ iegbe_update_mng_vlan(adapter);
2758+ }
2759+
2760+ /* before we allocate an interrupt, we must be ready to handle it.
2761+ * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2762+ * as soon as we call pci_request_irq, so we have to set up our
2763+ * clean_rx handler before we do so. */
2764+ iegbe_configure(adapter);
2765+ err = iegbe_request_irq(adapter);
2766+ if (err)
2767+ goto err_req_irq;
2768 
2769-err_up:
2770- iegbe_free_all_tx_resources(adapter);
2771-err_setup_tx:
2772- iegbe_free_all_rx_resources(adapter);
2773+ /* From here on the code is the same as iegbe_up() */
2774+ clear_bit(__E1000_DOWN, &adapter->flags);
2775+
2776+ napi_enable(&adapter->napi);
2777+
2778+ iegbe_irq_enable(adapter);
2779+
2780+ netif_start_queue(netdev);
2781+
2782+ /* fire a link status change interrupt to start the watchdog */
2783+
2784+ return E1000_SUCCESS;
2785+
2786+err_req_irq:
2787+ iegbe_power_down_phy(adapter);
2788+ iegbe_free_all_rx_resources(adapter);
2789 err_setup_rx:
2790- iegbe_reset(adapter);
2791+ iegbe_free_all_tx_resources(adapter);
2792+err_setup_tx:
2793+ iegbe_reset(adapter);
2794 
2795- return err;
2796+ return err;
2797 }
2798 
2799 /**
2800@@ -1348,22 +1249,25 @@ err_setup_rx:
2801  * hardware, and all transmit and receive resources are freed.
2802  **/
2803 
2804-static int
2805-iegbe_close(struct net_device *netdev)
2806+static int iegbe_close(struct net_device *netdev)
2807 {
2808- struct iegbe_adapter *adapter = netdev_priv(netdev);
2809-
2810- iegbe_down(adapter);
2811-
2812- iegbe_free_all_tx_resources(adapter);
2813- iegbe_free_all_rx_resources(adapter);
2814+ struct iegbe_adapter *adapter = netdev_priv(netdev);
2815+ struct iegbe_hw *hw = &adapter->hw;
2816 
2817-#ifdef NETIF_F_HW_VLAN_TX
2818- if((adapter->hw.mng_cookie.status &
2819- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
2820+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
2821+ iegbe_down(adapter);
2822+ iegbe_power_down_phy(adapter);
2823+ iegbe_free_irq(adapter);
2824+
2825+ iegbe_free_all_tx_resources(adapter);
2826+ iegbe_free_all_rx_resources(adapter);
2827+
2828+ if ((hw->mng_cookie.status &
2829+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
2830+ !(adapter->vlgrp &&
2831+ vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
2832         iegbe_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2833     }
2834-#endif
2835     return 0;
2836 }
2837 
2838@@ -1375,19 +1279,19 @@ iegbe_close(struct net_device *netdev)
2839  **/
2840 static inline boolean_t
2841 iegbe_check_64k_bound(struct iegbe_adapter *adapter,
2842- void *start, unsigned long len)
2843+ void *start, unsigned long len)
2844 {
2845- unsigned long begin = (unsigned long) start;
2846- unsigned long end = begin + len;
2847+ unsigned long begin = (unsigned long) start;
2848+ unsigned long end = begin + len;
2849 
2850- /* First rev 82545 and 82546 need to not allow any memory
2851- * write location to cross 64k boundary due to errata 23 */
2852- if(adapter->hw.mac_type == iegbe_82545 ||
2853- adapter->hw.mac_type == iegbe_82546) {
2854- return ((begin ^ (end - 1)) >> 0x10) != 0 ? FALSE : TRUE;
2855- }
2856+ /* First rev 82545 and 82546 need to not allow any memory
2857+ * write location to cross 64k boundary due to errata 23 */
2858+ if(adapter->hw.mac_type == iegbe_82545 ||
2859+ adapter->hw.mac_type == iegbe_82546) {
2860+ return ((begin ^ (end - 1)) >> 0x10) != 0x0 ? FALSE : TRUE;
2861+ }
2862 
2863- return TRUE;
2864+ return TRUE;
2865 }
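The XOR in iegbe_check_64k_bound() works because begin and end - 1 have identical bits at position 16 and above exactly when the buffer sits inside one 64 KB page. For example, start 0xFFF0 with length 0x20 gives end - 1 = 0x1000F and start ^ (end - 1) = 0x1FFFF, which is nonzero after the shift, so the span crosses a boundary. A standalone check:

    #include <stdbool.h>
    #include <stdio.h>

    /* true if [start, start+len) stays within a single 64 KB page */
    static bool within_64k(unsigned long start, unsigned long len)
    {
        unsigned long end = start + len;
        return ((start ^ (end - 1)) >> 16) == 0;
    }

    int main(void)
    {
        printf("0xFF00..+0x20 : %s\n", within_64k(0xFF00, 0x20) ? "ok" : "crosses");
        printf("0xFFF0..+0x20 : %s\n", within_64k(0xFFF0, 0x20) ? "ok" : "crosses");
        return 0;
    }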
2866 
2867 /**
2868@@ -1398,102 +1302,98 @@ iegbe_check_64k_bound(struct iegbe_adapt
2869  * Return 0 on success, negative on failure
2870  **/
2871 
2872-int
2873-iegbe_setup_tx_resources(struct iegbe_adapter *adapter,
2874+static int iegbe_setup_tx_resources(struct iegbe_adapter *adapter,
2875                          struct iegbe_tx_ring *txdr)
2876 {
2877- struct pci_dev *pdev = adapter->pdev;
2878- int size;
2879+ struct pci_dev *pdev = adapter->pdev;
2880+ int size;
2881 
2882- size = sizeof(struct iegbe_buffer) * txdr->count;
2883- txdr->buffer_info = vmalloc(size);
2884- if (!txdr->buffer_info) {
2885- DPRINTK(PROBE, ERR,
2886- "Unable to allocate memory for the transmit descriptor ring\n");
2887- return -ENOMEM;
2888- }
2889+ size = sizeof(struct iegbe_buffer) * txdr->count;
2890+ txdr->buffer_info = vmalloc(size);
2891+ if (!txdr->buffer_info) {
2892+ DPRINTK(PROBE, ERR,
2893+ "Unable to allocate memory for the transmit descriptor ring\n");
2894+ return -ENOMEM;
2895+ }
2896     memset(txdr->buffer_info, 0, size);
2897- memset(&txdr->previous_buffer_info, 0, sizeof(struct iegbe_buffer));
2898 
2899- /* round up to nearest 4K */
2900+ /* round up to nearest 4K */
2901 
2902- txdr->size = txdr->count * sizeof(struct iegbe_tx_desc);
2903- E1000_ROUNDUP(txdr->size, 0x1000);
2904+ txdr->size = txdr->count * sizeof(struct iegbe_tx_desc);
2905+ txdr->size = ALIGN(txdr->size, 4096);
2906 
2907- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
2908- if (!txdr->desc) {
2909+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
2910+ if (!txdr->desc) {
2911 setup_tx_desc_die:
2912- vfree(txdr->buffer_info);
2913- DPRINTK(PROBE, ERR,
2914- "Unable to allocate memory for the transmit descriptor ring\n");
2915- return -ENOMEM;
2916- }
2917-
2918- /* Fix for errata 23, can't cross 64kB boundary */
2919- if (!iegbe_check_64k_bound(adapter, txdr->desc, txdr->size)) {
2920- void *olddesc = txdr->desc;
2921- dma_addr_t olddma = txdr->dma;
2922- DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
2923- "at %p\n", txdr->size, txdr->desc);
2924- /* Try again, without freeing the previous */
2925- txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
2926- /* Failed allocation, critical failure */
2927- if (!txdr->desc) {
2928- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
2929- goto setup_tx_desc_die;
2930- }
2931+ vfree(txdr->buffer_info);
2932+ DPRINTK(PROBE, ERR,
2933+ "Unable to allocate memory for the transmit descriptor ring\n");
2934+ return -ENOMEM;
2935+ }
2936+
2937+ /* Fix for errata 23, can't cross 64kB boundary */
2938+ if (!iegbe_check_64k_bound(adapter, txdr->desc, txdr->size)) {
2939+ void *olddesc = txdr->desc;
2940+ dma_addr_t olddma = txdr->dma;
2941+ DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
2942+ "at %p\n", txdr->size, txdr->desc);
2943+ /* Try again, without freeing the previous */
2944+ txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
2945+ /* Failed allocation, critical failure */
2946+ if (!txdr->desc) {
2947+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
2948+ goto setup_tx_desc_die;
2949+ }
2950 
2951- if (!iegbe_check_64k_bound(adapter, txdr->desc, txdr->size)) {
2952- /* give up */
2953- pci_free_consistent(pdev, txdr->size, txdr->desc,
2954- txdr->dma);
2955- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
2956- DPRINTK(PROBE, ERR,
2957- "Unable to allocate aligned memory "
2958- "for the transmit descriptor ring\n");
2959- vfree(txdr->buffer_info);
2960- return -ENOMEM;
2961- } else {
2962- /* Free old allocation, new allocation was successful */
2963- pci_free_consistent(pdev, txdr->size, olddesc, olddma);
2964- }
2965- }
2966+ if (!iegbe_check_64k_bound(adapter, txdr->desc, txdr->size)) {
2967+ /* give up */
2968+ pci_free_consistent(pdev, txdr->size, txdr->desc,
2969+ txdr->dma);
2970+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
2971+ DPRINTK(PROBE, ERR,
2972+ "Unable to allocate aligned memory "
2973+ "for the transmit descriptor ring\n");
2974+ vfree(txdr->buffer_info);
2975+ return -ENOMEM;
2976+ } else {
2977+ /* Free old allocation, new allocation was successful */
2978+ pci_free_consistent(pdev, txdr->size, olddesc, olddma);
2979+ }
2980+ }
2981     memset(txdr->desc, 0, txdr->size);
2982 
2983     txdr->next_to_use = 0;
2984     txdr->next_to_clean = 0;
2985- spin_lock_init(&txdr->tx_lock);
2986+ spin_lock_init(&txdr->tx_lock);
2987 
2988     return 0;
2989 }
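The errata-23 retry above uses a deliberate allocate-again-before-freeing order: the misaligned buffer is kept alive while the second pci_alloc_consistent() runs, so the allocator cannot hand back the same region, and only then is the old one released. A userspace model of the idiom; malloc() stands in for the DMA allocator, the alignment predicate mirrors the 64 KB check, and a single retry is shown where the driver gives up after one:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* does a 4096-byte buffer at p stay within one 64 KB page? */
    static int ok_alignment(void *p)
    {
        uintptr_t b = (uintptr_t)p;
        return (b >> 16) == ((b + 4096 - 1) >> 16);
    }

    int main(void)
    {
        void *buf = malloc(4096);
        if (buf && !ok_alignment(buf)) {
            void *old = buf;          /* keep the bad buffer alive... */
            buf = malloc(4096);       /* ...so this cannot return the same region */
            free(old);                /* release the bad one only afterwards */
        }
        printf("buf=%p\n", buf);
        free(buf);
        return 0;
    }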
2990 
2991 /**
2992  * iegbe_setup_all_tx_resources - wrapper to allocate Tx resources
2993- * (Descriptors) for all queues
2994+ * (Descriptors) for all queues
2995  * @adapter: board private structure
2996  *
2997- * If this function returns with an error, then it's possible one or
2998- * more of the rings is populated (while the rest are not). It is the
2999- * callers duty to clean those orphaned rings.
3000- *
3001  * Return 0 on success, negative on failure
3002  **/
3003 
3004-int
3005-iegbe_setup_all_tx_resources(struct iegbe_adapter *adapter)
3006+int iegbe_setup_all_tx_resources(struct iegbe_adapter *adapter)
3007 {
3008     int i, err = 0;
3009 
3010- for (i = 0; i < adapter->num_queues; i++) {
3011+ for (i = 0; i < adapter->num_tx_queues; i++) {
3012         err = iegbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
3013         if (err) {
3014             DPRINTK(PROBE, ERR,
3015                 "Allocation for Tx Queue %u failed\n", i);
3016+ for (i-- ; i >= 0; i--)
3017+ iegbe_free_tx_resources(adapter,
3018+ &adapter->tx_ring[i]);
3019             break;
3020         }
3021     }
3022 
3023- return err;
3024+ return err;
3025 }
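The added unwind loop closes a real gap: the old wrapper could return an error with earlier rings still allocated, leaving cleanup to the caller (the removed comment even said as much). The for (i-- ; i >= 0; i--) idiom frees exactly the entries that succeeded before the failure. A generic model:

    #include <stdio.h>
    #include <stdlib.h>

    static int setup(void **slot, int i)
    {
        if (i == 2)                   /* simulate the third allocation failing */
            return -1;
        *slot = malloc(16);
        return *slot ? 0 : -1;
    }

    int main(void)
    {
        void *res[4] = { 0 };
        int i, err = 0;

        for (i = 0; i < 4; i++) {
            err = setup(&res[i], i);
            if (err) {
                for (i--; i >= 0; i--) /* unwind only what succeeded */
                    free(res[i]);
                break;
            }
        }
        printf("err=%d\n", err);
        return 0;
    }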
3026 
3027 /**
3028@@ -1512,113 +1412,108 @@ iegbe_configure_tx(struct iegbe_adapter
3029 
3030     /* Setup the HW Tx Head and Tail descriptor pointers */
3031 
3032- switch (adapter->num_queues) {
3033+ switch (adapter->num_tx_queues) {
3034     case 0x2:
3035         tdba = adapter->tx_ring[0x1].dma;
3036         tdlen = adapter->tx_ring[0x1].count *
3037- sizeof(struct iegbe_tx_desc);
3038- E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
3039+ sizeof(struct iegbe_tx_desc);
3040+ E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
3041         E1000_WRITE_REG(hw, TDBAH1, (tdba >> 0x20));
3042- E1000_WRITE_REG(hw, TDLEN1, tdlen);
3043- E1000_WRITE_REG(hw, TDH1, 0);
3044- E1000_WRITE_REG(hw, TDT1, 0);
3045+ E1000_WRITE_REG(hw, TDLEN1, tdlen);
3046+ E1000_WRITE_REG(hw, TDH1, 0x0);
3047+ E1000_WRITE_REG(hw, TDT1, 0x0);
3048         adapter->tx_ring[0x1].tdh = E1000_TDH1;
3049         adapter->tx_ring[0x1].tdt = E1000_TDT1;
3050- /* Fall Through */
3051+ /* Fall Through */
3052     case 0x1:
3053- default:
3054- tdba = adapter->tx_ring[0].dma;
3055- tdlen = adapter->tx_ring[0].count *
3056- sizeof(struct iegbe_tx_desc);
3057- E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
3058+ default:
3059+ tdba = adapter->tx_ring[0x0].dma;
3060+ tdlen = adapter->tx_ring[0x0].count *
3061+ sizeof(struct iegbe_tx_desc);
3062+ E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
3063         E1000_WRITE_REG(hw, TDBAH, (tdba >> 0x20));
3064- E1000_WRITE_REG(hw, TDLEN, tdlen);
3065- E1000_WRITE_REG(hw, TDH, 0);
3066- E1000_WRITE_REG(hw, TDT, 0);
3067- adapter->tx_ring[0].tdh = E1000_TDH;
3068- adapter->tx_ring[0].tdt = E1000_TDT;
3069- break;
3070- }
3071-
3072- /* Set the default values for the Tx Inter Packet Gap timer */
3073-
3074- switch (hw->mac_type) {
3075- case iegbe_82542_rev2_0:
3076- case iegbe_82542_rev2_1:
3077- tipg = DEFAULT_82542_TIPG_IPGT;
3078- tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3079- tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3080- break;
3081- default:
3082- switch(hw->media_type) {
3083- case iegbe_media_type_fiber:
3084- case iegbe_media_type_internal_serdes:
3085- tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3086- break;
3087- case iegbe_media_type_copper:
3088- tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3089- break;
3090- case iegbe_media_type_oem:
3091- default:
3092+ E1000_WRITE_REG(hw, TDLEN, tdlen);
3093+ E1000_WRITE_REG(hw, TDH, 0x0);
3094+ E1000_WRITE_REG(hw, TDT, 0x0);
3095+ adapter->tx_ring[0x0].tdh = E1000_TDH;
3096+ adapter->tx_ring[0x0].tdt = E1000_TDT;
3097+ break;
3098+ }
3099+
3100+ /* Set the default values for the Tx Inter Packet Gap timer */
3101+
3102+ switch (hw->mac_type) {
3103+ case iegbe_82542_rev2_0:
3104+ case iegbe_82542_rev2_1:
3105+ tipg = DEFAULT_82542_TIPG_IPGT;
3106+ tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3107+ tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3108+ break;
3109+ default:
3110+ switch(hw->media_type) {
3111+ case iegbe_media_type_fiber:
3112+ case iegbe_media_type_internal_serdes:
3113+ tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3114+ break;
3115+ case iegbe_media_type_copper:
3116+ tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3117+ break;
3118+ case iegbe_media_type_oem:
3119+ default:
3120             tipg = (0xFFFFFFFFUL >> (sizeof(tipg)*0x8 -
3121                 E1000_TIPG_IPGR1_SHIFT))
3122- & iegbe_oem_get_tipg(&adapter->hw);
3123- break;
3124- }
3125- tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3126- tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3127- }
3128- E1000_WRITE_REG(hw, TIPG, tipg);
3129+ & iegbe_oem_get_tipg(&adapter->hw);
3130+ break;
3131+ }
3132+ tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3133+ tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3134+ }
3135+ E1000_WRITE_REG(hw, TIPG, tipg);
3136 
3137- /* Set the Tx Interrupt Delay register */
3138+ /* Set the Tx Interrupt Delay register */
3139 
3140- E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
3141+ E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
3142     if (hw->mac_type >= iegbe_82540) {
3143- E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
3144+ E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
3145     }
3146- /* Program the Transmit Control Register */
3147+ /* Program the Transmit Control Register */
3148 
3149- tctl = E1000_READ_REG(hw, TCTL);
3150+ tctl = E1000_READ_REG(hw, TCTL);
3151 
3152- tctl &= ~E1000_TCTL_CT;
3153- tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
3154- (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
3155+ tctl &= ~E1000_TCTL_CT;
3156+ tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
3157+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
3158 
3159- E1000_WRITE_REG(hw, TCTL, tctl);
3160+ E1000_WRITE_REG(hw, TCTL, tctl);
3161 
3162- if (hw->mac_type == iegbe_82571 || hw->mac_type == iegbe_82572) {
3163- tarc = E1000_READ_REG(hw, TARC0);
3164+ if (hw->mac_type == iegbe_82571 || hw->mac_type == iegbe_82572) {
3165+ tarc = E1000_READ_REG(hw, TARC0);
3166         tarc |= ((0x1 << 0x19) | (0x1 << 0x15));
3167- E1000_WRITE_REG(hw, TARC0, tarc);
3168- tarc = E1000_READ_REG(hw, TARC1);
3169+ E1000_WRITE_REG(hw, TARC0, tarc);
3170+ tarc = E1000_READ_REG(hw, TARC1);
3171         tarc |= (0x1 << 0x19);
3172         if (tctl & E1000_TCTL_MULR) {
3173             tarc &= ~(0x1 << 0x1c);
3174         } else {
3175             tarc |= (0x1 << 0x1c);
3176         }
3177- E1000_WRITE_REG(hw, TARC1, tarc);
3178- }
3179+ E1000_WRITE_REG(hw, TARC1, tarc);
3180+ }
3181 
3182- iegbe_config_collision_dist(hw);
3183+ iegbe_config_collision_dist(hw);
3184 
3185- /* Setup Transmit Descriptor Settings for eop descriptor */
3186- adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
3187- E1000_TXD_CMD_IFCS;
3188+ /* Setup Transmit Descriptor Settings for eop descriptor */
3189+ adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
3190+ E1000_TXD_CMD_IFCS;
3191 
3192     if (hw->mac_type < iegbe_82543) {
3193- adapter->txd_cmd |= E1000_TXD_CMD_RPS;
3194+ adapter->txd_cmd |= E1000_TXD_CMD_RPS;
3195     } else {
3196-#ifdef IEGBE_GBE_WORKAROUND
3197- /* Disable the RS bit in the Tx descriptor */
3198- adapter->txd_cmd &= ~E1000_TXD_CMD_RS;
3199-#else
3200- adapter->txd_cmd |= E1000_TXD_CMD_RS;
3201-#endif
3202+ adapter->txd_cmd |= E1000_TXD_CMD_RS;
3203     }
3204- /* Cache if we're 82544 running in PCI-X because we'll
3205- * need this to apply a workaround later in the send path. */
3206- if (hw->mac_type == iegbe_82544 &&
3207+ /* Cache if we're 82544 running in PCI-X because we'll
3208+ * need this to apply a workaround later in the send path. */
3209+ if (hw->mac_type == iegbe_82544 &&
3210         hw->bus_type == iegbe_bus_type_pcix) {
3211         adapter->pcix_82544 = 0x1;
3212      }
3213@@ -1632,96 +1527,95 @@ iegbe_configure_tx(struct iegbe_adapter
3214  * Returns 0 on success, negative on failure
3215  **/
3216 
3217-int
3218-iegbe_setup_rx_resources(struct iegbe_adapter *adapter,
3219+static int iegbe_setup_rx_resources(struct iegbe_adapter *adapter,
3220                          struct iegbe_rx_ring *rxdr)
3221 {
3222- struct pci_dev *pdev = adapter->pdev;
3223- int size, desc_len;
3224-
3225- size = sizeof(struct iegbe_buffer) * rxdr->count;
3226- rxdr->buffer_info = vmalloc(size);
3227- if (!rxdr->buffer_info) {
3228- DPRINTK(PROBE, ERR,
3229- "Unable to allocate memory for the receive descriptor ring\n");
3230- return -ENOMEM;
3231- }
3232- memset(rxdr->buffer_info, 0, size);
3233-
3234- size = sizeof(struct iegbe_ps_page) * rxdr->count;
3235- rxdr->ps_page = kmalloc(size, GFP_KERNEL);
3236- if (!rxdr->ps_page) {
3237- vfree(rxdr->buffer_info);
3238- DPRINTK(PROBE, ERR,
3239- "Unable to allocate memory for the receive descriptor ring\n");
3240- return -ENOMEM;
3241- }
3242- memset(rxdr->ps_page, 0, size);
3243-
3244- size = sizeof(struct iegbe_ps_page_dma) * rxdr->count;
3245- rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
3246- if (!rxdr->ps_page_dma) {
3247- vfree(rxdr->buffer_info);
3248- kfree(rxdr->ps_page);
3249- DPRINTK(PROBE, ERR,
3250- "Unable to allocate memory for the receive descriptor ring\n");
3251- return -ENOMEM;
3252- }
3253- memset(rxdr->ps_page_dma, 0, size);
3254+ struct iegbe_hw *hw = &adapter->hw;
3255+ struct pci_dev *pdev = adapter->pdev;
3256+ int size, desc_len;
3257 
3258- if (adapter->hw.mac_type <= iegbe_82547_rev_2) {
3259- desc_len = sizeof(struct iegbe_rx_desc);
3260- } else {
3261- desc_len = sizeof(union iegbe_rx_desc_packet_split);
3262+ size = sizeof(struct iegbe_buffer) * rxdr->count;
3263+ rxdr->buffer_info = vmalloc(size);
3264+ if (!rxdr->buffer_info) {
3265+ DPRINTK(PROBE, ERR,
3266+ "Unable to allocate memory for the receive descriptor ring\n");
3267+ return -ENOMEM;
3268     }
3269- /* Round up to nearest 4K */
3270-
3271- rxdr->size = rxdr->count * desc_len;
3272- E1000_ROUNDUP(rxdr->size, 0x1000);
3273-
3274- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
3275+ memset(rxdr->buffer_info, 0, size);
3276 
3277- if (!rxdr->desc) {
3278- DPRINTK(PROBE, ERR,
3279- "Unable to allocate memory for the receive descriptor ring\n");
3280+ rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct iegbe_ps_page),
3281+ GFP_KERNEL);
3282+ if (!rxdr->ps_page) {
3283+ vfree(rxdr->buffer_info);
3284+ DPRINTK(PROBE, ERR,
3285+ "Unable to allocate memory for the receive descriptor ring\n");
3286+ return -ENOMEM;
3287+ }
3288+
3289+ rxdr->ps_page_dma = kcalloc(rxdr->count,
3290+ sizeof(struct iegbe_ps_page_dma),
3291+ GFP_KERNEL);
3292+ if (!rxdr->ps_page_dma) {
3293+ vfree(rxdr->buffer_info);
3294+ kfree(rxdr->ps_page);
3295+ DPRINTK(PROBE, ERR,
3296+ "Unable to allocate memory for the receive descriptor ring\n");
3297+ return -ENOMEM;
3298+ }
3299+
3300+ if (hw->mac_type <= iegbe_82547_rev_2)
3301+ desc_len = sizeof(struct iegbe_rx_desc);
3302+ else
3303+ desc_len = sizeof(union iegbe_rx_desc_packet_split);
3304+
3305+ /* Round up to nearest 4K */
3306+
3307+ rxdr->size = rxdr->count * desc_len;
3308+ rxdr->size = ALIGN(rxdr->size, 4096);
3309+
3310+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
3311+
3312+ if (!rxdr->desc) {
3313+ DPRINTK(PROBE, ERR,
3314+ "Unable to allocate memory for the receive descriptor ring\n");
3315 setup_rx_desc_die:
3316- vfree(rxdr->buffer_info);
3317- kfree(rxdr->ps_page);
3318- kfree(rxdr->ps_page_dma);
3319- return -ENOMEM;
3320- }
3321-
3322- /* Fix for errata 23, can't cross 64kB boundary */
3323- if (!iegbe_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
3324- void *olddesc = rxdr->desc;
3325- dma_addr_t olddma = rxdr->dma;
3326- DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
3327- "at %p\n", rxdr->size, rxdr->desc);
3328- /* Try again, without freeing the previous */
3329- rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
3330- /* Failed allocation, critical failure */
3331- if (!rxdr->desc) {
3332- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
3333- DPRINTK(PROBE, ERR,
3334- "Unable to allocate memory "
3335- "for the receive descriptor ring\n");
3336- goto setup_rx_desc_die;
3337- }
3338+ vfree(rxdr->buffer_info);
3339+ kfree(rxdr->ps_page);
3340+ kfree(rxdr->ps_page_dma);
3341+ return -ENOMEM;
3342+ }
3343+
3344+ /* Fix for errata 23, can't cross 64kB boundary */
3345+ if (!iegbe_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
3346+ void *olddesc = rxdr->desc;
3347+ dma_addr_t olddma = rxdr->dma;
3348+ DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
3349+ "at %p\n", rxdr->size, rxdr->desc);
3350+ /* Try again, without freeing the previous */
3351+ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
3352+ /* Failed allocation, critical failure */
3353+ if (!rxdr->desc) {
3354+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
3355+ DPRINTK(PROBE, ERR,
3356+ "Unable to allocate memory "
3357+ "for the receive descriptor ring\n");
3358+ goto setup_rx_desc_die;
3359+ }
3360 
3361- if (!iegbe_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
3362- /* give up */
3363- pci_free_consistent(pdev, rxdr->size, rxdr->desc,
3364- rxdr->dma);
3365- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
3366- DPRINTK(PROBE, ERR,
3367- "Unable to allocate aligned memory "
3368- "for the receive descriptor ring\n");
3369- goto setup_rx_desc_die;
3370- } else {
3371- /* Free old allocation, new allocation was successful */
3372- pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
3373- }
3374- }
3375+ if (!iegbe_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
3376+ /* give up */
3377+ pci_free_consistent(pdev, rxdr->size, rxdr->desc,
3378+ rxdr->dma);
3379+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
3380+ DPRINTK(PROBE, ERR,
3381+ "Unable to allocate aligned memory "
3382+ "for the receive descriptor ring\n");
3383+ goto setup_rx_desc_die;
3384+ } else {
3385+ /* Free old allocation, new allocation was successful */
3386+ pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
3387+ }
3388+ }
3389     memset(rxdr->desc, 0, rxdr->size);
3390 
3391     rxdr->next_to_clean = 0;
3392@@ -1732,7 +1626,7 @@ setup_rx_desc_die:
3393 
3394 /**
3395  * iegbe_setup_all_rx_resources - wrapper to allocate Rx resources
3396- * (Descriptors) for all queues
3397+ * (Descriptors) for all queues
3398  * @adapter: board private structure
3399  *
3400  * If this function returns with an error, then it's possible one or
3401@@ -1742,21 +1636,23 @@ setup_rx_desc_die:
3402  * Return 0 on success, negative on failure
3403  **/
3404 
3405-int
3406-iegbe_setup_all_rx_resources(struct iegbe_adapter *adapter)
3407+int iegbe_setup_all_rx_resources(struct iegbe_adapter *adapter)
3408 {
3409     int i, err = 0;
3410 
3411- for (i = 0; i < adapter->num_queues; i++) {
3412+ for (i = 0; i < adapter->num_rx_queues; i++) {
3413         err = iegbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
3414         if (err) {
3415             DPRINTK(PROBE, ERR,
3416                 "Allocation for Rx Queue %u failed\n", i);
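+ /* unwind: free any Rx rings that were already set up before bailing out */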
3417+ for (i--; i >= 0; i--)
3418+ iegbe_free_rx_resources(adapter,
3419+ &adapter->rx_ring[i]);
3420             break;
3421         }
3422     }
3423 
3424- return err;
3425+ return err;
3426 }
3427 
3428 /**
3429@@ -1765,105 +1661,104 @@ iegbe_setup_all_rx_resources(struct iegb
3430  **/
3431 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
3432             (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
3433-static void
3434-iegbe_setup_rctl(struct iegbe_adapter *adapter)
3435+static void iegbe_setup_rctl(struct iegbe_adapter *adapter)
3436 {
3437- uint32_t rctl, rfctl;
3438- uint32_t psrctl = 0;
3439-#ifdef CONFIG_E1000_PACKET_SPLIT
3440- uint32_t pages = 0;
3441-#endif
3442-
3443- rctl = E1000_READ_REG(&adapter->hw, RCTL);
3444-
3445- rctl &= ~(0x3 << E1000_RCTL_MO_SHIFT);
3446-
3447- rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3448- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3449- (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
3450-
3451- if(adapter->hw.tbi_compatibility_on == 0x1) {
3452- rctl |= E1000_RCTL_SBP;
3453- } else {
3454- rctl &= ~E1000_RCTL_SBP;
3455- }
3456- if(adapter->netdev->mtu <= ETH_DATA_LEN) {
3457- rctl &= ~E1000_RCTL_LPE;
3458- } else {
3459- rctl |= E1000_RCTL_LPE;
3460- }
3461- /* Setup buffer sizes */
3462- if(adapter->hw.mac_type >= iegbe_82571) {
3463- /* We can now specify buffers in 1K increments.
3464- * BSIZE and BSEX are ignored in this case. */
3465- rctl |= adapter->rx_buffer_len << 0x11;
3466- } else {
3467- rctl &= ~E1000_RCTL_SZ_4096;
3468- rctl |= E1000_RCTL_BSEX;
3469- switch (adapter->rx_buffer_len) {
3470- case E1000_RXBUFFER_2048:
3471- default:
3472- rctl |= E1000_RCTL_SZ_2048;
3473+ struct iegbe_hw *hw = &adapter->hw;
3474+ u32 rctl, rfctl;
3475+ u32 psrctl = 0;
3476+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
3477+ u32 pages = 0;
3478+#endif
3479+
3480+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
3481+
3482+ rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3483+
3484+ rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3485+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3486+ (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
3487+
3488+ if (hw->tbi_compatibility_on == 1)
3489+ rctl |= E1000_RCTL_SBP;
3490+ else
3491+ rctl &= ~E1000_RCTL_SBP;
3492+
3493+ if (adapter->netdev->mtu <= ETH_DATA_LEN)
3494+ rctl &= ~E1000_RCTL_LPE;
3495+ else
3496+ rctl |= E1000_RCTL_LPE;
3497+
3498+ /* Setup buffer sizes */
3499+ /* We can now specify buffers in 1K increments.
3500+ * BSIZE and BSEX are ignored in this case. */
3501+ rctl &= ~E1000_RCTL_SZ_4096;
3502+ rctl |= E1000_RCTL_BSEX;
3503+ switch (adapter->rx_buffer_len) {
3504+ case E1000_RXBUFFER_256:
3505+ rctl |= E1000_RCTL_SZ_256;
3506             rctl &= ~E1000_RCTL_BSEX;
3507             break;
3508- case E1000_RXBUFFER_4096:
3509- rctl |= E1000_RCTL_SZ_4096;
3510- break;
3511- case E1000_RXBUFFER_8192:
3512- rctl |= E1000_RCTL_SZ_8192;
3513- break;
3514- case E1000_RXBUFFER_16384:
3515- rctl |= E1000_RCTL_SZ_16384;
3516- break;
3517- }
3518- }
3519+ case E1000_RXBUFFER_2048:
3520+ default:
3521+ rctl |= E1000_RCTL_SZ_2048;
3522+ rctl &= ~E1000_RCTL_BSEX;
3523+ break;
3524+ case E1000_RXBUFFER_4096:
3525+ rctl |= E1000_RCTL_SZ_4096;
3526+ break;
3527+ case E1000_RXBUFFER_8192:
3528+ rctl |= E1000_RCTL_SZ_8192;
3529+ break;
3530+ case E1000_RXBUFFER_16384:
3531+ rctl |= E1000_RCTL_SZ_16384;
3532+ break;
3533+ }
3534 
3535-#ifdef CONFIG_E1000_PACKET_SPLIT
3536- /* 82571 and greater support packet-split where the protocol
3537- * header is placed in skb->data and the packet data is
3538- * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3539- * In the case of a non-split, skb->data is linearly filled,
3540- * followed by the page buffers. Therefore, skb->data is
3541- * sized to hold the largest protocol header.
3542- */
3543- pages = PAGE_USE_COUNT(adapter->netdev->mtu);
3544- if ((adapter->hw.mac_type > iegbe_82547_rev_2) && (pages <= 0x3) &&
3545- PAGE_SIZE <= 0x4000) {
3546- adapter->rx_ps_pages = pages;
3547- } else {
3548+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
3549+ /* 82571 and greater support packet-split where the protocol
3550+ * header is placed in skb->data and the packet data is
3551+ * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3552+ * In the case of a non-split, skb->data is linearly filled,
3553+ * followed by the page buffers. Therefore, skb->data is
3554+ * sized to hold the largest protocol header.
3555+ */
3556+ pages = PAGE_USE_COUNT(adapter->netdev->mtu);
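+ /* note: packet split is now also gated on E1000_RCTL_LPE, so it is
+ * only used when jumbo frames are enabled */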
3557+ if ((hw->mac_type >= iegbe_82571) && (pages <= 3) &&
3558+ PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
3559+ adapter->rx_ps_pages = pages;
3560+ else
3561         adapter->rx_ps_pages = 0;
3562- }
3563 #endif
3564- if (adapter->rx_ps_pages) {
3565- /* Configure extra packet-split registers */
3566- rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
3567- rfctl |= E1000_RFCTL_EXTEN;
3568- /* disable IPv6 packet split support */
3569- rfctl |= E1000_RFCTL_IPV6_DIS;
3570- E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
3571-
3572- rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
3573-
3574- psrctl |= adapter->rx_ps_bsize0 >>
3575- E1000_PSRCTL_BSIZE0_SHIFT;
3576-
3577- switch (adapter->rx_ps_pages) {
3578- case 0x3:
3579- psrctl |= PAGE_SIZE <<
3580- E1000_PSRCTL_BSIZE3_SHIFT;
3581- case 0x2:
3582- psrctl |= PAGE_SIZE <<
3583- E1000_PSRCTL_BSIZE2_SHIFT;
3584- case 0x1:
3585- psrctl |= PAGE_SIZE >>
3586- E1000_PSRCTL_BSIZE1_SHIFT;
3587- break;
3588- }
3589+ if (adapter->rx_ps_pages) {
3590+ /* Configure extra packet-split registers */
3591+ rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
3592+ rfctl |= E1000_RFCTL_EXTEN;
3593+ /* disable packet split for IPv6 extension headers */
3594+ rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
3595+ E1000_RFCTL_NEW_IPV6_EXT_DIS);
3596+
3597+ rctl |= E1000_RCTL_DTYP_PS;
3598+
3599+ psrctl |= adapter->rx_ps_bsize0 >>
3600+ E1000_PSRCTL_BSIZE0_SHIFT;
3601+
3602+ switch (adapter->rx_ps_pages) {
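+ /* intentional fall-through: each larger page count also programs
+ * the smaller BSIZE fields below it */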
3603+ case 3:
3604+ psrctl |= PAGE_SIZE <<
3605+ E1000_PSRCTL_BSIZE3_SHIFT;
3606+ case 2:
3607+ psrctl |= PAGE_SIZE <<
3608+ E1000_PSRCTL_BSIZE2_SHIFT;
3609+ case 1:
3610+ psrctl |= PAGE_SIZE >>
3611+ E1000_PSRCTL_BSIZE1_SHIFT;
3612+ break;
3613+ }
3614 
3615- E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
3616- }
3617+ E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
3618+ }
3619 
3620- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3621+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3622 }
3623 
3624 /**
3625@@ -1873,145 +1768,87 @@ iegbe_setup_rctl(struct iegbe_adapter *a
3626  * Configure the Rx unit of the MAC after a reset.
3627  **/
3628 
3629-static void
3630-iegbe_configure_rx(struct iegbe_adapter *adapter)
3631+static void iegbe_configure_rx(struct iegbe_adapter *adapter)
3632 {
3633- uint64_t rdba;
3634- struct iegbe_hw *hw = &adapter->hw;
3635- uint32_t rdlen, rctl, rxcsum, ctrl_ext;
3636-#ifdef CONFIG_E1000_MQ
3637- uint32_t reta, mrqc;
3638- int i;
3639-#endif
3640+ u64 rdba;
3641+ struct iegbe_hw *hw = &adapter->hw;
3642+ u32 rdlen, rctl, rxcsum, ctrl_ext;
3643 
3644- if (adapter->rx_ps_pages) {
3645+ if (adapter->rx_ps_pages) {
3646         rdlen = adapter->rx_ring[0].count *
3647- sizeof(union iegbe_rx_desc_packet_split);
3648- adapter->clean_rx = iegbe_clean_rx_irq_ps;
3649- adapter->alloc_rx_buf = iegbe_alloc_rx_buffers_ps;
3650- } else {
3651+ sizeof(union iegbe_rx_desc_packet_split);
3652+ adapter->clean_rx = iegbe_clean_rx_irq_ps;
3653+ adapter->alloc_rx_buf = iegbe_alloc_rx_buffers_ps;
3654+ } else {
3655         rdlen = adapter->rx_ring[0].count *
3656- sizeof(struct iegbe_rx_desc);
3657- adapter->clean_rx = iegbe_clean_rx_irq;
3658- adapter->alloc_rx_buf = iegbe_alloc_rx_buffers;
3659- }
3660+ sizeof(struct iegbe_rx_desc);
3661+ adapter->clean_rx = iegbe_clean_rx_irq;
3662+ adapter->alloc_rx_buf = iegbe_alloc_rx_buffers;
3663+ }
3664 
3665- /* disable receives while setting up the descriptors */
3666- rctl = E1000_READ_REG(hw, RCTL);
3667- E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3668+ /* disable receives while setting up the descriptors */
3669+ rctl = E1000_READ_REG(hw, RCTL);
3670+ E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3671 
3672- /* set the Receive Delay Timer Register */
3673- E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
3674+ /* set the Receive Delay Timer Register */
3675+ E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
3676 
3677- if (hw->mac_type >= iegbe_82540) {
3678- E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
3679- if(adapter->itr > 0x1) {
3680- E1000_WRITE_REG(hw, ITR,
3681- 0x3b9aca00 / (adapter->itr * 0x100));
3682+ if (hw->mac_type >= iegbe_82540) {
3683+ E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
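+ /* ITR counts in 256ns units: 1000000000/(itr*256) converts a rate in
+ * interrupts/sec into the register value */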
3684+ if (adapter->itr_setting != 0)
3685+ E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (adapter->itr * 256));
3686         }
3687- }
3688 
3689- if (hw->mac_type >= iegbe_82571) {
3690- /* Reset delay timers after every interrupt */
3691- ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
3692- ctrl_ext |= E1000_CTRL_EXT_CANC;
3693- E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
3694- E1000_WRITE_FLUSH(hw);
3695- }
3696+ if (hw->mac_type >= iegbe_82571) {
3697+ /* Reset delay timers after every interrupt */
3698+ ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
3699+ ctrl_ext |= E1000_CTRL_EXT_CANC;
3700+ E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
3701+ E1000_WRITE_FLUSH(hw);
3702+ }
3703 
3704     /* Setup the HW Rx Head and Tail Descriptor Pointers and
3705      * the Base and Length of the Rx Descriptor Ring */
3706- switch (adapter->num_queues) {
3707-#ifdef CONFIG_E1000_MQ
3708- case 0x2:
3709- rdba = adapter->rx_ring[0x1].dma;
3710- E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
3711- E1000_WRITE_REG(hw, RDBAH1, (rdba >> 0x20));
3712- E1000_WRITE_REG(hw, RDLEN1, rdlen);
3713- E1000_WRITE_REG(hw, RDH1, 0);
3714- E1000_WRITE_REG(hw, RDT1, 0);
3715- adapter->rx_ring[1].rdh = E1000_RDH1;
3716- adapter->rx_ring[1].rdt = E1000_RDT1;
3717- /* Fall Through */
3718-#endif
3719- case 0x1:
3720- default:
3721+ switch (adapter->num_rx_queues) {
3722+ case 1:
3723+ default:
3724         rdba = adapter->rx_ring[0].dma;
3725- E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
3726+ E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
3727         E1000_WRITE_REG(hw, RDBAH, (rdba >> 0x20));
3728- E1000_WRITE_REG(hw, RDLEN, rdlen);
3729- E1000_WRITE_REG(hw, RDH, 0);
3730- E1000_WRITE_REG(hw, RDT, 0);
3731- adapter->rx_ring[0].rdh = E1000_RDH;
3732- adapter->rx_ring[0].rdt = E1000_RDT;
3733- break;
3734- }
3735+ E1000_WRITE_REG(hw, RDLEN, rdlen);
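+ /* pre-82543 parts expose RDH/RDT at the legacy 82542 register offsets */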
3736+ adapter->rx_ring[0].rdh = ((hw->mac_type >= iegbe_82543) ? E1000_RDH : E1000_82542_RDH);
3737+ adapter->rx_ring[0].rdt = ((hw->mac_type >= iegbe_82543) ? E1000_RDT : E1000_82542_RDT);
3738+ break;
3739+ }
3740 
3741-#ifdef CONFIG_E1000_MQ
3742- if (adapter->num_queues > 0x1) {
3743- uint32_t random[0xa];
3744-
3745- get_random_bytes(&random[0], FORTY);
3746-
3747- if (hw->mac_type <= iegbe_82572) {
3748- E1000_WRITE_REG(hw, RSSIR, 0);
3749- E1000_WRITE_REG(hw, RSSIM, 0);
3750- }
3751 
3752- switch (adapter->num_queues) {
3753- case 0x2:
3754- default:
3755- reta = 0x00800080;
3756- mrqc = E1000_MRQC_ENABLE_RSS_2Q;
3757- break;
3758- }
3759-
3760- /* Fill out redirection table */
3761- for (i = 0; i < 0x20; i++)
3762- E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
3763- /* Fill out hash function seeds */
3764- for (i = 0; i < 0xa; i++)
3765- E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
3766-
3767- mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
3768- E1000_MRQC_RSS_FIELD_IPV4_TCP);
3769- E1000_WRITE_REG(hw, MRQC, mrqc);
3770- }
3771-
3772- /* Multiqueue and packet checksumming are mutually exclusive. */
3773- if (hw->mac_type >= iegbe_82571) {
3774- rxcsum = E1000_READ_REG(hw, RXCSUM);
3775- rxcsum |= E1000_RXCSUM_PCSD;
3776- E1000_WRITE_REG(hw, RXCSUM, rxcsum);
3777- }
3778-
3779-#else
3780+ /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3781+ if (hw->mac_type >= iegbe_82543) {
3782+ rxcsum = E1000_READ_REG(hw, RXCSUM);
3783+ if(adapter->rx_csum == TRUE) {
3784+ rxcsum |= E1000_RXCSUM_TUOFL;
3785+
3786+ /* Enable 82571 IPv4 payload checksum for UDP fragments
3787+ * Must be used in conjunction with packet-split. */
3788+ if ((hw->mac_type >= iegbe_82571) &&
3789+ (adapter->rx_ps_pages)) {
3790+ rxcsum |= E1000_RXCSUM_IPPCSE;
3791+ }
3792+ } else {
3793+ rxcsum &= ~E1000_RXCSUM_TUOFL;
3794+ /* don't need to clear IPPCSE as it defaults to 0 */
3795+ }
3796+ E1000_WRITE_REG(hw, RXCSUM, rxcsum);
3797+ }
3798 
3799- /* Enable 82543 Receive Checksum Offload for TCP and UDP */
3800- if (hw->mac_type >= iegbe_82543) {
3801- rxcsum = E1000_READ_REG(hw, RXCSUM);
3802- if(adapter->rx_csum == TRUE) {
3803- rxcsum |= E1000_RXCSUM_TUOFL;
3804-
3805- /* Enable 82571 IPv4 payload checksum for UDP fragments
3806- * Must be used in conjunction with packet-split. */
3807- if ((hw->mac_type >= iegbe_82571) &&
3808- (adapter->rx_ps_pages)) {
3809- rxcsum |= E1000_RXCSUM_IPPCSE;
3810- }
3811- } else {
3812- rxcsum &= ~E1000_RXCSUM_TUOFL;
3813- /* don't need to clear IPPCSE as it defaults to 0 */
3814- }
3815- E1000_WRITE_REG(hw, RXCSUM, rxcsum);
3816- }
3817-#endif /* CONFIG_E1000_MQ */
3818+ /* enable early receives on 82573, only takes effect if using > 2048
3819+ * byte total frame size. for example only for jumbo frames */
3820+#define E1000_ERT_2048 0x100
3821+ if (hw->mac_type == iegbe_82573)
3822+ E1000_WRITE_REG(&adapter->hw, ERT, E1000_ERT_2048);
3823 
3824- if (hw->mac_type == iegbe_82573) {
3825- E1000_WRITE_REG(hw, ERT, 0x0100);
3826- }
3827     /* Enable Receives */
3828- E1000_WRITE_REG(hw, RCTL, rctl);
3829+ E1000_WRITE_REG(hw, RCTL, rctl);
3830 }
3831 
3832 /**
3833@@ -2022,20 +1859,19 @@ iegbe_configure_rx(struct iegbe_adapter
3834  * Free all transmit software resources
3835  **/
3836 
3837-void
3838-iegbe_free_tx_resources(struct iegbe_adapter *adapter,
3839+static void iegbe_free_tx_resources(struct iegbe_adapter *adapter,
3840                         struct iegbe_tx_ring *tx_ring)
3841 {
3842- struct pci_dev *pdev = adapter->pdev;
3843+ struct pci_dev *pdev = adapter->pdev;
3844 
3845- iegbe_clean_tx_ring(adapter, tx_ring);
3846+ iegbe_clean_tx_ring(adapter, tx_ring);
3847 
3848- vfree(tx_ring->buffer_info);
3849- tx_ring->buffer_info = NULL;
3850+ vfree(tx_ring->buffer_info);
3851+ tx_ring->buffer_info = NULL;
3852 
3853- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
3854+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
3855 
3856- tx_ring->desc = NULL;
3857+ tx_ring->desc = NULL;
3858 }
3859 
3860 /**
3861@@ -2048,85 +1884,29 @@ iegbe_free_tx_resources(struct iegbe_ada
3862 void
3863 iegbe_free_all_tx_resources(struct iegbe_adapter *adapter)
3864 {
3865- int i;
3866+ int i;
3867 
3868- for (i = 0; i < adapter->num_queues; i++)
3869+ for (i = 0x0; i < adapter->num_tx_queues; i++)
3870         iegbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
3871 }
3872 
3873 static inline void
3874 iegbe_unmap_and_free_tx_resource(struct iegbe_adapter *adapter,
3875- struct iegbe_buffer *buffer_info)
3876-{
3877- if(buffer_info->dma) {
3878- pci_unmap_page(adapter->pdev,
3879- buffer_info->dma,
3880- buffer_info->length,
3881- PCI_DMA_TODEVICE);
3882- buffer_info->dma = 0;
3883- }
3884- if(buffer_info->skb) {
3885- dev_kfree_skb_any(buffer_info->skb);
3886- buffer_info->skb = NULL;
3887- }
3888-}
3889-
3890-#ifdef IEGBE_GBE_WORKAROUND
3891-/**
3892- * iegbe_clean_tx_ring_partial - Free Tx Buffers without using the DD
3893- * bit in the descriptor
3894- * @adapter: board private structure
3895- * @tx_ring: ring to be cleaned
3896- **/
3897-static void iegbe_clean_tx_ring_partial(struct iegbe_adapter *adapter,
3898- struct iegbe_tx_ring *tx_ring)
3899+ struct iegbe_buffer *buffer_info)
3900 {
3901- struct iegbe_buffer *buffer_info;
3902- struct iegbe_tx_desc *tx_desc;
3903- struct net_device *netdev = adapter->netdev;
3904- unsigned int i;
3905- unsigned tail;
3906- unsigned head;
3907- int cleaned = FALSE;
3908-
3909- tail = readl(adapter->hw.hw_addr + tx_ring->tdt);
3910- head = readl(adapter->hw.hw_addr + tx_ring->tdh);
3911-
3912- if (head != tail) {
3913- adapter->stats.tx_hnet++;
3914- }
3915- if (head != tx_ring->next_to_use) {
3916- adapter->stats.tx_hnentu++;
3917- }
3918- /* Free all the Tx ring sk_buffs from next_to_clean up until
3919- * the current head pointer
3920- */
3921- i = tx_ring->next_to_clean;
3922- while(i != head) {
3923- cleaned = TRUE;
3924- tx_desc = E1000_TX_DESC(*tx_ring, i);
3925-
3926- buffer_info = &tx_ring->buffer_info[i];
3927- iegbe_unmap_and_free_tx_resource(adapter, buffer_info);
3928-
3929- tx_desc->upper.data = 0;
3930-
3931- if (unlikely(++i == tx_ring->count)) { i = 0; }
3932-
3933- }
3934- tx_ring->next_to_clean = head;
3935-
3936- spin_lock(&tx_ring->tx_lock);
3937-
3938- /* Wake up the queue if it's currently stopped */
3939- if (unlikely(cleaned && netif_queue_stopped(netdev) &&
3940- netif_carrier_ok(netdev))) {
3941- netif_wake_queue(netdev);
3942+ if(buffer_info->dma) {
3943+ pci_unmap_page(adapter->pdev,
3944+ buffer_info->dma,
3945+ buffer_info->length,
3946+ PCI_DMA_TODEVICE);
3947+ buffer_info->dma = 0x0;
3948+ }
3949+ if(buffer_info->skb) {
3950+ dev_kfree_skb_any(buffer_info->skb);
3951+ buffer_info->skb = NULL;
3952     }
3953-
3954- spin_unlock(&tx_ring->tx_lock);
3955 }
3956-#endif
3957+
3958 
3959 /**
3960  * iegbe_clean_tx_ring - Free Tx Buffers
3961@@ -2134,38 +1914,34 @@ static void iegbe_clean_tx_ring_partial(
3962  * @tx_ring: ring to be cleaned
3963  **/
3964 
3965-static void
3966-iegbe_clean_tx_ring(struct iegbe_adapter *adapter,
3967+static void iegbe_clean_tx_ring(struct iegbe_adapter *adapter,
3968                     struct iegbe_tx_ring *tx_ring)
3969 {
3970- struct iegbe_buffer *buffer_info;
3971- unsigned long size;
3972- unsigned int i;
3973-
3974- /* Free all the Tx ring sk_buffs */
3975+ struct iegbe_hw *hw = &adapter->hw;
3976+ struct iegbe_buffer *buffer_info;
3977+ unsigned long size;
3978+ unsigned int i;
3979 
3980- if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
3981- iegbe_unmap_and_free_tx_resource(adapter,
3982- &tx_ring->previous_buffer_info);
3983- }
3984+ /* Free all the Tx ring sk_buffs */
3985 
3986     for (i = 0; i < tx_ring->count; i++) {
3987- buffer_info = &tx_ring->buffer_info[i];
3988- iegbe_unmap_and_free_tx_resource(adapter, buffer_info);
3989- }
3990+ buffer_info = &tx_ring->buffer_info[i];
3991+ iegbe_unmap_and_free_tx_resource(adapter, buffer_info);
3992+ }
3993 
3994- size = sizeof(struct iegbe_buffer) * tx_ring->count;
3995+ size = sizeof(struct iegbe_buffer) * tx_ring->count;
3996     memset(tx_ring->buffer_info, 0, size);
3997 
3998- /* Zero out the descriptor ring */
3999+ /* Zero out the descriptor ring */
4000 
4001     memset(tx_ring->desc, 0, tx_ring->size);
4002 
4003     tx_ring->next_to_use = 0;
4004     tx_ring->next_to_clean = 0;
4005+ tx_ring->last_tx_tso = 0;
4006 
4007- writel(0, adapter->hw.hw_addr + tx_ring->tdh);
4008- writel(0, adapter->hw.hw_addr + tx_ring->tdt);
4009+ writel(0, hw->hw_addr + tx_ring->tdh);
4010+ writel(0, hw->hw_addr + tx_ring->tdt);
4011 }
4012 
4013 /**
4014@@ -2173,12 +1949,11 @@ iegbe_clean_tx_ring(struct iegbe_adapter
4015  * @adapter: board private structure
4016  **/
4017 
4018-static void
4019-iegbe_clean_all_tx_rings(struct iegbe_adapter *adapter)
4020+static void iegbe_clean_all_tx_rings(struct iegbe_adapter *adapter)
4021 {
4022- int i;
4023+ int i;
4024 
4025- for (i = 0; i < adapter->num_queues; i++)
4026+ for (i = 0; i < adapter->num_tx_queues; i++)
4027         iegbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
4028 }
4029 
4030@@ -2190,24 +1965,23 @@ iegbe_clean_all_tx_rings(struct iegbe_ad
4031  * Free all receive software resources
4032  **/
4033 
4034-void
4035-iegbe_free_rx_resources(struct iegbe_adapter *adapter,
4036+static void iegbe_free_rx_resources(struct iegbe_adapter *adapter,
4037                         struct iegbe_rx_ring *rx_ring)
4038 {
4039- struct pci_dev *pdev = adapter->pdev;
4040+ struct pci_dev *pdev = adapter->pdev;
4041 
4042- iegbe_clean_rx_ring(adapter, rx_ring);
4043+ iegbe_clean_rx_ring(adapter, rx_ring);
4044 
4045- vfree(rx_ring->buffer_info);
4046- rx_ring->buffer_info = NULL;
4047- kfree(rx_ring->ps_page);
4048- rx_ring->ps_page = NULL;
4049- kfree(rx_ring->ps_page_dma);
4050- rx_ring->ps_page_dma = NULL;
4051+ vfree(rx_ring->buffer_info);
4052+ rx_ring->buffer_info = NULL;
4053+ kfree(rx_ring->ps_page);
4054+ rx_ring->ps_page = NULL;
4055+ kfree(rx_ring->ps_page_dma);
4056+ rx_ring->ps_page_dma = NULL;
4057 
4058- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
4059+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
4060 
4061- rx_ring->desc = NULL;
4062+ rx_ring->desc = NULL;
4063 }
4064 
4065 /**
4066@@ -2217,12 +1991,11 @@ iegbe_free_rx_resources(struct iegbe_ada
4067  * Free all receive software resources
4068  **/
4069 
4070-void
4071-iegbe_free_all_rx_resources(struct iegbe_adapter *adapter)
4072+void iegbe_free_all_rx_resources(struct iegbe_adapter *adapter)
4073 {
4074- int i;
4075+ int i;
4076 
4077- for (i = 0; i < adapter->num_queues; i++)
4078+ for (i = 0; i < adapter->num_rx_queues; i++)
4079         iegbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
4080 }
4081 
4082@@ -2232,60 +2005,59 @@ iegbe_free_all_rx_resources(struct iegbe
4083  * @rx_ring: ring to free buffers from
4084  **/
4085 
4086-static void
4087-iegbe_clean_rx_ring(struct iegbe_adapter *adapter,
4088+static void iegbe_clean_rx_ring(struct iegbe_adapter *adapter,
4089                     struct iegbe_rx_ring *rx_ring)
4090 {
4091- struct iegbe_buffer *buffer_info;
4092- struct iegbe_ps_page *ps_page;
4093- struct iegbe_ps_page_dma *ps_page_dma;
4094- struct pci_dev *pdev = adapter->pdev;
4095- unsigned long size;
4096- unsigned int i, j;
4097-
4098- /* Free all the Rx ring sk_buffs */
4099+ struct iegbe_hw *hw = &adapter->hw;
4100+ struct iegbe_buffer *buffer_info;
4101+ struct iegbe_ps_page *ps_page;
4102+ struct iegbe_ps_page_dma *ps_page_dma;
4103+ struct pci_dev *pdev = adapter->pdev;
4104+ unsigned long size;
4105+ unsigned int i, j;
4106+
4107+ /* Free all the Rx ring sk_buffs */
4108+
4109+ for (i = 0; i < rx_ring->count; i++) {
4110+ buffer_info = &rx_ring->buffer_info[i];
4111+ if(buffer_info->skb) {
4112+ pci_unmap_single(pdev,
4113+ buffer_info->dma,
4114+ buffer_info->length,
4115+ PCI_DMA_FROMDEVICE);
4116 
4117- for(i = 0; i < rx_ring->count; i++) {
4118- buffer_info = &rx_ring->buffer_info[i];
4119- if(buffer_info->skb) {
4120- ps_page = &rx_ring->ps_page[i];
4121- ps_page_dma = &rx_ring->ps_page_dma[i];
4122- pci_unmap_single(pdev,
4123- buffer_info->dma,
4124- buffer_info->length,
4125- PCI_DMA_FROMDEVICE);
4126-
4127- dev_kfree_skb(buffer_info->skb);
4128- buffer_info->skb = NULL;
4129-
4130- for(j = 0; j < adapter->rx_ps_pages; j++) {
4131- if(!ps_page->ps_page[j]) { break; }
4132- pci_unmap_single(pdev,
4133- ps_page_dma->ps_page_dma[j],
4134- PAGE_SIZE, PCI_DMA_FROMDEVICE);
4135- ps_page_dma->ps_page_dma[j] = 0;
4136- put_page(ps_page->ps_page[j]);
4137- ps_page->ps_page[j] = NULL;
4138- }
4139+ dev_kfree_skb(buffer_info->skb);
4140+ buffer_info->skb = NULL;
4141         }
4142- }
4143+ ps_page = &rx_ring->ps_page[i];
4144+ ps_page_dma = &rx_ring->ps_page_dma[i];
4145+ for (j = 0; j < adapter->rx_ps_pages; j++) {
4146+ if (!ps_page->ps_page[j]) break;
4147+ pci_unmap_page(pdev,
4148+ ps_page_dma->ps_page_dma[j],
4149+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
4150+ ps_page_dma->ps_page_dma[j] = 0;
4151+ put_page(ps_page->ps_page[j]);
4152+ ps_page->ps_page[j] = NULL;
4153+ }
4154+ }
4155 
4156- size = sizeof(struct iegbe_buffer) * rx_ring->count;
4157+ size = sizeof(struct iegbe_buffer) * rx_ring->count;
4158     memset(rx_ring->buffer_info, 0, size);
4159- size = sizeof(struct iegbe_ps_page) * rx_ring->count;
4160+ size = sizeof(struct iegbe_ps_page) * rx_ring->count;
4161     memset(rx_ring->ps_page, 0, size);
4162- size = sizeof(struct iegbe_ps_page_dma) * rx_ring->count;
4163+ size = sizeof(struct iegbe_ps_page_dma) * rx_ring->count;
4164     memset(rx_ring->ps_page_dma, 0, size);
4165 
4166- /* Zero out the descriptor ring */
4167+ /* Zero out the descriptor ring */
4168 
4169     memset(rx_ring->desc, 0, rx_ring->size);
4170 
4171     rx_ring->next_to_clean = 0;
4172     rx_ring->next_to_use = 0;
4173 
4174- writel(0, adapter->hw.hw_addr + rx_ring->rdh);
4175- writel(0, adapter->hw.hw_addr + rx_ring->rdt);
4176+ writel(0, hw->hw_addr + rx_ring->rdh);
4177+ writel(0, hw->hw_addr + rx_ring->rdt);
4178 }
4179 
4180 /**
4181@@ -2293,60 +2065,54 @@ iegbe_clean_rx_ring(struct iegbe_adapter
4182  * @adapter: board private structure
4183  **/
4184 
4185-static void
4186-iegbe_clean_all_rx_rings(struct iegbe_adapter *adapter)
4187+static void iegbe_clean_all_rx_rings(struct iegbe_adapter *adapter)
4188 {
4189- int i;
4190+ int i;
4191 
4192- for (i = 0; i < adapter->num_queues; i++)
4193+ for (i = 0; i < adapter->num_rx_queues; i++)
4194         iegbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
4195 }
4196 
4197 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
4198  * and memory write and invalidate disabled for certain operations
4199  */
4200-static void
4201-iegbe_enter_82542_rst(struct iegbe_adapter *adapter)
4202+static void iegbe_enter_82542_rst(struct iegbe_adapter *adapter)
4203 {
4204- struct net_device *netdev = adapter->netdev;
4205- uint32_t rctl;
4206+ struct net_device *netdev = adapter->netdev;
4207+ uint32_t rctl;
4208 
4209- iegbe_pci_clear_mwi(&adapter->hw);
4210+ iegbe_pci_clear_mwi(&adapter->hw);
4211 
4212- rctl = E1000_READ_REG(&adapter->hw, RCTL);
4213- rctl |= E1000_RCTL_RST;
4214- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4215- E1000_WRITE_FLUSH(&adapter->hw);
4216+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
4217+ rctl |= E1000_RCTL_RST;
4218+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4219+ E1000_WRITE_FLUSH(&adapter->hw);
4220     mdelay(0x5);
4221 
4222     if(netif_running(netdev)) {
4223- iegbe_clean_all_rx_rings(adapter);
4224-}
4225+ iegbe_clean_all_rx_rings(adapter);
4226+ }
4227 }
4228 
4229 static void
4230 iegbe_leave_82542_rst(struct iegbe_adapter *adapter)
4231 {
4232- struct net_device *netdev = adapter->netdev;
4233- uint32_t rctl;
4234+ struct net_device *netdev = adapter->netdev;
4235+ uint32_t rctl;
4236 
4237- rctl = E1000_READ_REG(&adapter->hw, RCTL);
4238- rctl &= ~E1000_RCTL_RST;
4239- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4240- E1000_WRITE_FLUSH(&adapter->hw);
4241+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
4242+ rctl &= ~E1000_RCTL_RST;
4243+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4244+ E1000_WRITE_FLUSH(&adapter->hw);
4245     mdelay(0x5);
4246 
4247     if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) {
4248- iegbe_pci_set_mwi(&adapter->hw);
4249+ iegbe_pci_set_mwi(&adapter->hw);
4250     }
4251     if(netif_running(netdev)) {
4252+ struct iegbe_rx_ring *ring = &adapter->rx_ring[0x0];
4253         iegbe_configure_rx(adapter);
4254-#ifdef IEGBE_GBE_WORKAROUND
4255- iegbe_alloc_rx_buffers(adapter, &adapter->rx_ring[0],
4256- IEGBE_GBE_WORKAROUND_NUM_RX_DESCRIPTORS + 1);
4257-#else
4258- iegbe_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
4259-#endif
4260+ adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
4261     }
4262 }
4263 
4264@@ -2358,133 +2124,153 @@ iegbe_leave_82542_rst(struct iegbe_adapt
4265  * Returns 0 on success, negative on failure
4266  **/
4267 
4268-static int
4269-iegbe_set_mac(struct net_device *netdev, void *p)
4270+static int iegbe_set_mac(struct net_device *netdev, void *p)
4271 {
4272- struct iegbe_adapter *adapter = netdev_priv(netdev);
4273- struct sockaddr *addr = p;
4274+ struct iegbe_adapter *adapter = netdev_priv(netdev);
4275+ struct sockaddr *addr = p;
4276 
4277     if(!is_valid_ether_addr(addr->sa_data)) {
4278- return -EADDRNOTAVAIL;
4279+ return -EADDRNOTAVAIL;
4280     }
4281- /* 82542 2.0 needs to be in reset to write receive address registers */
4282+ /* 82542 2.0 needs to be in reset to write receive address registers */
4283 
4284     if(adapter->hw.mac_type == iegbe_82542_rev2_0) {
4285- iegbe_enter_82542_rst(adapter);
4286+ iegbe_enter_82542_rst(adapter);
4287     }
4288- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4289- memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
4290+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4291+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
4292 
4293- iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
4294+ iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr, 0x0);
4295 
4296- /* With 82571 controllers, LAA may be overwritten (with the default)
4297- * due to controller reset from the other port. */
4298- if (adapter->hw.mac_type == iegbe_82571) {
4299- /* activate the work around */
4300+ /* With 82571 controllers, LAA may be overwritten (with the default)
4301+ * due to controller reset from the other port. */
4302+ if (adapter->hw.mac_type == iegbe_82571) {
4303+ /* activate the work around */
4304         adapter->hw.laa_is_present = 0x1;
4305 
4306- /* Hold a copy of the LAA in RAR[14] This is done so that
4307- * between the time RAR[0] gets clobbered and the time it
4308- * gets fixed (in iegbe_watchdog), the actual LAA is in one
4309- * of the RARs and no incoming packets directed to this port
4310- * are dropped. Eventaully the LAA will be in RAR[0] and
4311- * RAR[14] */
4312- iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr,
4313+ /* Hold a copy of the LAA in RAR[14] This is done so that
4314+ * between the time RAR[0] gets clobbered and the time it
4315+ * gets fixed (in iegbe_watchdog), the actual LAA is in one
4316+ * of the RARs and no incoming packets directed to this port
4317+ * are dropped. Eventually the LAA will be in RAR[0] and
4318+ * RAR[14] */
4319+ iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr,
4320                     E1000_RAR_ENTRIES - 0x1);
4321- }
4322+ }
4323 
4324     if(adapter->hw.mac_type == iegbe_82542_rev2_0) {
4325- iegbe_leave_82542_rst(adapter);
4326+ iegbe_leave_82542_rst(adapter);
4327     }
4328- return 0;
4329+ return 0x0;
4330 }
4331 
4332 /**
4333- * iegbe_set_multi - Multicast and Promiscuous mode set
4334+ * iegbe_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4335  * @netdev: network interface device structure
4336  *
4337- * The set_multi entry point is called whenever the multicast address
4338- * list or the network interface flags are updated. This routine is
4339- * responsible for configuring the hardware for proper multicast,
4340+ * The set_rx_mode entry point is called whenever the unicast or multicast
4341+ * address lists or the network interface flags are updated. This routine is
4342+ * responsible for configuring the hardware for proper unicast, multicast,
4343  * promiscuous mode, and all-multi behavior.
4344  **/
4345 
4346-static void
4347-iegbe_set_multi(struct net_device *netdev)
4348+static void iegbe_set_rx_mode(struct net_device *netdev)
4349 {
4350     struct iegbe_adapter *adapter = netdev_priv(netdev);
4351     struct iegbe_hw *hw = &adapter->hw;
4352- struct dev_mc_list *mc_ptr;
4353- uint32_t rctl;
4354- uint32_t hash_value;
4355+ struct dev_addr_list *uc_ptr;
4356+ struct dev_addr_list *mc_ptr;
4357+ u32 rctl;
4358+ u32 hash_value;
4359     int i, rar_entries = E1000_RAR_ENTRIES;
4360+ int mta_reg_count = E1000_NUM_MTA_REGISTERS;
4361 
4362     /* reserve RAR[14] for LAA over-write work-around */
4363- if (adapter->hw.mac_type == iegbe_82571) {
4364+ if (hw->mac_type == iegbe_82571)
4365         rar_entries--;
4366- }
4367+
4368     /* Check for Promiscuous and All Multicast modes */
4369 
4370- rctl = E1000_READ_REG(hw, RCTL);
4371+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
4372 
4373     if (netdev->flags & IFF_PROMISC) {
4374         rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
4375- } else if (netdev->flags & IFF_ALLMULTI) {
4376- rctl |= E1000_RCTL_MPE;
4377- rctl &= ~E1000_RCTL_UPE;
4378+ rctl &= ~E1000_RCTL_VFE;
4379     } else {
4380- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
4381+ if (netdev->flags & IFF_ALLMULTI) {
4382+ rctl |= E1000_RCTL_MPE;
4383+ } else {
4384+ rctl &= ~E1000_RCTL_MPE;
4385+ }
4386+ }
4387+
4388+ uc_ptr = NULL;
4389+ if (netdev->uc_count > rar_entries - 1) {
4390+ rctl |= E1000_RCTL_UPE;
4391+ } else if (!(netdev->flags & IFF_PROMISC)) {
4392+ rctl &= ~E1000_RCTL_UPE;
4393+ uc_ptr = netdev->uc_list;
4394     }
4395 
4396- E1000_WRITE_REG(hw, RCTL, rctl);
4397+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4398 
4399     /* 82542 2.0 needs to be in reset to write receive address registers */
4400 
4401- if (hw->mac_type == iegbe_82542_rev2_0) {
4402+ if (hw->mac_type == iegbe_82542_rev2_0)
4403         iegbe_enter_82542_rst(adapter);
4404- }
4405- /* load the first 14 multicast address into the exact filters 1-14
4406+
4407+ /* load the first 14 addresses into the exact filters 1-14. Unicast
4408+ * addresses take precedence to avoid disabling unicast filtering
4409+ * when possible.
4410+ *
4411      * RAR 0 is used for the station MAC adddress
4412      * if there are not 14 addresses, go ahead and clear the filters
4413      * -- with 82571 controllers only 0-13 entries are filled here
4414      */
4415     mc_ptr = netdev->mc_list;
4416 
4417- for (i = 0x1; i < rar_entries; i++) {
4418- if (mc_ptr) {
4419- iegbe_rar_set(hw, mc_ptr->dmi_addr, i);
4420+ for (i = 1; i < rar_entries; i++) {
4421+ if (uc_ptr) {
4422+ iegbe_rar_set(hw, uc_ptr->da_addr, i);
4423+ uc_ptr = uc_ptr->next;
4424+ } else if (mc_ptr) {
4425+ iegbe_rar_set(hw, mc_ptr->da_addr, i);
4426             mc_ptr = mc_ptr->next;
4427         } else {
4428- E1000_WRITE_REG_ARRAY(hw, RA, i << 0x1, 0);
4429- E1000_WRITE_REG_ARRAY(hw, RA, (i << 0x1) + 0x1, 0);
4430+ E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
4431+ E1000_WRITE_FLUSH(&adapter->hw);
4432+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
4433+ E1000_WRITE_FLUSH(&adapter->hw);
4434         }
4435     }
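+ /* every secondary unicast address should have been consumed by the
+ * RAR loop above; uc_ptr was only set when they all fit */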
4436+ WARN_ON(uc_ptr != NULL);
4437 
4438     /* clear the old settings from the multicast hash table */
4439 
4440- for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
4441+ for (i = 0; i < mta_reg_count; i++) {
4442         E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
4443+ E1000_WRITE_FLUSH(&adapter->hw);
4444+ }
4445 
4446     /* load any remaining addresses into the hash table */
4447 
4448     for (; mc_ptr; mc_ptr = mc_ptr->next) {
4449- hash_value = iegbe_hash_mc_addr(hw, mc_ptr->dmi_addr);
4450+ hash_value = iegbe_hash_mc_addr(hw, mc_ptr->da_addr);
4451         iegbe_mta_set(hw, hash_value);
4452     }
4453 
4454- if (hw->mac_type == iegbe_82542_rev2_0) {
4455+ if (hw->mac_type == iegbe_82542_rev2_0)
4456         iegbe_leave_82542_rst(adapter);
4457 }
4458-}
4459 
4460 /* Need to wait a few seconds after link up to get diagnostic information from
4461  * the phy */
4462 
4463-static void
4464-iegbe_update_phy_info(unsigned long data)
4465+static void iegbe_update_phy_info(unsigned long data)
4466 {
4467- struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
4468- iegbe_phy_get_info(&adapter->hw, &adapter->phy_info);
4469+ struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
4470+ struct iegbe_hw *hw = &adapter->hw;
4471+ iegbe_phy_get_info(hw, &adapter->phy_info);
4472 }
4473 
4474 /**
4475@@ -2492,54 +2278,54 @@ iegbe_update_phy_info(unsigned long data
4476  * @data: pointer to adapter cast into an unsigned long
4477  **/
4478 
4479-static void
4480-iegbe_82547_tx_fifo_stall(unsigned long data)
4481+static void iegbe_82547_tx_fifo_stall(unsigned long data)
4482 {
4483- struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
4484- struct net_device *netdev = adapter->netdev;
4485- uint32_t tctl;
4486+ struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
4487+ struct net_device *netdev = adapter->netdev;
4488+ u32 tctl;
4489 
4490- if(atomic_read(&adapter->tx_fifo_stall)) {
4491- if((E1000_READ_REG(&adapter->hw, TDT) ==
4492- E1000_READ_REG(&adapter->hw, TDH)) &&
4493- (E1000_READ_REG(&adapter->hw, TDFT) ==
4494- E1000_READ_REG(&adapter->hw, TDFH)) &&
4495- (E1000_READ_REG(&adapter->hw, TDFTS) ==
4496- E1000_READ_REG(&adapter->hw, TDFHS))) {
4497- tctl = E1000_READ_REG(&adapter->hw, TCTL);
4498- E1000_WRITE_REG(&adapter->hw, TCTL,
4499- tctl & ~E1000_TCTL_EN);
4500- E1000_WRITE_REG(&adapter->hw, TDFT,
4501- adapter->tx_head_addr);
4502- E1000_WRITE_REG(&adapter->hw, TDFH,
4503- adapter->tx_head_addr);
4504- E1000_WRITE_REG(&adapter->hw, TDFTS,
4505- adapter->tx_head_addr);
4506- E1000_WRITE_REG(&adapter->hw, TDFHS,
4507- adapter->tx_head_addr);
4508- E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
4509- E1000_WRITE_FLUSH(&adapter->hw);
4510-
4511- adapter->tx_fifo_head = 0;
4512- atomic_set(&adapter->tx_fifo_stall, 0);
4513- netif_wake_queue(netdev);
4514- } else {
4515+ if(atomic_read(&adapter->tx_fifo_stall)) {
4516+ if((E1000_READ_REG(&adapter->hw, TDT) ==
4517+ E1000_READ_REG(&adapter->hw, TDH)) &&
4518+ (E1000_READ_REG(&adapter->hw, TDFT) ==
4519+ E1000_READ_REG(&adapter->hw, TDFH)) &&
4520+ (E1000_READ_REG(&adapter->hw, TDFTS) ==
4521+ E1000_READ_REG(&adapter->hw, TDFHS))) {
4522+ tctl = E1000_READ_REG(&adapter->hw, TCTL);
4523+ E1000_WRITE_REG(&adapter->hw, TCTL,
4524+ tctl & ~E1000_TCTL_EN);
4525+ E1000_WRITE_REG(&adapter->hw, TDFT,
4526+ adapter->tx_head_addr);
4527+ E1000_WRITE_REG(&adapter->hw, TDFH,
4528+ adapter->tx_head_addr);
4529+ E1000_WRITE_REG(&adapter->hw, TDFTS,
4530+ adapter->tx_head_addr);
4531+ E1000_WRITE_REG(&adapter->hw, TDFHS,
4532+ adapter->tx_head_addr);
4533+ E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
4534+ E1000_WRITE_FLUSH(&adapter->hw);
4535+
4536+ adapter->tx_fifo_head = 0x0;
4537+ atomic_set(&adapter->tx_fifo_stall, 0x0);
4538+ netif_wake_queue(netdev);
4539+ } else {
4540             mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 0x1);
4541- }
4542- }
4543+ }
4544+ }
4545 }
4546 
4547+
4548 /**
4549  * iegbe_watchdog - Timer Call-back
4550  * @data: pointer to adapter cast into an unsigned long
4551  **/
4552-static void
4553-iegbe_watchdog(unsigned long data)
4554+static void iegbe_watchdog(unsigned long data)
4555 {
4556- struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
4557- struct net_device *netdev = adapter->netdev;
4558- struct iegbe_tx_ring *txdr = &adapter->tx_ring[0];
4559- uint32_t link;
4560+ struct iegbe_adapter *adapter = (struct iegbe_adapter *) data;
4561+ struct iegbe_hw *hw = &adapter->hw;
4562+ struct net_device *netdev = adapter->netdev;
4563+ struct iegbe_tx_ring *txdr = adapter->tx_ring;
4564+ u32 link, tctl;
4565 
4566    /*
4567     * Test the PHY for link status on icp_xxxx MACs.
4568@@ -2547,123 +2333,305 @@ iegbe_watchdog(unsigned long data)
4569     * in the adapter->hw structure, then set hw->get_link_status = 1
4570     */
4571     if(adapter->hw.mac_type == iegbe_icp_xxxx) {
4572- int isUp = 0;
4573+ int isUp = 0x0;
4574         int32_t ret_val;
4575 
4576         ret_val = iegbe_oem_phy_is_link_up(&adapter->hw, &isUp);
4577         if(ret_val != E1000_SUCCESS) {
4578- isUp = 0;
4579- }
4580+ isUp = 0x0;
4581+ }
4582         if(isUp != adapter->hw.icp_xxxx_is_link_up) {
4583             adapter->hw.get_link_status = 0x1;
4584         }
4585     }
4586 
4587- iegbe_check_for_link(&adapter->hw);
4588- if (adapter->hw.mac_type == iegbe_82573) {
4589- iegbe_enable_tx_pkt_filtering(&adapter->hw);
4590+ iegbe_check_for_link(&adapter->hw);
4591+ if (adapter->hw.mac_type == iegbe_82573) {
4592+ iegbe_enable_tx_pkt_filtering(&adapter->hw);
4593 #ifdef NETIF_F_HW_VLAN_TX
4594         if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) {
4595- iegbe_update_mng_vlan(adapter);
4596+ iegbe_update_mng_vlan(adapter);
4597         }
4598 #endif
4599- }
4600+ }
4601 
4602- if ((adapter->hw.media_type == iegbe_media_type_internal_serdes) &&
4603- !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) {
4604- link = !adapter->hw.serdes_link_down;
4605- } else {
4606+ if ((adapter->hw.media_type == iegbe_media_type_internal_serdes) &&
4607+ !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) {
4608+ link = !adapter->hw.serdes_link_down;
4609+ } else {
4610 
4611- if(adapter->hw.mac_type != iegbe_icp_xxxx) {
4612- link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
4613- } else {
4614- int isUp = 0;
4615+ if(adapter->hw.mac_type != iegbe_icp_xxxx) {
4616+ link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
4617+ } else {
4618+ int isUp = 0x0;
4619             if(iegbe_oem_phy_is_link_up(&adapter->hw, &isUp) != E1000_SUCCESS) {
4620- isUp = 0;
4621+ isUp = 0x0;
4622                 }
4623- link = isUp;
4624- }
4625- }
4626+ link = isUp;
4627+ }
4628+ }
4629 
4630- if (link) {
4631- if (!netif_carrier_ok(netdev)) {
4632- iegbe_get_speed_and_duplex(&adapter->hw,
4633- &adapter->link_speed,
4634- &adapter->link_duplex);
4635-
4636- DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
4637- adapter->link_speed,
4638- adapter->link_duplex == FULL_DUPLEX ?
4639- "Full Duplex" : "Half Duplex");
4640+ if (link) {
4641+ if (!netif_carrier_ok(netdev)) {
4642+ u32 ctrl;
4643+ bool txb2b = true;
4644+ iegbe_get_speed_and_duplex(hw,
4645+ &adapter->link_speed,
4646+ &adapter->link_duplex);
4647 
4648- netif_carrier_on(netdev);
4649- netif_wake_queue(netdev);
4650- mod_timer(&adapter->phy_info_timer, jiffies + 0x2 * HZ);
4651+ ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4652+ DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
4653+ "Flow Control: %s\n",
4654+ adapter->link_speed,
4655+ adapter->link_duplex == FULL_DUPLEX ?
4656+ "Full Duplex" : "Half Duplex",
4657+ ((ctrl & E1000_CTRL_TFCE) && (ctrl &
4658+ E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
4659+ E1000_CTRL_RFCE) ? "RX" : ((ctrl &
4660+ E1000_CTRL_TFCE) ? "TX" : "None" )));
4661+
4662+ /* tweak tx_queue_len according to speed/duplex
4663+ * and adjust the timeout factor */
4664+ netdev->tx_queue_len = adapter->tx_queue_len;
4665+ adapter->tx_timeout_factor = 1;
4666+ switch (adapter->link_speed) {
4667+ case SPEED_10:
4668+ txb2b = false;
4669+ netdev->tx_queue_len = 10;
4670+ adapter->tx_timeout_factor = 8;
4671+ break;
4672+ case SPEED_100:
4673+ txb2b = false;
4674+ netdev->tx_queue_len = 100;
4675+ break;
4676+ }
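+ /* assumed speed-related workaround carried over from upstream e1000:
+ * clear TARC0 bit 21 when the link is not back-to-back at gigabit */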
4677+ if ((hw->mac_type == iegbe_82571 ||
4678+ hw->mac_type == iegbe_82572) &&
4679+ !txb2b) {
4680+ u32 tarc0;
4681+ tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
4682+ tarc0 &= ~(1 << 21);
4683+ E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
4684+ }
4685+ /* disable TSO for pcie and 10/100 speeds, to avoid
4686+ * some hardware issues */
4687+ if (!adapter->tso_force &&
4688+ hw->bus_type == iegbe_bus_type_pci_express){
4689+ switch (adapter->link_speed) {
4690+ case SPEED_10:
4691+ case SPEED_100:
4692+ DPRINTK(PROBE, INFO,
4693+ "10/100 speed: disabling TSO\n");
4694+ netdev->features &= ~NETIF_F_TSO;
4695+ netdev->features &= ~NETIF_F_TSO6;
4696+ break;
4697+ case SPEED_1000:
4698+ netdev->features |= NETIF_F_TSO;
4699+ netdev->features |= NETIF_F_TSO6;
4700+ break;
4701+ default:
4702+ break;
4703+ }
4704+ }
4705+ tctl = E1000_READ_REG(&adapter->hw, TCTL);
4706+ tctl |= E1000_TCTL_EN;
4707+ E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
4708+ netif_carrier_on(netdev);
4709+ netif_wake_queue(netdev);
4710+ mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
4711             adapter->smartspeed = 0;
4712+ } else {
4713+ if (hw->rx_needs_kicking) {
4714+ u32 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4715+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl | E1000_RCTL_EN);
4716+ }
4717         }
4718- } else {
4719- if (netif_carrier_ok(netdev)) {
4720+ } else {
4721+ if (netif_carrier_ok(netdev)) {
4722             adapter->link_speed = 0;
4723             adapter->link_duplex = 0;
4724- DPRINTK(LINK, INFO, "NIC Link is Down\n");
4725- netif_carrier_off(netdev);
4726- netif_stop_queue(netdev);
4727- mod_timer(&adapter->phy_info_timer, jiffies + 0x2 * HZ);
4728- }
4729+ DPRINTK(LINK, INFO, "NIC Link is Down\n");
4730+ netif_carrier_off(netdev);
4731+ netif_stop_queue(netdev);
4732+ mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
4733+ }
4734 
4735- iegbe_smartspeed(adapter);
4736- }
4737+ iegbe_smartspeed(adapter);
4738+ }
4739+
4740+ iegbe_update_stats(adapter);
4741+
4742+ hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4743+ adapter->tpt_old = adapter->stats.tpt;
4744+ hw->collision_delta = adapter->stats.colc - adapter->colc_old;
4745+ adapter->colc_old = adapter->stats.colc;
4746+
4747+ adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
4748+ adapter->gorcl_old = adapter->stats.gorcl;
4749+ adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
4750+ adapter->gotcl_old = adapter->stats.gotcl;
4751+
4752+ iegbe_update_adaptive(hw);
4753+
4754+ if (!netif_carrier_ok(netdev)) {
4755+ if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
4756+ /* We've lost link, so the controller stops DMA,
4757+ * but we've got queued Tx work that's never going
4758+ * to get done, so reset controller to flush Tx.
4759+ * (Do the reset outside of interrupt context). */
4760+ adapter->tx_timeout_count++;
4761+ schedule_work(&adapter->reset_task);
4762+ }
4763+ }
4764+
4765+ /* Cause software interrupt to ensure rx ring is cleaned */
4766+ E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
4767+
4768+ /* Force detection of hung controller every watchdog period */
4769+ adapter->detect_tx_hung = TRUE;
4770+
4771+ /* With 82571 controllers, LAA may be overwritten due to controller
4772+ * reset from the other port. Set the appropriate LAA in RAR[0] */
4773+ if (adapter->hw.mac_type == iegbe_82571 && adapter->hw.laa_is_present) {
4774+ iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr, 0x0);
4775+ }
4776+ /* Reset the timer */
4777+ mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
4778+}
4779+
4780+enum latency_range {
4781+ lowest_latency = 0,
4782+ low_latency = 1,
4783+ bulk_latency = 2,
4784+ latency_invalid = 255
4785+};
4786 
4787- iegbe_update_stats(adapter);
4788+/**
4789+ * iegbe_update_itr - update the dynamic ITR value based on statistics
4790+ * Stores a new ITR value based on packets and byte
4791+ * counts during the last interrupt. The advantage of per interrupt
4792+ * computation is faster updates and more accurate ITR for the current
4793+ * traffic pattern. Constants in this function were computed
4794+ * based on theoretical maximum wire speed and thresholds were set based
4795+ * on testing data as well as attempting to minimize response time
4796+ * while increasing bulk throughput.
4797+ * this functionality is controlled by the InterruptThrottleRate module
4798+ * parameter (see iegbe_param.c)
4799+ * @adapter: pointer to adapter
4800+ * @itr_setting: current adapter->itr
4801+ * @packets: the number of packets during this measurement interval
4802+ * @bytes: the number of bytes during this measurement interval
4803+ **/
4804+static unsigned int iegbe_update_itr(struct iegbe_adapter *adapter,
4805+ u16 itr_setting, int packets, int bytes)
4806+{
4807+ unsigned int retval = itr_setting;
4808+ struct iegbe_hw *hw = &adapter->hw;
4809 
4810- adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4811- adapter->tpt_old = adapter->stats.tpt;
4812- adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
4813- adapter->colc_old = adapter->stats.colc;
4814-
4815- adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
4816- adapter->gorcl_old = adapter->stats.gorcl;
4817- adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
4818- adapter->gotcl_old = adapter->stats.gotcl;
4819-
4820- iegbe_update_adaptive(&adapter->hw);
4821-
4822- if (adapter->num_queues == 0x1 && !netif_carrier_ok(netdev)) {
4823- if (E1000_DESC_UNUSED(txdr) + 0x1 < txdr->count) {
4824- /* We've lost link, so the controller stops DMA,
4825- * but we've got queued Tx work that's never going
4826- * to get done, so reset controller to flush Tx.
4827- * (Do the reset outside of interrupt context). */
4828- schedule_work(&adapter->tx_timeout_task);
4829+ if (unlikely(hw->mac_type < iegbe_82540))
4830+ goto update_itr_done;
4831+
4832+ if (packets == 0)
4833+ goto update_itr_done;
4834+
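+ /* worked example: in low_latency, 20 packets totalling 30000 bytes
+ * gives bytes/packets = 1500 (> 1200), so we fall back to bulk_latency */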
4835+ switch (itr_setting) {
4836+ case lowest_latency:
4837+ /* jumbo frames get bulk treatment*/
4838+ if (bytes/packets > 8000)
4839+ retval = bulk_latency;
4840+ else if ((packets < 5) && (bytes > 512))
4841+ retval = low_latency;
4842+ break;
4843+ case low_latency: /* 50 usec aka 20000 ints/s */
4844+ if (bytes > 10000) {
4845+ /* jumbo frames need bulk latency setting */
4846+ if (bytes/packets > 8000)
4847+ retval = bulk_latency;
4848+ else if ((packets < 10) || ((bytes/packets) > 1200))
4849+ retval = bulk_latency;
4850+ else if (packets > 35)
4851+ retval = lowest_latency;
4852+ } else if (bytes/packets > 2000)
4853+ retval = bulk_latency;
4854+ else if (packets <= 2 && bytes < 512)
4855+ retval = lowest_latency;
4856+ break;
4857+ case bulk_latency: /* 250 usec aka 4000 ints/s */
4858+ if (bytes > 25000) {
4859+ if (packets > 35)
4860+ retval = low_latency;
4861+ } else if (bytes < 6000) {
4862+ retval = low_latency;
4863         }
4864+ break;
4865     }
4866 
4867- /* Dynamic mode for Interrupt Throttle Rate (ITR) */
4868- if (adapter->hw.mac_type >= iegbe_82540 && adapter->itr == 0x1) {
4869- /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
4870- * asymmetrical Tx or Rx gets ITR=8000; everyone
4871- * else is between 2000-8000. */
4872- uint32_t goc = (adapter->gotcl + adapter->gorcl) / 0x2710;
4873- uint32_t dif = (adapter->gotcl > adapter->gorcl ?
4874- adapter->gotcl - adapter->gorcl :
4875- adapter->gorcl - adapter->gotcl) / 0x2710;
4876- uint32_t itr = goc > 0 ? (dif * 0x1770 / goc + 0x7d0) : 0x1f40;
4877- E1000_WRITE_REG(&adapter->hw, ITR, 0x3b9aca00 / (itr * 0x100));
4878- }
4879+update_itr_done:
4880+ return retval;
4881+}
4882 
4883- /* Cause software interrupt to ensure rx ring is cleaned */
4884- E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
4885+static void iegbe_set_itr(struct iegbe_adapter *adapter)
4886+{
4887+ struct iegbe_hw *hw = &adapter->hw;
4888+ u16 current_itr;
4889+ u32 new_itr = adapter->itr;
4890 
4891- /* Force detection of hung controller every watchdog period */
4892- adapter->detect_tx_hung = TRUE;
4893+ if (unlikely(hw->mac_type < iegbe_82540))
4894+ return;
4895 
4896- /* With 82571 controllers, LAA may be overwritten due to controller
4897- * reset from the other port. Set the appropriate LAA in RAR[0] */
4898- if (adapter->hw.mac_type == iegbe_82571 && adapter->hw.laa_is_present) {
4899- iegbe_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
4900- }
4901- /* Reset the timer */
4902- mod_timer(&adapter->watchdog_timer, jiffies + 0x2 * HZ);
4903+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4904+ if (unlikely(adapter->link_speed != SPEED_1000)) {
4905+ current_itr = 0;
4906+ new_itr = 4000;
4907+ goto set_itr_now;
4908+ }
4909+
4910+ adapter->tx_itr = iegbe_update_itr(adapter,
4911+ adapter->tx_itr,
4912+ adapter->total_tx_packets,
4913+ adapter->total_tx_bytes);
4914+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
4915+ if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
4916+ adapter->tx_itr = low_latency;
4917+
4918+ adapter->rx_itr = iegbe_update_itr(adapter,
4919+ adapter->rx_itr,
4920+ adapter->total_rx_packets,
4921+ adapter->total_rx_bytes);
4922+ /* conservative mode (itr 3) eliminates the lowest_latency setting */
4923+ if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
4924+ adapter->rx_itr = low_latency;
4925+
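+ /* a higher enum value means a lower interrupt rate, so max() picks
+ * the more conservative of the Rx and Tx estimates */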
4926+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
4927+
4928+ switch (current_itr) {
4929+ /* counts and packets in update_itr are dependent on these numbers */
4930+ case lowest_latency:
4931+ new_itr = 70000;
4932+ break;
4933+ case low_latency:
4934+ new_itr = 20000; /* aka hwitr = ~200 */
4935+ break;
4936+ case bulk_latency:
4937+ new_itr = 4000;
4938+ break;
4939+ default:
4940+ break;
4941+ }
4942+
4943+set_itr_now:
4944+ if (new_itr != adapter->itr) {
4945+ /* this attempts to bias the interrupt rate towards Bulk
4946+ * by adding intermediate steps when interrupt rate is
4947+ * increasing */
4948+ new_itr = new_itr > adapter->itr ?
4949+ min(adapter->itr + (new_itr >> 2), new_itr) :
4950+ new_itr;
4951+ adapter->itr = new_itr;
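+ /* the ITR register counts in 256 ns units, so 10^9 / (rate * 256)
+ * converts an interrupts-per-second target into a register value */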
4952+ E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (new_itr * 256));
4953+ }
4954+
4955+ return;
4956 }
4957 
4958 #define E1000_TX_FLAGS_CSUM 0x00000001
4959@@ -2673,55 +2641,48 @@ iegbe_watchdog(unsigned long data)
4960 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4961 #define E1000_TX_FLAGS_VLAN_SHIFT 16
4962 
4963-static inline int
4964-iegbe_tso(struct iegbe_adapter *adapter, struct iegbe_tx_ring *tx_ring,
4965- struct sk_buff *skb)
4966+static int iegbe_tso(struct iegbe_adapter *adapter,
4967+ struct iegbe_tx_ring *tx_ring, struct sk_buff *skb)
4968 {
4969-#ifdef NETIF_F_TSO
4970     struct iegbe_context_desc *context_desc;
4971+ struct iegbe_buffer *buffer_info;
4972     unsigned int i;
4973- uint32_t cmd_length = 0;
4974- uint16_t ipcse = 0, tucse, mss;
4975- uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
4976+ u32 cmd_length = 0;
4977+ u16 ipcse = 0, tucse, mss;
4978+ u8 ipcss, ipcso, tucss, tucso, hdr_len;
4979     int err;
4980 
4981     if (skb_is_gso(skb)) {
4982         if (skb_header_cloned(skb)) {
4983             err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4984- if (err) {
4985+ if (err)
4986                 return err;
4987         }
4988- }
4989 
4990- hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 0x2));
4991+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4992         mss = skb_shinfo(skb)->gso_size;
4993         if (skb->protocol == htons(ETH_P_IP)) {
4994- skb->nh.iph->tot_len = 0;
4995- skb->nh.iph->check = 0;
4996- skb->h.th->check =
4997- ~csum_tcpudp_magic(skb->nh.iph->saddr,
4998- skb->nh.iph->daddr,
4999- 0,
5000- IPPROTO_TCP,
5001- 0);
5002+ struct iphdr *iph = ip_hdr(skb);
5003+ iph->tot_len = 0;
5004+ iph->check = 0;
5005+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5006+ iph->daddr, 0,
5007+ IPPROTO_TCP,
5008+ 0);
5009             cmd_length = E1000_TXD_CMD_IP;
5010- ipcse = skb->h.raw - skb->data - 0x1;
5011-#ifdef NETIF_F_TSO_IPV6
5012- } else if (skb->protocol == ntohs(ETH_P_IPV6)) {
5013- skb->nh.ipv6h->payload_len = 0;
5014- skb->h.th->check =
5015- ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
5016- &skb->nh.ipv6h->daddr,
5017- 0,
5018- IPPROTO_TCP,
5019- 0);
5020+ ipcse = skb_transport_offset(skb) - 1;
5021+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
5022+ ipv6_hdr(skb)->payload_len = 0;
5023+ tcp_hdr(skb)->check =
5024+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5025+ &ipv6_hdr(skb)->daddr,
5026+ 0, IPPROTO_TCP, 0);
5027             ipcse = 0;
5028-#endif
5029         }
5030- ipcss = skb->nh.raw - skb->data;
5031- ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
5032- tucss = skb->h.raw - skb->data;
5033- tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
5034+ ipcss = skb_network_offset(skb);
5035+ ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5036+ tucss = skb_transport_offset(skb);
5037+ tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
5038         tucse = 0;
5039 
5040         cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5041@@ -2729,6 +2690,7 @@ iegbe_tso(struct iegbe_adapter *adapter,
5042 
5043         i = tx_ring->next_to_use;
5044         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5045+ buffer_info = &tx_ring->buffer_info[i];
5046 
5047         context_desc->lower_setup.ip_fields.ipcss = ipcss;
5048         context_desc->lower_setup.ip_fields.ipcso = ipcso;
5049@@ -2740,205 +2702,218 @@ iegbe_tso(struct iegbe_adapter *adapter,
5050         context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5051         context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5052 
5053- if (++i == tx_ring->count) { i = 0; }
5054+ buffer_info->time_stamp = jiffies;
5055+ buffer_info->next_to_watch = i;
5056+
5057+ if (++i == tx_ring->count) i = 0;
5058         tx_ring->next_to_use = i;
5059 
5060- return TRUE;
5061+ return true;
5062     }
5063-#endif
5064-
5065- return FALSE;
5066+ return false;
5067 }
5068 
5069-static inline boolean_t
5070-iegbe_tx_csum(struct iegbe_adapter *adapter, struct iegbe_tx_ring *tx_ring,
5071- struct sk_buff *skb)
5072+static bool iegbe_tx_csum(struct iegbe_adapter *adapter,
5073+ struct iegbe_tx_ring *tx_ring, struct sk_buff *skb)
5074 {
5075     struct iegbe_context_desc *context_desc;
5076+ struct iegbe_buffer *buffer_info;
5077     unsigned int i;
5078- uint8_t css;
5079+ u8 css;
5080 
5081- if (likely(skb->ip_summed == CHECKSUM_HW)) {
5082- css = skb->h.raw - skb->data;
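+ /* CHECKSUM_PARTIAL: the stack has set csum_start/csum_offset and
+ * expects the hardware to insert the checksum on transmit */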
5083+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
5084+ css = skb_transport_offset(skb);
5085 
5086- i = tx_ring->next_to_use;
5087- context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5088+ i = tx_ring->next_to_use;
5089+ buffer_info = &tx_ring->buffer_info[i];
5090+ context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5091 
5092+ context_desc->lower_setup.ip_config = 0;
5093         context_desc->upper_setup.tcp_fields.tucss = css;
5094- context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
5095+ context_desc->upper_setup.tcp_fields.tucso =
5096+ css + skb->csum_offset;
5097         context_desc->upper_setup.tcp_fields.tucse = 0;
5098         context_desc->tcp_seg_setup.data = 0;
5099         context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
5100 
5101- if (unlikely(++i == tx_ring->count)) { i = 0; }
5102+ buffer_info->time_stamp = jiffies;
5103+ buffer_info->next_to_watch = i;
5104+
5105+ if (unlikely(++i == tx_ring->count)) i = 0;
5106         tx_ring->next_to_use = i;
5107 
5108- return TRUE;
5109+ return true;
5110     }
5111 
5112- return FALSE;
5113+ return false;
5114 }
5115 
5116-#define E1000_MAX_TXD_PWR 12
5117-#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
5118+#define E1000_MAX_TXD_PWR 12
5119+#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
5120 
5121-static inline int
5122-iegbe_tx_map(struct iegbe_adapter *adapter, struct iegbe_tx_ring *tx_ring,
5123- struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
5124- unsigned int nr_frags, unsigned int mss)
5125+static int iegbe_tx_map(struct iegbe_adapter *adapter,
5126+ struct iegbe_tx_ring *tx_ring,
5127+ struct sk_buff *skb, unsigned int first,
5128+ unsigned int max_per_txd, unsigned int nr_frags,
5129+ unsigned int mss)
5130 {
5131- struct iegbe_buffer *buffer_info;
5132- unsigned int len = skb->len;
5133+ struct iegbe_hw *hw = &adapter->hw;
5134+ struct iegbe_buffer *buffer_info;
5135+ unsigned int len = skb->len;
5136     unsigned int offset = 0, size, count = 0, i;
5137-#ifdef MAX_SKB_FRAGS
5138- unsigned int f;
5139- len -= skb->data_len;
5140-#endif
5141+ unsigned int f;
5142+ len -= skb->data_len;
5143 
5144- i = tx_ring->next_to_use;
5145+ i = tx_ring->next_to_use;
5146+
5147+ while(len) {
5148+ buffer_info = &tx_ring->buffer_info[i];
5149+ size = min(len, max_per_txd);
5150+ /* Workaround for Controller erratum --
5151+ * descriptor for non-tso packet in a linear SKB that follows a
5152+ * tso gets written back prematurely before the data is fully
5153+ * DMA'd to the controller */
5154+ if (!skb->data_len && tx_ring->last_tx_tso &&
5155+ !skb_is_gso(skb)) {
5156+ tx_ring->last_tx_tso = 0;
5157+ size -= 4;
5158+ }
5159 
5160- while(len) {
5161- buffer_info = &tx_ring->buffer_info[i];
5162- size = min(len, max_per_txd);
5163-#ifdef NETIF_F_TSO
5164         /* Workaround for premature desc write-backs
5165          * in TSO mode. Append 4-byte sentinel desc */
5166- if(unlikely(mss && !nr_frags && size == len && size > 0x8)) {
5167- size -= 0x4;
5168+ if (unlikely(mss && !nr_frags && size == len && size > 8))
5169+ size -= 4;
5170+ /* Work-around for errata 10; it applies
5171+ * to all controllers in PCI-X mode.
5172+ * The fix is to make sure that the first descriptor of a
5173+ * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
5174+ */
5175+ if (unlikely((hw->bus_type == iegbe_bus_type_pcix) &&
5176+ (size > 2015) && count == 0))
5177+ size = 2015;
5178+
5179+ /* Workaround for potential 82544 hang in PCI-X. Avoid
5180+ * terminating buffers within evenly-aligned dwords. */
5181+ if(unlikely(adapter->pcix_82544 &&
5182+ !((unsigned long)(skb->data + offset + size - 1) & 4) &&
5183+ size > 4))
5184+ size -= 4;
5185+
5186+ buffer_info->length = size;
5187+ buffer_info->dma =
5188+ pci_map_single(adapter->pdev,
5189+ skb->data + offset,
5190+ size,
5191+ PCI_DMA_TODEVICE);
5192+ buffer_info->time_stamp = jiffies;
5193+ buffer_info->next_to_watch = i;
5194+
5195+ len -= size;
5196+ offset += size;
5197+ count++;
5198+ if (unlikely(++i == tx_ring->count)) i = 0;
5199+ }
5200+
5201+ for (f = 0; f < nr_frags; f++) {
5202+ struct skb_frag_struct *frag;
5203+
5204+ frag = &skb_shinfo(skb)->frags[f];
5205+ len = frag->size;
5206+ offset = frag->page_offset;
5207+
5208+ while(len) {
5209+ buffer_info = &tx_ring->buffer_info[i];
5210+ size = min(len, max_per_txd);
5211+ /* Workaround for premature desc write-backs
5212+ * in TSO mode. Append 4-byte sentinel desc */
5213+ if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
5214+ size -= 4;
5215+ /* Workaround for potential 82544 hang in PCI-X.
5216+ * Avoid terminating buffers within evenly-aligned
5217+ * dwords. */
5218+ if(unlikely(adapter->pcix_82544 &&
5219+ !((unsigned long)(frag->page+offset+size-1) & 4) &&
5220+ size > 4))
5221+ size -= 4;
5222+
5223+ buffer_info->length = size;
5224+ buffer_info->dma =
5225+ pci_map_page(adapter->pdev,
5226+ frag->page,
5227+ offset,
5228+ size,
5229+ PCI_DMA_TODEVICE);
5230+ buffer_info->time_stamp = jiffies;
5231+ buffer_info->next_to_watch = i;
5232+
5233+ len -= size;
5234+ offset += size;
5235+ count++;
5236+ if (unlikely(++i == tx_ring->count)) i = 0;
5237         }
5238-#endif
5239- /* work-around for errata 10 and it applies
5240- * to all controllers in PCI-X mode
5241- * The fix is to make sure that the first descriptor of a
5242- * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
5243- */
5244- if(unlikely((adapter->hw.bus_type == iegbe_bus_type_pcix) &&
5245- (size > 0x7df) && count == 0)) {
5246- size = 0x7df;
5247- }
5248- /* Workaround for potential 82544 hang in PCI-X. Avoid
5249- * terminating buffers within evenly-aligned dwords. */
5250- if(unlikely(adapter->pcix_82544 &&
5251- !((unsigned long)(skb->data + offset + size - 0x8) & 0x4) &&
5252- size > 0x4)) {
5253- size -= 0x4;
5254- }
5255- buffer_info->length = size;
5256- buffer_info->dma =
5257- pci_map_single(adapter->pdev,
5258- skb->data + offset,
5259- size,
5260- PCI_DMA_TODEVICE);
5261- buffer_info->time_stamp = jiffies;
5262-
5263- len -= size;
5264- offset += size;
5265- count++;
5266- if(unlikely(++i == tx_ring->count)) { i = 0; }
5267- }
5268-
5269-#ifdef MAX_SKB_FRAGS
5270- for(f = 0; f < nr_frags; f++) {
5271- struct skb_frag_struct *frag;
5272-
5273- frag = &skb_shinfo(skb)->frags[f];
5274- len = frag->size;
5275- offset = frag->page_offset;
5276-
5277- while(len) {
5278- buffer_info = &tx_ring->buffer_info[i];
5279- size = min(len, max_per_txd);
5280-#ifdef NETIF_F_TSO
5281- /* Workaround for premature desc write-backs
5282- * in TSO mode. Append 4-byte sentinel desc */
5283- if(unlikely(mss && f == (nr_frags-0x1) &&
5284- size == len && size > 0x8)) {
5285- size -= 0x4;
5286- }
5287-#endif
5288- /* Workaround for potential 82544 hang in PCI-X.
5289- * Avoid terminating buffers within evenly-aligned
5290- * dwords. */
5291- if(unlikely(adapter->pcix_82544 &&
5292- !((unsigned long)(frag->page+offset+size-0x1) & 0x4) &&
5293- size > 0x4)) {
5294- size -= 0x4;
5295- }
5296- buffer_info->length = size;
5297- buffer_info->dma =
5298- pci_map_page(adapter->pdev,
5299- frag->page,
5300- offset,
5301- size,
5302- PCI_DMA_TODEVICE);
5303- buffer_info->time_stamp = jiffies;
5304-
5305- len -= size;
5306- offset += size;
5307- count++;
5308- if(unlikely(++i == tx_ring->count)) { i = 0; }
5309- }
5310- }
5311-#endif
5312+ }
5313 
5314- i = (i == 0) ? tx_ring->count - 0x1 : i - 0x1;
5315- tx_ring->buffer_info[i].skb = skb;
5316- tx_ring->buffer_info[first].next_to_watch = i;
5317+ i = (i == 0) ? tx_ring->count - 1 : i - 1;
5318+ tx_ring->buffer_info[i].skb = skb;
5319+ tx_ring->buffer_info[first].next_to_watch = i;
5320 
5321- return count;
5322+ return count;
5323 }
5324 
5325-static inline void
5326-iegbe_tx_queue(struct iegbe_adapter *adapter, struct iegbe_tx_ring *tx_ring,
5327- int tx_flags, int count)
5328+static void iegbe_tx_queue(struct iegbe_adapter *adapter,
5329+ struct iegbe_tx_ring *tx_ring, int tx_flags,
5330+ int count)
5331 {
5332+ struct iegbe_hw *hw = &adapter->hw;
5333     struct iegbe_tx_desc *tx_desc = NULL;
5334     struct iegbe_buffer *buffer_info;
5335- uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5336+ u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5337     unsigned int i;
5338 
5339- if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
5340+ if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
5341         txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5342                      E1000_TXD_CMD_TSE;
5343- txd_upper |= E1000_TXD_POPTS_TXSM << 0x8;
5344+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5345 
5346- if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) {
5347- txd_upper |= E1000_TXD_POPTS_IXSM << 0x8;
5348- }
5349+ if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
5350+ txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5351     }
5352 
5353- if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
5354+ if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
5355         txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5356- txd_upper |= E1000_TXD_POPTS_TXSM << 0x8;
5357- }
5358-
5359- if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
5360- txd_lower |= E1000_TXD_CMD_VLE;
5361- txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5362+ txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5363     }
5364 
5365- i = tx_ring->next_to_use;
5366+ if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
5367+ txd_lower |= E1000_TXD_CMD_VLE;
5368+ txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5369+ }
5370 
5371- while(count--) {
5372- buffer_info = &tx_ring->buffer_info[i];
5373- tx_desc = E1000_TX_DESC(*tx_ring, i);
5374- tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5375- tx_desc->lower.data =
5376- cpu_to_le32(txd_lower | buffer_info->length);
5377- tx_desc->upper.data = cpu_to_le32(txd_upper);
5378- if(unlikely(++i == tx_ring->count)) { i = 0; }
5379- }
5380- if(tx_desc != NULL) {
5381- tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5382- }
5383- /* Force memory writes to complete before letting h/w
5384- * know there are new descriptors to fetch. (Only
5385- * applicable for weak-ordered memory model archs,
5386- * such as IA-64). */
5387- wmb();
5388+ i = tx_ring->next_to_use;
5389 
5390- tx_ring->next_to_use = i;
5391- writel(i, adapter->hw.hw_addr + tx_ring->tdt);
5392+ while(count--) {
5393+ buffer_info = &tx_ring->buffer_info[i];
5394+ tx_desc = E1000_TX_DESC(*tx_ring, i);
5395+ tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5396+ tx_desc->lower.data =
5397+ cpu_to_le32(txd_lower | buffer_info->length);
5398+ tx_desc->upper.data = cpu_to_le32(txd_upper);
5399+ if (unlikely(++i == tx_ring->count)) i = 0;
5400+ }
5401+
5402+ tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5403+
5404+ /* Force memory writes to complete before letting h/w
5405+ * know there are new descriptors to fetch. (Only
5406+ * applicable for weak-ordered memory model archs,
5407+ * such as IA-64). */
5408+ wmb();
5409+
5410+ tx_ring->next_to_use = i;
5411+ writel(i, hw->hw_addr + tx_ring->tdt);
5412+ /* we need this if more than one processor can write to our tail
5413+ * at a time; it synchronizes IO on IA64/Altix systems */
5414+ mmiowb();
5415 }
5416 
5417 /**
5418@@ -2950,113 +2925,132 @@ iegbe_tx_queue(struct iegbe_adapter *ada
5419  * to the beginning of the Tx FIFO.
5420  **/
5421 
5422-static inline int
5423-iegbe_82547_fifo_workaround(struct iegbe_adapter *adapter, struct sk_buff *skb)
5424+#define E1000_FIFO_HDR 0x10
5425+#define E1000_82547_PAD_LEN 0x3E0
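+/* 82547 erratum workaround: in half duplex, a packet that would wrap
+ * the Tx FIFO can hang the controller, so hold off transmission and
+ * let the FIFO drain first */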
5426+static int iegbe_82547_fifo_workaround(struct iegbe_adapter *adapter,
5427+ struct sk_buff *skb)
5428 {
5429- uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
5430- uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
5431+ u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
5432+ u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
5433 
5434- E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
5435+ skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
5436 
5437- if(adapter->link_duplex != HALF_DUPLEX) {
5438- goto no_fifo_stall_required;
5439- }
5440- if(atomic_read(&adapter->tx_fifo_stall)) {
5441- return 1;
5442+ if (adapter->link_duplex != HALF_DUPLEX)
5443+ goto no_fifo_stall_required;
5444+
5445+ if (atomic_read(&adapter->tx_fifo_stall))
5446+ return 1;
5447+
5448+ if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
5449+ atomic_set(&adapter->tx_fifo_stall, 1);
5450+ return 1;
5451     }
5452- if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
5453- atomic_set(&adapter->tx_fifo_stall, 0x1);
5454- return 1;
5455- }
5456 
5457 no_fifo_stall_required:
5458- adapter->tx_fifo_head += skb_fifo_len;
5459- if(adapter->tx_fifo_head >= adapter->tx_fifo_size) {
5460- adapter->tx_fifo_head -= adapter->tx_fifo_size;
5461- }
5462+ adapter->tx_fifo_head += skb_fifo_len;
5463+ if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
5464+ adapter->tx_fifo_head -= adapter->tx_fifo_size;
5465     return 0;
5466 }
5467 
5468-static inline int
5469-iegbe_transfer_dhcp_info(struct iegbe_adapter *adapter, struct sk_buff *skb)
5470+#define MINIMUM_DHCP_PACKET_SIZE 282
5471+static int iegbe_transfer_dhcp_info(struct iegbe_adapter *adapter,
5472+ struct sk_buff *skb)
5473 {
5474     struct iegbe_hw *hw = &adapter->hw;
5475- uint16_t length, offset;
5476-#ifdef NETIF_F_HW_VLAN_TX
5477- if(vlan_tx_tag_present(skb)) {
5478- if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5479- ( adapter->hw.mng_cookie.status &
5480- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) {
5481+ u16 length, offset;
5482+ if (vlan_tx_tag_present(skb)) {
5483+ if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
5484+ ( hw->mng_cookie.status &
5485+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
5486             return 0;
5487     }
5488- }
5489-#endif
5490- if(htons(ETH_P_IP) == skb->protocol) {
5491- const struct iphdr *ip = skb->nh.iph;
5492- if(IPPROTO_UDP == ip->protocol) {
5493- struct udphdr *udp = (struct udphdr *)(skb->h.uh);
5494- if(ntohs(udp->dest) == 0x43) { /* 0x43 = 67 */
5495- offset = (uint8_t *)udp + 0x8 - skb->data;
5496- length = skb->len - offset;
5497-
5498- return iegbe_mng_write_dhcp_info(hw,
5499- (uint8_t *)udp + 0x8, length);
5500- }
5501- }
5502- } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
5503- struct ethhdr *eth = (struct ethhdr *) skb->data;
5504- if((htons(ETH_P_IP) == eth->h_proto)) {
5505+ if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
5506+ struct ethhdr *eth = (struct ethhdr *)skb->data;
5507+ if ((htons(ETH_P_IP) == eth->h_proto)) {
5508             const struct iphdr *ip =
5509- (struct iphdr *)((uint8_t *)skb->data+0xe);
5510- if(IPPROTO_UDP == ip->protocol) {
5511+ (struct iphdr *)((u8 *)skb->data+14);
5512+ if (IPPROTO_UDP == ip->protocol) {
5513                 struct udphdr *udp =
5514- (struct udphdr *)((uint8_t *)ip +
5515- (ip->ihl << 0x2));
5516- if(ntohs(udp->dest) == 0x43) {
5517- offset = (uint8_t *)udp + 0x8 - skb->data;
5518+ (struct udphdr *)((u8 *)ip +
5519+ (ip->ihl << 2));
5520+ if (ntohs(udp->dest) == 67) {
5521+ offset = (u8 *)udp + 8 - skb->data;
5522                     length = skb->len - offset;
5523 
5524                     return iegbe_mng_write_dhcp_info(hw,
5525- (uint8_t *)udp + 0x8,
5526+ (u8 *)udp + 8,
5527                             length);
5528- }
5529+ }
5530             }
5531         }
5532     }
5533     return 0;
5534 }
5535 
5536-static int
5537-iegbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
5538+static int __iegbe_maybe_stop_tx(struct net_device *netdev, int size)
5539+{
5540+ struct iegbe_adapter *adapter = netdev_priv(netdev);
5541+ struct iegbe_tx_ring *tx_ring = adapter->tx_ring;
5542+
5543+ netif_stop_queue(netdev);
5544+ /* Herbert's original patch had:
5545+ * smp_mb__after_netif_stop_queue();
5546+ * but since that doesn't exist yet, just open code it. */
5547+ smp_mb();
5548+
5549+ /* We need to check again in case another CPU has just
5550+ * made room available. */
5551+ if (likely(E1000_DESC_UNUSED(tx_ring) < size))
5552+ return -EBUSY;
5553+
5554+ /* A reprieve! */
5555+ netif_start_queue(netdev);
5556+ ++adapter->restart_queue;
5557+ return 0;
5558+}
5559+
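+/* fast path: skip the queue-stop logic entirely while the ring
+ * clearly has enough free descriptors */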
5560+static int iegbe_maybe_stop_tx(struct net_device *netdev,
5561+ struct iegbe_tx_ring *tx_ring, int size)
5562+{
5563+ if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
5564+ return 0;
5565+ return __iegbe_maybe_stop_tx(netdev, size);
5566+}
5567+
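+/* worst-case number of descriptors needed for S bytes when each
+ * descriptor carries at most 2^X bytes */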
5568+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
5569+static int iegbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
5570 {
5571     struct iegbe_adapter *adapter = netdev_priv(netdev);
5572+ struct iegbe_hw *hw = &adapter->hw;
5573     struct iegbe_tx_ring *tx_ring;
5574     unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
5575     unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
5576     unsigned int tx_flags = 0;
5577- unsigned int len = skb->len;
5578+ unsigned int len = skb->len - skb->data_len;
5579     unsigned long flags = 0;
5580- unsigned int nr_frags = 0;
5581- unsigned int mss = 0;
5582+ unsigned int nr_frags;
5583+ unsigned int mss;
5584     int count = 0;
5585- int tso;
5586-#ifdef MAX_SKB_FRAGS
5587+ int tso;
5588     unsigned int f;
5589- len -= skb->data_len;
5590-#endif
5591 
5592-#ifdef CONFIG_E1000_MQ
5593- tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
5594-#else
5595+ /* This goes back to the question of how to logically map a tx queue
5596+ * to a flow. Right now, performance is impacted slightly negatively
5597+ * if using multiple tx queues. If the stack breaks away from a
5598+ * single qdisc implementation, we can look at this again. */
5599     tx_ring = adapter->tx_ring;
5600-#endif
5601 
5602     if (unlikely(skb->len <= 0)) {
5603         dev_kfree_skb_any(skb);
5604         return NETDEV_TX_OK;
5605     }
5606 
5607-#ifdef NETIF_F_TSO
5608+ /* 82571 and newer don't need the workaround that limited descriptor
5609+ * length to 4kB */
5610+ if (hw->mac_type >= iegbe_82571)
5611+ max_per_txd = 8192;
5612+
5613     mss = skb_shinfo(skb)->gso_size;
5614     /* The controller does a simple calculation to
5615      * make sure there is enough room in the FIFO before
5616@@ -3064,164 +3058,150 @@ iegbe_xmit_frame(struct sk_buff *skb, st
5617      * 4 = ceil(buffer len/mss). To make sure we don't
5618      * overrun the FIFO, adjust the max buffer len if mss
5619      * drops. */
5620- if(mss) {
5621- max_per_txd = min(mss << 0x2, max_per_txd);
5622- max_txd_pwr = fls(max_per_txd) - 0x1;
5623+ if (mss) {
5624+ u8 hdr_len;
5625+ max_per_txd = min(mss << 2, max_per_txd);
5626+ max_txd_pwr = fls(max_per_txd) - 1;
5627+
5628+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5629+ * points to just header, pull a few bytes of payload from
5630+ * frags into skb->data */
5631+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5632+ if (skb->data_len && hdr_len == len) {
5633+ switch (hw->mac_type) {
5634+ case iegbe_82544:
5635+ /* Make sure we have room to chop off 4 bytes,
5636+ * and that the end alignment will work out to
5637+ * this hardware's requirements
5638+ * NOTE: this is a TSO only workaround
5639+ * if end byte alignment not correct move us
5640+ * into the next dword */
5641+ /* fall through */
5643+ case iegbe_82571:
5644+ case iegbe_82572:
5645+ case iegbe_82573:
5646+ break;
5647+ default:
5648+ /* do nothing */
5649+ break;
5650+ }
5651+ }
5652     }
5653 
5654- if((mss) || (skb->ip_summed == CHECKSUM_HW)) {
5655+ /* reserve a descriptor for the offload context */
5656+ if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5657         count++;
5658- }
5659     count++;
5660-#else
5661- if(skb->ip_summed == CHECKSUM_HW) {
5662+
5663+ /* Controller Erratum workaround */
5664+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
5665         count++;
5666- {
5667-#endif
5668+
5669     count += TXD_USE_COUNT(len, max_txd_pwr);
5670 
5671- if(adapter->pcix_82544) {
5672+ if (adapter->pcix_82544)
5673         count++;
5674- }
5675+
5676     /* work-around for errata 10 and it applies to all controllers
5677      * in PCI-X mode, so add one more descriptor to the count
5678      */
5679- if(unlikely((adapter->hw.bus_type == iegbe_bus_type_pcix) &&
5680- (len > 0x7df))) {
5681+ if (unlikely((hw->bus_type == iegbe_bus_type_pcix) &&
5682+ (len > 2015)))
5683         count++;
5684- }
5685-#ifdef MAX_SKB_FRAGS
5686+
5687     nr_frags = skb_shinfo(skb)->nr_frags;
5688- for(f = 0; f < nr_frags; f++)
5689+ for (f = 0; f < nr_frags; f++)
5690         count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
5691                        max_txd_pwr);
5692- if(adapter->pcix_82544) {
5693+ if (adapter->pcix_82544)
5694         count += nr_frags;
5695- }
5696-#ifdef NETIF_F_TSO
5697- /* TSO Workaround for 82571/2 Controllers -- if skb->data
5698- * points to just header, pull a few bytes of payload from
5699- * frags into skb->data */
5700- if (skb_is_gso(skb)) {
5701- uint8_t hdr_len;
5702- hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 0x2));
5703- if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
5704- (adapter->hw.mac_type == iegbe_82571 ||
5705- adapter->hw.mac_type == iegbe_82572)) {
5706- unsigned int pull_size;
5707- pull_size = min((unsigned int)0x4, skb->data_len);
5708- if (!__pskb_pull_tail(skb, pull_size)) {
5709- printk(KERN_ERR "__pskb_pull_tail failed.\n");
5710- dev_kfree_skb_any(skb);
5711- return -EFAULT;
5712- }
5713- }
5714- }
5715-#endif
5716-#endif
5717 
5718- if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == iegbe_82573) ) {
5719+
5720+ if (hw->tx_pkt_filtering &&
5721+ (hw->mac_type == iegbe_82573))
5722         iegbe_transfer_dhcp_info(adapter, skb);
5723- }
5724-#ifdef NETIF_F_LLTX
5725- local_irq_save(flags);
5726- if (!spin_trylock(&tx_ring->tx_lock)) {
5727+
5728+ if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags))
5729         /* Collision - tell upper layer to requeue */
5730- local_irq_restore(flags);
5731         return NETDEV_TX_LOCKED;
5732- }
5733-#else
5734- spin_lock_irqsave(&tx_ring->tx_lock, flags);
5735-#endif
5736 
5737     /* need: count + 2 desc gap to keep tail from touching
5738      * head, otherwise try next time */
5739- if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 0x2)) {
5740- netif_stop_queue(netdev);
5741+ if (unlikely(iegbe_maybe_stop_tx(netdev, tx_ring, count + 2))) {
5742         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
5743         return NETDEV_TX_BUSY;
5744     }
5745 
5746- if(unlikely(adapter->hw.mac_type == iegbe_82547)) {
5747- if(unlikely(iegbe_82547_fifo_workaround(adapter, skb))) {
5748+ if (unlikely(hw->mac_type == iegbe_82547)) {
5749+ if (unlikely(iegbe_82547_fifo_workaround(adapter, skb))) {
5750             netif_stop_queue(netdev);
5751- mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
5752+ mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
5753             spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
5754             return NETDEV_TX_BUSY;
5755         }
5756     }
5757 
5758-#ifndef NETIF_F_LLTX
5759- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
5760-#endif
5761-
5762-#ifdef NETIF_F_HW_VLAN_TX
5763- if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
5764+ if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
5765         tx_flags |= E1000_TX_FLAGS_VLAN;
5766         tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5767     }
5768-#endif
5769 
5770     first = tx_ring->next_to_use;
5771 
5772     tso = iegbe_tso(adapter, tx_ring, skb);
5773     if (tso < 0) {
5774         dev_kfree_skb_any(skb);
5775-#ifdef NETIF_F_LLTX
5776         spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
5777-#endif
5778         return NETDEV_TX_OK;
5779     }
5780 
5781- if (likely(tso)) {
5782+ if (likely(tso)) {
5783+ tx_ring->last_tx_tso = 1;
5784         tx_flags |= E1000_TX_FLAGS_TSO;
5785- } else if (likely(iegbe_tx_csum(adapter, tx_ring, skb))) {
5786+ } else if (likely(iegbe_tx_csum(adapter, tx_ring, skb)))
5787         tx_flags |= E1000_TX_FLAGS_CSUM;
5788- }
5789+
5790     /* Old method was to assume IPv4 packet by default if TSO was enabled.
5791      * 82571 hardware supports TSO capabilities for IPv6 as well...
5792      * no longer assume, we must. */
5793- if (likely(skb->protocol == ntohs(ETH_P_IP))) {
5794+ if (likely(skb->protocol == htons(ETH_P_IP)))
5795         tx_flags |= E1000_TX_FLAGS_IPV4;
5796- }
5797+
5798     iegbe_tx_queue(adapter, tx_ring, tx_flags,
5799                    iegbe_tx_map(adapter, tx_ring, skb, first,
5800                                 max_per_txd, nr_frags, mss));
5801 
5802     netdev->trans_start = jiffies;
5803 
5804-#ifdef NETIF_F_LLTX
5805     /* Make sure there is space in the ring for the next send. */
5806- if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 0x2)) {
5807- netif_stop_queue(netdev);
5808- }
5809- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
5810-#endif
5811+ iegbe_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
5812 
5813+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
5814     return NETDEV_TX_OK;
5815 }
5816 
5817+
5818 /**
5819  * iegbe_tx_timeout - Respond to a Tx Hang
5820  * @netdev: network interface device structure
5821  **/
5822 
5823-static void
5824-iegbe_tx_timeout(struct net_device *netdev)
5825+static void iegbe_tx_timeout(struct net_device *netdev)
5826 {
5827- struct iegbe_adapter *adapter = netdev_priv(netdev);
5828+ struct iegbe_adapter *adapter = netdev_priv(netdev);
5829 
5830- /* Do the reset outside of interrupt context */
5831- schedule_work(&adapter->tx_timeout_task);
5832+ /* Do the reset outside of interrupt context */
5833+ adapter->tx_timeout_count++;
5834+ schedule_work(&adapter->reset_task);
5835 }
5836 
5837-static void
5838-iegbe_tx_timeout_task(struct net_device *netdev)
5839+static void iegbe_reset_task(struct work_struct *work)
5840 {
5841- struct iegbe_adapter *adapter = netdev_priv(netdev);
5842+ struct iegbe_adapter *adapter =
5843+ container_of(work, struct iegbe_adapter, reset_task);
5844 
5845- iegbe_down(adapter);
5846- iegbe_up(adapter);
5847+ iegbe_reinit_locked(adapter);
5848 }
5849 
5850 /**
5851@@ -3232,13 +3212,12 @@ iegbe_tx_timeout_task(struct net_device
5852  * The statistics are actually updated from the timer callback.
5853  **/
5854 
5855-static struct net_device_stats *
5856-iegbe_get_stats(struct net_device *netdev)
5857+static struct net_device_stats *iegbe_get_stats(struct net_device *netdev)
5858 {
5859- struct iegbe_adapter *adapter = netdev_priv(netdev);
5860+ struct iegbe_adapter *adapter = netdev_priv(netdev);
5861 
5862- iegbe_update_stats(adapter);
5863- return &adapter->net_stats;
5864+ /* only return the current stats */
5865+ return &adapter->net_stats;
5866 }
5867 
5868 /**
5869@@ -3249,67 +3228,55 @@ iegbe_get_stats(struct net_device *netde
5870  * Returns 0 on success, negative on failure
5871  **/
5872 
5873-static int
5874-iegbe_change_mtu(struct net_device *netdev, int new_mtu)
5875+static int iegbe_change_mtu(struct net_device *netdev, int new_mtu)
5876 {
5877- struct iegbe_adapter *adapter = netdev_priv(netdev);
5878- int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
5879+ struct iegbe_adapter *adapter = netdev_priv(netdev);
5880+ struct iegbe_hw *hw = &adapter->hw;
5881+ int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
5882 
5883- if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
5884- (max_frame > MAX_JUMBO_FRAME_SIZE)) {
5885- DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
5886- return -EINVAL;
5887- }
5888+ if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
5889+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
5890+ DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
5891+ return -EINVAL;
5892+ }
5893 
5894+ /* Adapter-specific max frame size limits. */
5895+ switch (hw->mac_type) {
5896+ case iegbe_undefined ... iegbe_82542_rev2_1:
5897+ if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
5898+ DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
5899+ return -EINVAL;
5900+ }
5901+ break;
5902+ case iegbe_82571:
5903+ case iegbe_82572:
5904 #define MAX_STD_JUMBO_FRAME_SIZE 9234
5905- /* might want this to be bigger enum check... */
5906- /* 82571 controllers limit jumbo frame size to 10500 bytes */
5907- if ((adapter->hw.mac_type == iegbe_82571 ||
5908- adapter->hw.mac_type == iegbe_82572) &&
5909- max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
5910- DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
5911- "on 82571 and 82572 controllers.\n");
5912- return -EINVAL;
5913- }
5914-
5915- if(adapter->hw.mac_type == iegbe_82573 &&
5916- max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
5917- DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
5918- "on 82573\n");
5919- return -EINVAL;
5920- }
5921-
5922- if(adapter->hw.mac_type > iegbe_82547_rev_2) {
5923- adapter->rx_buffer_len = max_frame;
5924- E1000_ROUNDUP(adapter->rx_buffer_len, 0x1024);
5925- } else {
5926- if(unlikely((adapter->hw.mac_type < iegbe_82543) &&
5927- (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
5928- DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
5929- "on 82542\n");
5930- return -EINVAL;
5931-
5932- } else {
5933- if(max_frame <= E1000_RXBUFFER_2048) {
5934- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
5935- } else if(max_frame <= E1000_RXBUFFER_4096) {
5936- adapter->rx_buffer_len = E1000_RXBUFFER_4096;
5937- } else if(max_frame <= E1000_RXBUFFER_8192) {
5938- adapter->rx_buffer_len = E1000_RXBUFFER_8192;
5939- } else if(max_frame <= E1000_RXBUFFER_16384) {
5940- adapter->rx_buffer_len = E1000_RXBUFFER_16384;
5941- }
5942+ if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
5943+ DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
5944+ return -EINVAL;
5945         }
5946+ break;
5947+ default:
5948+ break;
5949     }
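+ /* round the frame size up to the next supported receive buffer size */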
5950+ if (max_frame <= E1000_RXBUFFER_256)
5951+ adapter->rx_buffer_len = E1000_RXBUFFER_256;
5952+ else if (max_frame <= E1000_RXBUFFER_2048)
5953+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
5954+ else if (max_frame <= E1000_RXBUFFER_4096)
5955+ adapter->rx_buffer_len = E1000_RXBUFFER_4096;
5956+ else if (max_frame <= E1000_RXBUFFER_8192)
5957+ adapter->rx_buffer_len = E1000_RXBUFFER_8192;
5958+ else if (max_frame <= E1000_RXBUFFER_16384)
5959+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
5960 
5961- netdev->mtu = new_mtu;
5962+ /* adjust allocation if LPE protects us, and we aren't using SBP */
5963 
5964- if(netif_running(netdev)) {
5965- iegbe_down(adapter);
5966- iegbe_up(adapter);
5967- }
5968+ netdev->mtu = new_mtu;
5969+ hw->max_frame_size = max_frame;
5970 
5971- adapter->hw.max_frame_size = max_frame;
5972+ if (netif_running(netdev))
5973+ iegbe_reinit_locked(adapter);
5974 
5975     return 0;
5976 }
5977@@ -3319,224 +3286,189 @@ iegbe_change_mtu(struct net_device *netd
5978  * @adapter: board private structure
5979  **/
5980 
5981-void
5982-iegbe_update_stats(struct iegbe_adapter *adapter)
5983+void iegbe_update_stats(struct iegbe_adapter *adapter)
5984 {
5985- struct iegbe_hw *hw = &adapter->hw;
5986- unsigned long flags = 0;
5987- uint16_t phy_tmp;
5988+ struct iegbe_hw *hw = &adapter->hw;
5989+ unsigned long flags = 0x0;
5990+ uint16_t phy_tmp;
5991 
5992 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
5993 
5994- spin_lock_irqsave(&adapter->stats_lock, flags);
5995+ spin_lock_irqsave(&adapter->stats_lock, flags);
5996 
5997- /* these counters are modified from iegbe_adjust_tbi_stats,
5998- * called from the interrupt context, so they must only
5999- * be written while holding adapter->stats_lock
6000- */
6001+ /* these counters are modified from iegbe_adjust_tbi_stats,
6002+ * called from the interrupt context, so they must only
6003+ * be written while holding adapter->stats_lock
6004+ */
6005 
6006- adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
6007- adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
6008- adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
6009- adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
6010- adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
6011- adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
6012- adapter->stats.roc += E1000_READ_REG(hw, ROC);
6013- adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
6014- adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
6015- adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
6016- adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
6017- adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
6018- adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
6019-
6020- adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
6021- adapter->stats.mpc += E1000_READ_REG(hw, MPC);
6022- adapter->stats.scc += E1000_READ_REG(hw, SCC);
6023- adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
6024- adapter->stats.mcc += E1000_READ_REG(hw, MCC);
6025- adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
6026- adapter->stats.dc += E1000_READ_REG(hw, DC);
6027- adapter->stats.sec += E1000_READ_REG(hw, SEC);
6028- adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
6029- adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
6030- adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
6031- adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
6032- adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
6033- adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
6034- adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
6035- adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
6036- adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
6037- adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
6038- adapter->stats.ruc += E1000_READ_REG(hw, RUC);
6039- adapter->stats.rfc += E1000_READ_REG(hw, RFC);
6040- adapter->stats.rjc += E1000_READ_REG(hw, RJC);
6041- adapter->stats.torl += E1000_READ_REG(hw, TORL);
6042- adapter->stats.torh += E1000_READ_REG(hw, TORH);
6043- adapter->stats.totl += E1000_READ_REG(hw, TOTL);
6044- adapter->stats.toth += E1000_READ_REG(hw, TOTH);
6045- adapter->stats.tpr += E1000_READ_REG(hw, TPR);
6046- adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
6047- adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
6048- adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
6049- adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
6050- adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
6051- adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
6052- adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
6053- adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
6054-
6055- /* used for adaptive IFS */
6056-
6057- hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
6058- adapter->stats.tpt += hw->tx_packet_delta;
6059- hw->collision_delta = E1000_READ_REG(hw, COLC);
6060- adapter->stats.colc += hw->collision_delta;
6061-
6062- if(hw->mac_type >= iegbe_82543) {
6063- adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
6064- adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
6065- adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
6066- adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
6067- adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
6068- adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
6069- }
6070- if(hw->mac_type > iegbe_82547_rev_2) {
6071- adapter->stats.iac += E1000_READ_REG(hw, IAC);
6072- adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
6073- adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
6074- adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
6075- adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
6076- adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
6077- adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
6078- adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
6079- adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
6080- }
6081-
6082- /* Fill out the OS statistics structure */
6083-
6084- adapter->net_stats.rx_packets = adapter->stats.gprc;
6085- adapter->net_stats.tx_packets = adapter->stats.gptc;
6086- adapter->net_stats.rx_bytes = adapter->stats.gorcl;
6087- adapter->net_stats.tx_bytes = adapter->stats.gotcl;
6088- adapter->net_stats.multicast = adapter->stats.mprc;
6089- adapter->net_stats.collisions = adapter->stats.colc;
6090-
6091- /* Rx Errors */
6092-
6093- adapter->net_stats.rx_errors = adapter->stats.rxerrc +
6094- adapter->stats.crcerrs + adapter->stats.algnerrc +
6095- adapter->stats.rlec + adapter->stats.mpc +
6096- adapter->stats.cexterr;
6097- adapter->net_stats.rx_dropped = adapter->stats.mpc;
6098- adapter->net_stats.rx_length_errors = adapter->stats.rlec;
6099- adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
6100- adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
6101- adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
6102- adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
6103-
6104- /* Tx Errors */
6105-
6106- adapter->net_stats.tx_errors = adapter->stats.ecol +
6107- adapter->stats.latecol;
6108- adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
6109- adapter->net_stats.tx_window_errors = adapter->stats.latecol;
6110- adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
6111+ adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
6112+ adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
6113+ adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
6114+ adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
6115+ adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
6116+ adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
6117+ adapter->stats.roc += E1000_READ_REG(hw, ROC);
6118+ adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
6119+ adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
6120+ adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
6121+ adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
6122+ adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
6123+ adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
6124+
6125+ adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
6126+ adapter->stats.mpc += E1000_READ_REG(hw, MPC);
6127+ adapter->stats.scc += E1000_READ_REG(hw, SCC);
6128+ adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
6129+ adapter->stats.mcc += E1000_READ_REG(hw, MCC);
6130+ adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
6131+ adapter->stats.dc += E1000_READ_REG(hw, DC);
6132+ adapter->stats.sec += E1000_READ_REG(hw, SEC);
6133+ adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
6134+ adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
6135+ adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
6136+ adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
6137+ adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
6138+ adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
6139+ adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
6140+ adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
6141+ adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
6142+ adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
6143+ adapter->stats.ruc += E1000_READ_REG(hw, RUC);
6144+ adapter->stats.rfc += E1000_READ_REG(hw, RFC);
6145+ adapter->stats.rjc += E1000_READ_REG(hw, RJC);
6146+ adapter->stats.torl += E1000_READ_REG(hw, TORL);
6147+ adapter->stats.torh += E1000_READ_REG(hw, TORH);
6148+ adapter->stats.totl += E1000_READ_REG(hw, TOTL);
6149+ adapter->stats.toth += E1000_READ_REG(hw, TOTH);
6150+ adapter->stats.tpr += E1000_READ_REG(hw, TPR);
6151+ adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
6152+ adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
6153+ adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
6154+ adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
6155+ adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
6156+ adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
6157+ adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
6158+ adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
6159+
6160+ /* used for adaptive IFS */
6161+
6162+ hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
6163+ adapter->stats.tpt += hw->tx_packet_delta;
6164+ hw->collision_delta = E1000_READ_REG(hw, COLC);
6165+ adapter->stats.colc += hw->collision_delta;
6166+
6167+ if(hw->mac_type >= iegbe_82543) {
6168+ adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
6169+ adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
6170+ adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
6171+ adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
6172+ adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
6173+ adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
6174+ }
6175+ if(hw->mac_type > iegbe_82547_rev_2) {
6176+ adapter->stats.iac += E1000_READ_REG(hw, IAC);
6177+ adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
6178+ adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
6179+ adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
6180+ adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
6181+ adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
6182+ adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
6183+ adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
6184+ adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
6185+ }
6186+
6187+ /* Fill out the OS statistics structure */
6188+
6189+ adapter->net_stats.rx_packets = adapter->stats.gprc;
6190+ adapter->net_stats.tx_packets = adapter->stats.gptc;
6191+ adapter->net_stats.rx_bytes = adapter->stats.gorcl;
6192+ adapter->net_stats.tx_bytes = adapter->stats.gotcl;
6193+ adapter->net_stats.multicast = adapter->stats.mprc;
6194+ adapter->net_stats.collisions = adapter->stats.colc;
6195+
6196+ /* Rx Errors */
6197+
6198+ adapter->net_stats.rx_errors = adapter->stats.rxerrc +
6199+ adapter->stats.crcerrs + adapter->stats.algnerrc +
6200+ adapter->stats.rlec + adapter->stats.mpc +
6201+ adapter->stats.cexterr;
6202+ adapter->net_stats.rx_dropped = adapter->stats.mpc;
6203+ adapter->net_stats.rx_length_errors = adapter->stats.rlec;
6204+ adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
6205+ adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
6206+ adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
6207+ adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
6208+
6209+ /* Tx Errors */
6210+
6211+ adapter->net_stats.tx_errors = adapter->stats.ecol +
6212+ adapter->stats.latecol;
6213+ adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
6214+ adapter->net_stats.tx_window_errors = adapter->stats.latecol;
6215+ adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
6216 
6217- /* Tx Dropped needs to be maintained elsewhere */
6218+ /* Tx Dropped needs to be maintained elsewhere */
6219 
6220- /* Phy Stats */
6221+ /* Phy Stats */
6222 
6223- if(hw->media_type == iegbe_media_type_copper
6224+ if(hw->media_type == iegbe_media_type_copper
6225        || (hw->media_type == iegbe_media_type_oem
6226            && iegbe_oem_phy_is_copper(&adapter->hw))) {
6227- if((adapter->link_speed == SPEED_1000) &&
6228- (!iegbe_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
6229- phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
6230- adapter->phy_stats.idle_errors += phy_tmp;
6231- }
6232+ if((adapter->link_speed == SPEED_1000) &&
6233+ (!iegbe_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
6234+ phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
6235+ adapter->phy_stats.idle_errors += phy_tmp;
6236+ }
6237 
6238- if((hw->mac_type <= iegbe_82546) &&
6239- (hw->phy_type == iegbe_phy_m88) &&
6240+ if((hw->mac_type <= iegbe_82546) &&
6241+ (hw->phy_type == iegbe_phy_m88) &&
6242            !iegbe_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) {
6243- adapter->phy_stats.receive_errors += phy_tmp;
6244- }
6245+ adapter->phy_stats.receive_errors += phy_tmp;
6246+ }
6247     }
6248 
6249- spin_unlock_irqrestore(&adapter->stats_lock, flags);
6250+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
6251 }
6252 
6253-#ifdef CONFIG_E1000_MQ
6254-void
6255-iegbe_rx_schedule(void *data)
6256+/**
6257+ * iegbe_intr_msi - Interrupt Handler
6258+ * @irq: interrupt number
6259+ * @data: pointer to a network interface device structure
6260+ **/
6261+
6262+static irqreturn_t iegbe_intr_msi(int irq, void *data)
6263 {
6264- struct net_device *poll_dev, *netdev = data;
6265- struct iegbe_adapter *adapter = netdev->priv;
6266- int this_cpu = get_cpu();
6267-
6268- poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
6269- if (poll_dev == NULL) {
6270- put_cpu();
6271- return;
6272+ struct net_device *netdev = data;
6273+ struct iegbe_adapter *adapter = netdev_priv(netdev);
6274+ struct iegbe_hw *hw = &adapter->hw;
6275+ u32 icr = E1000_READ_REG(&adapter->hw, ICR);
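+ /* MSI interrupts are never shared, so this is always our device;
+ * reading ICR also clears the asserted causes */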
6276+ if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
6277+ hw->get_link_status = 1;
6278+ if (!test_bit(__E1000_DOWN, &adapter->flags))
6279+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
6280     }
6281 
6282- if (likely(netif_rx_schedule_prep(poll_dev))) {
6283- __netif_rx_schedule(poll_dev);
6284- } else {
6285- iegbe_irq_enable(adapter);
6286- }
6287- put_cpu();
6288-}
6289-#endif
6290-
6291-#ifdef IEGBE_GBE_WORKAROUND
6292-/*
6293- * Check for tx hang condition. This is the condition where a
6294- * decsriptor is in the hardware and hasn't been processed for a
6295- * while. This code is similar to the check in iegbe_clean_rx_irq()
6296- */
6297-static void
6298-iegbe_tx_hang_check(struct iegbe_adapter *adapter,
6299- struct iegbe_tx_ring *tx_ring)
6300-{
6301- struct net_device *netdev = adapter->netdev;
6302- unsigned int i;
6303+ if(unlikely(icr & (E1000_ICR_RX_DESC_FIFO_PAR
6304+ | E1000_ICR_TX_DESC_FIFO_PAR
6305+ | E1000_ICR_PB
6306+ | E1000_ICR_CPP_TARGET
6307+ | E1000_ICR_CPP_MASTER ))) {
6308 
6309- /* Check for a hang condition using the buffer currently at the Tx
6310- head pointer */
6311- i = readl(adapter->hw.hw_addr + tx_ring->tdh);
6312-
6313- if (adapter->detect_tx_hung) {
6314- /* Detect a transmit hang in hardware, this serializes the
6315- * check with the clearing of time_stamp and movement of i */
6316- adapter->detect_tx_hung = FALSE;
6317-
6318- if (tx_ring->buffer_info[i].dma &&
6319- time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
6320- && !(E1000_READ_REG(&adapter->hw, STATUS) &
6321- E1000_STATUS_TXOFF)) {
6322-
6323- /* detected Tx unit hang */
6324- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
6325- " TDH <%x>\n"
6326- " TDT <%x>\n"
6327- " next_to_use <%x>\n"
6328- " next_to_clean <%x>\n"
6329- "buffer_info[tdh]\n"
6330- " dma <%zx>\n"
6331- " time_stamp <%lx>\n"
6332- " jiffies <%lx>\n",
6333- readl(adapter->hw.hw_addr + tx_ring->tdh),
6334- readl(adapter->hw.hw_addr + tx_ring->tdt),
6335- tx_ring->next_to_use,
6336- tx_ring->next_to_clean,
6337- (size_t)tx_ring->buffer_info[i].dma,
6338- tx_ring->buffer_info[i].time_stamp,
6339- jiffies);
6340- netif_stop_queue(netdev);
6341- }
6342+ iegbe_irq_disable(adapter);
6343+ printk(KERN_ERR "Critical error! ICR = 0x%x\n", icr);
6344+ return IRQ_HANDLED;
6345     }
6346-}
6347+ if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
6348+ adapter->total_tx_bytes = 0;
6349+ adapter->total_tx_packets = 0;
6350+ adapter->total_rx_bytes = 0;
6351+ adapter->total_rx_packets = 0;
6352+ __netif_rx_schedule(netdev, &adapter->napi);
6353+ } else
6354+ iegbe_irq_enable(adapter);
6355 
6356-#endif
6357+ return IRQ_HANDLED;
6358+}
6359 
6360 /**
6361  * iegbe_intr - Interrupt Handler
6362@@ -3546,364 +3478,208 @@ iegbe_tx_hang_check(struct iegbe_adapter
6363  **/
6364 
6365 static irqreturn_t
6366-iegbe_intr(int irq, void *data, struct pt_regs *regs)
6367+iegbe_intr(int irq, void *data)
6368 {
6369- struct net_device *netdev = data;
6370- struct iegbe_adapter *adapter = netdev_priv(netdev);
6371- struct iegbe_hw *hw = &adapter->hw;
6372- uint32_t rctl, tctl;
6373- uint32_t icr = E1000_READ_REG(hw, ICR);
6374-#ifndef CONFIG_E1000_NAPI
6375- uint32_t i;
6376-#ifdef IEGBE_GBE_WORKAROUND
6377- int rx_cleaned;
6378-#endif
6379-#endif
6380+ struct net_device *netdev = data;
6381+ struct iegbe_adapter *adapter = netdev_priv(netdev);
6382+ struct iegbe_hw *hw = &adapter->hw;
6383+ u32 icr = E1000_READ_REG(&adapter->hw, ICR);
6384 
6385- if(unlikely(!icr)) {
6386+ if (unlikely(!icr))
6387         return IRQ_NONE; /* Not our interrupt */
6388- }
6389+
6390+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
6391+ * not set, then the adapter didn't send an interrupt */
6392+ if (unlikely(hw->mac_type >= iegbe_82571 &&
6393+ !(icr & E1000_ICR_INT_ASSERTED)))
6394+ return IRQ_NONE;
6395+
6396+
6397     if(unlikely(icr & (E1000_ICR_RX_DESC_FIFO_PAR
6398- | E1000_ICR_TX_DESC_FIFO_PAR
6399- | E1000_ICR_PB
6400- | E1000_ICR_CPP_TARGET
6401- | E1000_ICR_CPP_MASTER ))) {
6402+ | E1000_ICR_TX_DESC_FIFO_PAR
6403+ | E1000_ICR_PB
6404+ | E1000_ICR_CPP_TARGET
6405+ | E1000_ICR_CPP_MASTER ))) {
6406 
6407         iegbe_irq_disable(adapter);
6408- tctl = E1000_READ_REG(&adapter->hw, TCTL);
6409- rctl = E1000_READ_REG(&adapter->hw, RCTL);
6410- E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_TCTL_EN);
6411- E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
6412-
6413- tasklet_data = (unsigned long) (icr + adapter->bd_number);
6414- tasklet_schedule(&iegbe_reset_tasklet);
6415-
6416- return IRQ_HANDLED;
6417- }
6418-
6419-#ifdef CONFIG_E1000_NAPI
6420- atomic_inc(&adapter->irq_sem);
6421-#ifdef IEGBE_GBE_WORKAROUND
6422- /* Ensure that the TXQE interrupt is enabled in NAPI mode */
6423- E1000_WRITE_REG(hw, IMC, ~E1000_IMS_TXQE);
6424-#else
6425- E1000_WRITE_REG(hw, IMC, ~0);
6426-#endif
6427- E1000_WRITE_FLUSH(hw);
6428-#ifdef CONFIG_E1000_MQ
6429- if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
6430- cpu_set(adapter->cpu_for_queue[0],
6431- adapter->rx_sched_call_data.cpumask);
6432- for (i = 1; i < adapter->num_queues; i++) {
6433- cpu_set(adapter->cpu_for_queue[i],
6434- adapter->rx_sched_call_data.cpumask);
6435- atomic_inc(&adapter->irq_sem);
6436- }
6437- atomic_set(&adapter->rx_sched_call_data.count, i);
6438- smp_call_async_mask(&adapter->rx_sched_call_data);
6439- } else {
6440- DEBUGOUT("call_data.count == %u\n",
6441- atomic_read(&adapter->rx_sched_call_data.count));
6442+ printk(KERN_ERR "Critical error! ICR = 0x%x\n", icr);
6443+ return IRQ_HANDLED;
6444     }
6445-#else
6446- if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) {
6447- __netif_rx_schedule(&adapter->polling_netdev[0]);
6448- } else {
6449- iegbe_irq_enable(adapter);
6450- }
6451-#endif
6452-
6453-#ifdef IEGBE_GBE_WORKAROUND
6454- /* Clean the Tx ring */
6455- for (i = 0; i < E1000_MAX_INTR; i++) {
6456- adapter->stats.rx_next_to_clean = adapter->rx_ring->next_to_clean;
6457- adapter->stats.rx_next_to_use = adapter->rx_ring->next_to_use;
6458-
6459- adapter->stats.tx_next_to_clean = adapter->tx_ring->next_to_clean;
6460- adapter->stats.tx_next_to_use = adapter->tx_ring->next_to_use;
6461-
6462- /* Only clean Tx descriptors for a TXQE interrupt */
6463- if(icr & E1000_ICR_TXQE) {
6464- adapter->stats.txqec++;
6465- iegbe_clean_tx_ring_partial(adapter, adapter->tx_ring);
6466- }
6467- else {
6468- iegbe_tx_hang_check(adapter, adapter->tx_ring);
6469- }
6470- }
6471 
6472-#endif /*IEGBE_GBE_WORKAROUND */
6473-
6474-#else
6475- /* Writing IMC and IMS is needed for 82547.
6476- * Due to Hub Link bus being occupied, an interrupt
6477- * de-assertion message is not able to be sent.
6478- * When an interrupt assertion message is generated later,
6479- * two messages are re-ordered and sent out.
6480- * That causes APIC to think 82547 is in de-assertion
6481- * state, while 82547 is in assertion state, resulting
6482- * in dead lock. Writing IMC forces 82547 into
6483- * de-assertion state.
6484- */
6485- if (hw->mac_type == iegbe_82547 || hw->mac_type == iegbe_82547_rev_2) {
6486- atomic_inc(&adapter->irq_sem);
6487- E1000_WRITE_REG(hw, IMC, ~0);
6488- }
6489-
6490-#ifdef IEGBE_GBE_WORKAROUND
6491-
6492- for (i = 0; i < E1000_MAX_INTR; i++) {
6493- rx_cleaned = adapter->clean_rx(adapter, adapter->rx_ring);
6494- adapter->stats.rx_next_to_clean = adapter->rx_ring->next_to_clean;
6495- adapter->stats.rx_next_to_use = adapter->rx_ring->next_to_use;
6496-
6497- adapter->stats.tx_next_to_clean = adapter->tx_ring->next_to_clean;
6498- adapter->stats.tx_next_to_use = adapter->tx_ring->next_to_use;
6499-
6500- /* Only clean Tx descriptors for a TXQE interrupt */
6501- if(icr & E1000_ICR_TXQE) {
6502- adapter->stats.txqec++;
6503- iegbe_clean_tx_ring_partial(adapter, adapter->tx_ring);
6504- }
6505- else {
6506- iegbe_tx_hang_check(adapter, adapter->tx_ring);
6507- }
6508- if(!rx_cleaned) {
6509- break;
6510- }
6511+ /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
6512+ * need for the IMC write */
6513+ if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
6514+ hw->get_link_status = 1;
6515+ /* guard against interrupt when we're going down */
6516+ if (!test_bit(__E1000_DOWN, &adapter->flags))
6517+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
6519     }
6520 
6521-#else
6522- for (i = 0; i < E1000_MAX_INTR; i++)
6523- if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
6524- !iegbe_clean_tx_irq(adapter, adapter->tx_ring))) {
6525- break;
6526- }
6527-#endif
6528-
6529- if (hw->mac_type == iegbe_82547 || hw->mac_type == iegbe_82547_rev_2) {
6530- iegbe_irq_enable(adapter);
6531- }
6532-#endif
6533-#ifdef E1000_COUNT_ICR
6534- adapter->icr_txdw += icr & 0x01UL;
6535- icr >>= 0x1;
6536- adapter->icr_txqe += icr & 0x01UL;
6537- icr >>= 0x1;
6538- adapter->icr_lsc += icr & 0x01UL;
6539- icr >>= 0x1;
6540- adapter->icr_rxseq += icr & 0x01UL;
6541- icr >>= 0x1;
6542- adapter->icr_rxdmt += icr & 0x01UL;
6543- icr >>= 0x1;
6544- adapter->icr_rxo += icr & 0x01UL;
6545- icr >>= 0x1;
6546- adapter->icr_rxt += icr & 0x01UL;
6547- if(hw->mac_type != iegbe_icp_xxxx) {
6548- icr >>= 0x2;
6549- adapter->icr_mdac += icr & 0x01UL;
6550- icr >>= 0x1;
6551- adapter->icr_rxcfg += icr & 0x01UL;
6552- icr >>= 0x1;
6553- adapter->icr_gpi += icr & 0x01UL;
6554- } else {
6555- icr >>= 0x4;
6556- }
6557- if(hw->mac_type == iegbe_icp_xxxx) {
6558- icr >>= 0xc;
6559- adapter->icr_pb += icr & 0x01UL;
6560- icr >>= 0x3;
6561- adapter->icr_intmem_icp_xxxx += icr & 0x01UL;
6562- icr >>= 0x1;
6563- adapter->icr_cpp_target += icr & 0x01UL;
6564- icr >>= 0x1;
6565- adapter->icr_cpp_master += icr & 0x01UL;
6566- icr >>= 0x1;
6567- adapter->icr_stat += icr & 0x01UL;
6568+ if (unlikely(hw->mac_type < iegbe_82571)) {
6569+ E1000_WRITE_REG(&adapter->hw, IMC, ~0);
6570+ E1000_WRITE_FLUSH(&adapter->hw);
6571     }
6572-#endif
6573+ if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) {
6574+ adapter->total_tx_bytes = 0;
6575+ adapter->total_tx_packets = 0;
6576+ adapter->total_rx_bytes = 0;
6577+ adapter->total_rx_packets = 0;
6578+ __netif_rx_schedule(netdev, &adapter->napi);
6579+ } else
6580+ /* this really should not happen! if it does it is basically a
6581+ * bug, but not a hard error, so enable ints and continue */
6582+ iegbe_irq_enable(adapter);
6583 
6584     return IRQ_HANDLED;
6585 }
6586 
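The rewritten handler above drops the old per-chip IMC/IMS juggling: reading ICR auto-masks further interrupts on 82571-class MACs (and on a shared line the read is only "ours" if INT_ASSERTED is set), fatal FIFO/parity causes are reported and the handler bails, link events merely kick the watchdog, and all RX/TX work is deferred to the NAPI poll routine. A minimal userspace sketch of that decision flow; the fatal-cause mask below is a stand-in, not the driver's real E1000_ICR_* map:

    #include <stdint.h>
    #include <stdio.h>

    #define ICR_INT_ASSERTED 0x80000000u   /* matches e1000's bit */
    #define ICR_LSC          0x00000004u   /* link status change */
    #define ICR_FATAL_BITS   0x00010000u   /* stand-in for FIFO/parity causes */

    enum irqreturn_model { MODEL_IRQ_NONE, MODEL_IRQ_HANDLED };

    static enum irqreturn_model isr_model(uint32_t icr, int has_automask,
                                          int *schedule_poll)
    {
        *schedule_poll = 0;
        if (!icr)
            return MODEL_IRQ_NONE;        /* shared line, not our interrupt */
        if (has_automask && !(icr & ICR_INT_ASSERTED))
            return MODEL_IRQ_NONE;        /* IMS auto-mask needs INT_ASSERTED */
        if (icr & ICR_FATAL_BITS) {
            printf("critical error, ICR = 0x%x\n", (unsigned)icr);
            return MODEL_IRQ_HANDLED;     /* disable IRQs and bail */
        }
        if (icr & ICR_LSC)
            printf("link change: arm the watchdog timer\n");
        *schedule_poll = 1;               /* all RX/TX work goes to poll() */
        return MODEL_IRQ_HANDLED;
    }

    int main(void)
    {
        int poll;
        isr_model(ICR_INT_ASSERTED | ICR_LSC, 1, &poll);
        printf("schedule NAPI poll: %d\n", poll);
        return 0;
    }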
6587-#ifdef CONFIG_E1000_NAPI
6588 /**
6589  * iegbe_clean - NAPI Rx polling callback
6590  * @adapter: board private structure
6591  **/
6592-
6593-static int
6594-iegbe_clean(struct net_device *poll_dev, int *budget)
6595+static int iegbe_clean(struct napi_struct *napi, int budget)
6596 {
6597- struct iegbe_adapter *adapter;
6598- int work_to_do = min(*budget, poll_dev->quota);
6599- int tx_cleaned, i = 0, work_done = 0;
6600+ struct iegbe_adapter *adapter = container_of(napi, struct iegbe_adapter, napi);
6601+ struct net_device *poll_dev = adapter->netdev;
6602+ int tx_cleaned = 0, work_done = 0;
6603 
6604     /* Must NOT use netdev_priv macro here. */
6605     adapter = poll_dev->priv;
6606 
6607- /* Keep link state information with original netdev */
6608- if (!netif_carrier_ok(adapter->netdev)) {
6609- goto quit_polling;
6610- }
6611- while (poll_dev != &adapter->polling_netdev[i]) {
6612- i++;
6613- if (unlikely(i == adapter->num_queues)) {
6614- BUG();
6615- }
6616- }
6617-
6618-#ifdef IEGBE_GBE_WORKAROUND
6619- /* Tx descriptors are cleaned in iegbe_intr(). No need to clean
6620- them here */
6621- tx_cleaned = FALSE;
6622-#else
6623- tx_cleaned = iegbe_clean_tx_irq(adapter, &adapter->tx_ring[i]);
6624-#endif
6625- adapter->clean_rx(adapter, &adapter->rx_ring[i],
6626- &work_done, work_to_do);
6627-
6628- *budget -= work_done;
6629- poll_dev->quota -= work_done;
6630-
6631- /* If no Tx and not enough Rx work done, exit the polling mode */
6632- if((!tx_cleaned && (work_done == 0)) ||
6633- !netif_running(adapter->netdev)) {
6634-quit_polling:
6635- netif_rx_complete(poll_dev);
6636+ /* iegbe_clean is called per-cpu. This lock protects
6637+ * tx_ring[0] from being cleaned by multiple cpus
6638+ * simultaneously. A failure obtaining the lock means
6639+ * tx_ring[0] is currently being cleaned anyway. */
6640+ if (spin_trylock(&adapter->tx_queue_lock)) {
6641+ tx_cleaned = iegbe_clean_tx_irq(adapter,
6642+ &adapter->tx_ring[0]);
6643+ spin_unlock(&adapter->tx_queue_lock);
6644+ }
6645+
6646+ adapter->clean_rx(adapter, &adapter->rx_ring[0],
6647+ &work_done, budget);
6648+
6649+ if (tx_cleaned)
6650+ work_done = budget;
6651+
6652+ /* If budget not fully consumed, exit the polling mode */
6653+ if (work_done < budget) {
6654+ if (likely(adapter->itr_setting & 3))
6655+ iegbe_set_itr(adapter);
6656+ netif_rx_complete(poll_dev, napi);
6657         iegbe_irq_enable(adapter);
6658- return 0;
6659     }
6660 
6661- return 1;
6662+ return work_done;
6663 }
6664 
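iegbe_clean() now follows the standard NAPI contract: process at most `budget` packets, return how many were actually done, and only when the budget was not exhausted complete the poll and re-enable interrupts; a busy TX cleanup pins work_done to budget so the core keeps polling. A small runnable model of that loop, with the ring work faked:

    #include <stdio.h>

    static int rx_backlog = 130;        /* pretend packets waiting */

    static int clean_rx(int budget)     /* returns packets processed */
    {
        int done = rx_backlog < budget ? rx_backlog : budget;
        rx_backlog -= done;
        return done;
    }

    static int poll_model(int budget, int *irqs_enabled)
    {
        int tx_cleaned = 0;             /* assume no TX work this pass */
        int work_done = clean_rx(budget);

        if (tx_cleaned)
            work_done = budget;         /* stay in polling if TX was busy */

        if (work_done < budget)
            *irqs_enabled = 1;          /* netif_rx_complete + irq enable */
        return work_done;
    }

    int main(void)
    {
        int irqs = 0, wd;
        do {                            /* the core keeps calling poll ... */
            wd = poll_model(64, &irqs);
            printf("work_done=%d irqs_enabled=%d\n", wd, irqs);
        } while (!irqs);                /* ... until budget isn't exhausted */
        return 0;
    }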
6665-#endif
6666-
6667-
6668-#ifndef IEGBE_GBE_WORKAROUND
6669 /**
6670  * iegbe_clean_tx_irq - Reclaim resources after transmit completes
6671  * @adapter: board private structure
6672  **/
6673-
6674-static boolean_t
6675-iegbe_clean_tx_irq(struct iegbe_adapter *adapter,
6676+static bool iegbe_clean_tx_irq(struct iegbe_adapter *adapter,
6677                    struct iegbe_tx_ring *tx_ring)
6678 {
6679- struct net_device *netdev = adapter->netdev;
6680- struct iegbe_tx_desc *tx_desc, *eop_desc;
6681- struct iegbe_buffer *buffer_info;
6682- unsigned int i, eop;
6683- boolean_t cleaned = FALSE;
6684+ struct iegbe_hw *hw = &adapter->hw;
6685+ struct net_device *netdev = adapter->netdev;
6686+ struct iegbe_tx_desc *tx_desc, *eop_desc;
6687+ struct iegbe_buffer *buffer_info;
6688+ unsigned int i, eop;
6689+ unsigned int count = 0;
6690+ bool cleaned = false;
6691+ unsigned int total_tx_bytes=0, total_tx_packets=0;
6692 
6693- i = tx_ring->next_to_clean;
6694- eop = tx_ring->buffer_info[i].next_to_watch;
6695- eop_desc = E1000_TX_DESC(*tx_ring, eop);
6696+ i = tx_ring->next_to_clean;
6697+ eop = tx_ring->buffer_info[i].next_to_watch;
6698+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
6699 
6700     while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
6701- /* Premature writeback of Tx descriptors clear (free buffers
6702- * and unmap pci_mapping) previous_buffer_info */
6703- if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
6704- iegbe_unmap_and_free_tx_resource(adapter,
6705- &tx_ring->previous_buffer_info);
6706- }
6707-
6708- for (cleaned = FALSE; !cleaned; ) {
6709- tx_desc = E1000_TX_DESC(*tx_ring, i);
6710- buffer_info = &tx_ring->buffer_info[i];
6711- cleaned = (i == eop);
6712-
6713-#ifdef NETIF_F_TSO
6714- if (!(netdev->features & NETIF_F_TSO)) {
6715-#endif
6716- iegbe_unmap_and_free_tx_resource(adapter,
6717- buffer_info);
6718-#ifdef NETIF_F_TSO
6719- } else {
6720- if (cleaned) {
6721- memcpy(&tx_ring->previous_buffer_info,
6722- buffer_info,
6723- sizeof(struct iegbe_buffer));
6724- memset(buffer_info, 0,
6725- sizeof(struct iegbe_buffer));
6726- } else {
6727- iegbe_unmap_and_free_tx_resource(
6728- adapter, buffer_info);
6729- }
6730- }
6731-#endif
6732-
6733- tx_desc->buffer_addr = 0;
6734- tx_desc->lower.data = 0;
6735+ for (cleaned = false; !cleaned; ) {
6736+ tx_desc = E1000_TX_DESC(*tx_ring, i);
6737+ buffer_info = &tx_ring->buffer_info[i];
6738+ cleaned = (i == eop);
6739+
6740+ if (cleaned) {
6741+ struct sk_buff *skb = buffer_info->skb;
6742+ unsigned int segs = 0, bytecount;
6743+ segs = skb_shinfo(skb)->gso_segs ?: 1;
6744+ bytecount = ((segs - 1) * skb_headlen(skb)) +
6745+ skb->len;
6746+ total_tx_packets += segs;
6747+ total_tx_bytes += bytecount;
6748+ }
6749+ iegbe_unmap_and_free_tx_resource(adapter, buffer_info);
6750             tx_desc->upper.data = 0;
6751 
6752- if (unlikely(++i == tx_ring->count)) { i = 0; }
6753- }
6754-
6755- tx_ring->pkt++;
6756+ if (unlikely(++i == tx_ring->count)) i = 0;
6757+ }
6758 
6759- eop = tx_ring->buffer_info[i].next_to_watch;
6760- eop_desc = E1000_TX_DESC(*tx_ring, eop);
6761- }
6762+ eop = tx_ring->buffer_info[i].next_to_watch;
6763+ eop_desc = E1000_TX_DESC(*tx_ring, eop);
6764+#define E1000_TX_WEIGHT 64
6765+ /* weight of a sort for tx, to avoid endless transmit cleanup */
6766+ if (count++ == E1000_TX_WEIGHT)
6767+ break;
6768+ }
6769 
6770     tx_ring->next_to_clean = i;
6771 
6772- spin_lock(&tx_ring->tx_lock);
6773+#define TX_WAKE_THRESHOLD 32
6774 
6775- if (unlikely(cleaned && netif_queue_stopped(netdev) &&
6776- netif_carrier_ok(netdev))) {
6777- netif_wake_queue(netdev);
6778- }
6779- spin_unlock(&tx_ring->tx_lock);
6780-
6781- if (adapter->detect_tx_hung) {
6782- /* Detect a transmit hang in hardware, this serializes the
6783- * check with the clearing of time_stamp and movement of i */
6784- adapter->detect_tx_hung = FALSE;
6785-
6786- if (tx_ring->buffer_info[i].dma &&
6787- time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
6788- && !(E1000_READ_REG(&adapter->hw, STATUS) &
6789- E1000_STATUS_TXOFF)) {
6790-
6791- /* detected Tx unit hang */
6792- i = tx_ring->next_to_clean;
6793- eop = tx_ring->buffer_info[i].next_to_watch;
6794- eop_desc = E1000_TX_DESC(*tx_ring, eop);
6795- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
6796- " TDH <%x>\n"
6797- " TDT <%x>\n"
6798- " next_to_use <%x>\n"
6799- " next_to_clean <%x>\n"
6800- "buffer_info[next_to_clean]\n"
6801- " dma <%zx>\n"
6802- " time_stamp <%lx>\n"
6803- " next_to_watch <%x>\n"
6804- " jiffies <%lx>\n"
6805- " next_to_watch.status <%x>\n",
6806- readl(adapter->hw.hw_addr + tx_ring->tdh),
6807- readl(adapter->hw.hw_addr + tx_ring->tdt),
6808- tx_ring->next_to_use,
6809- i,
6810- (size_t)tx_ring->buffer_info[i].dma,
6811- tx_ring->buffer_info[i].time_stamp,
6812- eop,
6813- jiffies,
6814- eop_desc->upper.fields.status);
6815- netif_stop_queue(netdev);
6816+ if (unlikely(cleaned && netif_carrier_ok(netdev) &&
6817+ E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
6818+ /* Make sure that anybody stopping the queue after this
6819+ * sees the new next_to_clean.
6820+ */
6821+ smp_mb();
6822+ if (netif_queue_stopped(netdev)) {
6823+ netif_wake_queue(netdev);
6824+ ++adapter->restart_queue;
6825         }
6826     }
6827-#ifdef NETIF_F_TSO
6828- if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
6829- time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ))) {
6830- iegbe_unmap_and_free_tx_resource(
6831- adapter, &tx_ring->previous_buffer_info);
6832+
6833+ if (adapter->detect_tx_hung) {
6834+ /* Detect a transmit hang in hardware, this serializes the
6835+ * check with the clearing of time_stamp and movement of i */
6836+ adapter->detect_tx_hung = false;
6837+
6838+ if (tx_ring->buffer_info[eop].dma &&
6839+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
6840+ (adapter->tx_timeout_factor * HZ))
6841+ && !(E1000_READ_REG(hw, STATUS) & E1000_STATUS_TXOFF)) {
6842+
6843+ /* detected Tx unit hang */
6844+ DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
6845+ " Tx Queue <%lu>\n"
6846+ " TDH <%x>\n"
6847+ " TDT <%x>\n"
6848+ " next_to_use <%x>\n"
6849+ " next_to_clean <%x>\n"
6850+ "buffer_info[next_to_clean]\n"
6851+ " time_stamp <%lx>\n"
6852+ " next_to_watch <%x>\n"
6853+ " jiffies <%lx>\n"
6854+ " next_to_watch.status <%x>\n",
6855+ (unsigned long)(tx_ring - adapter->tx_ring),
6857+ readl(hw->hw_addr + tx_ring->tdh),
6858+ readl(hw->hw_addr + tx_ring->tdt),
6859+ tx_ring->next_to_use,
6860+ tx_ring->next_to_clean,
6861+ tx_ring->buffer_info[eop].time_stamp,
6862+ eop,
6863+ jiffies,
6864+ eop_desc->upper.fields.status);
6865+ netif_stop_queue(netdev);
6866+ }
6867     }
6868-#endif
6869- return cleaned;
6870+ adapter->total_tx_bytes += total_tx_bytes;
6871+ adapter->total_tx_packets += total_tx_packets;
6872+ adapter->net_stats.tx_bytes += total_tx_bytes;
6873+ adapter->net_stats.tx_packets += total_tx_packets;
6874+ return cleaned;
6875 }
6876-#endif
6877 
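Two tunables shape the rewritten TX reclaim: E1000_TX_WEIGHT caps how many descriptors one pass may reclaim so a poll cannot spin forever, and TX_WAKE_THRESHOLD delays waking the queue until enough descriptors are free again (the smp_mb() orders the new next_to_clean against a concurrent queue stop). A sketch of the bounded reclaim plus the free-slot arithmetic in the style of the driver's E1000_DESC_UNUSED macro:

    #include <stdio.h>

    #define TX_WEIGHT 64
    #define WAKE_THRESHOLD 32

    struct ring { int count, next_to_use, next_to_clean; };

    static int desc_unused(const struct ring *r)
    {
        /* slots the producer may still fill; one slot always stays empty */
        return ((r->next_to_clean > r->next_to_use) ? 0 : r->count)
               + r->next_to_clean - r->next_to_use - 1;
    }

    int main(void)
    {
        struct ring r = { .count = 256, .next_to_use = 200, .next_to_clean = 10 };
        int reclaimed = 0;

        while (r.next_to_clean != r.next_to_use && reclaimed < TX_WEIGHT) {
            r.next_to_clean = (r.next_to_clean + 1) % r.count;
            reclaimed++;               /* unmap + free would happen here */
        }
        printf("reclaimed=%d unused=%d wake=%s\n", reclaimed, desc_unused(&r),
               desc_unused(&r) >= WAKE_THRESHOLD ? "yes" : "no");
        return 0;
    }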
6878 /**
6879  * iegbe_rx_checksum - Receive Checksum Offload for 82543
6880@@ -3913,192 +3689,193 @@ iegbe_clean_tx_irq(struct iegbe_adapter
6881  * @sk_buff: socket buffer with received data
6882  **/
6883 
6884-static inline void
6885-iegbe_rx_checksum(struct iegbe_adapter *adapter,
6886- uint32_t status_err, uint32_t csum,
6887- struct sk_buff *skb)
6888+static void iegbe_rx_checksum(struct iegbe_adapter *adapter, u32 status_err,
6889+ u32 csum, struct sk_buff *skb)
6890 {
6891- uint16_t status = (uint16_t)status_err;
6892- uint8_t errors = (uint8_t)(status_err >> 0x18);
6893+ struct iegbe_hw *hw = &adapter->hw;
6894+ u16 status = (u16)status_err;
6895+ u8 errors = (u8)(status_err >> 24);
6896     skb->ip_summed = CHECKSUM_NONE;
6897 
6898- /* 82543 or newer only */
6899- if(unlikely(adapter->hw.mac_type < iegbe_82543)) { return; }
6900- /* Ignore Checksum bit is set */
6901- if(unlikely(status & E1000_RXD_STAT_IXSM)) { return; }
6902- /* TCP/UDP checksum error bit is set */
6903- if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
6904- /* let the stack verify checksum errors */
6905- adapter->hw_csum_err++;
6906- return;
6907- }
6908- /* TCP/UDP Checksum has not been calculated */
6909- if(adapter->hw.mac_type <= iegbe_82547_rev_2) {
6910- if(!(status & E1000_RXD_STAT_TCPCS)) {
6911- return;
6912+ /* 82543 or newer only */
6913+ if (unlikely(hw->mac_type < iegbe_82543)) return;
6914+ /* Ignore Checksum bit is set */
6915+ if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
6916+ /* TCP/UDP checksum error bit is set */
6917+ if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
6918+ /* let the stack verify checksum errors */
6919+ adapter->hw_csum_err++;
6920+ return;
6921+ }
6922+ /* TCP/UDP Checksum has not been calculated */
6923+ if (hw->mac_type <= iegbe_82547_rev_2) {
6924+ if (!(status & E1000_RXD_STAT_TCPCS))
6925+ return;
6926+ } else {
6927+ if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
6928+ return;
6929         }
6930- } else {
6931- if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
6932- return;
6933- }
6934+ /* It must be a TCP or UDP packet with a valid checksum */
6935+ if(likely(status & E1000_RXD_STAT_TCPCS)) {
6936+ /* TCP checksum is good */
6937+ skb->ip_summed = CHECKSUM_UNNECESSARY;
6938+ } else if (hw->mac_type > iegbe_82547_rev_2) {
6939+ /* IP fragment with UDP payload */
6940+ /* Hardware complements the payload checksum, so we undo it
6941+ * and then put the value in host order for further stack use.
6942+ */
6943+ __sum16 sum = (__force __sum16)htons(csum);
6944+ skb->csum = csum_unfold(~sum);
6945+ skb->ip_summed = CHECKSUM_COMPLETE;
6946     }
6947- /* It must be a TCP or UDP packet with a valid checksum */
6948- if(likely(status & E1000_RXD_STAT_TCPCS)) {
6949- /* TCP checksum is good */
6950- skb->ip_summed = CHECKSUM_UNNECESSARY;
6951- } else if(adapter->hw.mac_type > iegbe_82547_rev_2) {
6952- /* IP fragment with UDP payload */
6953- /* Hardware complements the payload checksum, so we undo it
6954- * and then put the value in host order for further stack use.
6955- */
6956- csum = ntohl(csum ^ 0xFFFF);
6957- skb->csum = csum;
6958- skb->ip_summed = CHECKSUM_HW;
6959- }
6960- adapter->hw_csum_good++;
6961+ adapter->hw_csum_good++;
6962 }
6963 
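For the IP-fragment/UDP case the NIC reports the ones'-complement sum of the payload; the driver undoes the complement and converts the little-endian descriptor value to network order before seeding skb->csum for CHECKSUM_COMPLETE (the kernel's csum_unfold() is essentially a type conversion of the 16-bit value). The arithmetic on its own, as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    static uint16_t unfold(uint16_t hw_csum_le)
    {
        uint16_t net_order = htons(hw_csum_le); /* descriptor -> wire order */
        return (uint16_t)~net_order;            /* undo hardware complement */
    }

    int main(void)
    {
        uint16_t hw = 0x1234;                   /* value read from rx_desc */
        printf("skb->csum seed = 0x%04x\n", unfold(hw));
        return 0;
    }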
6964 /**
6965  * iegbe_clean_rx_irq - Send received data up the network stack; legacy
6966  * @adapter: board private structure
6967  **/
6968-
6969-static boolean_t
6970-#ifdef CONFIG_E1000_NAPI
6971-iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
6972+static bool iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
6973                    struct iegbe_rx_ring *rx_ring,
6974                    int *work_done, int work_to_do)
6975-#else
6976-iegbe_clean_rx_irq(struct iegbe_adapter *adapter,
6977- struct iegbe_rx_ring *rx_ring)
6978-#endif
6979 {
6980- struct net_device *netdev = adapter->netdev;
6981- struct pci_dev *pdev = adapter->pdev;
6982- struct iegbe_rx_desc *rx_desc;
6983- struct iegbe_buffer *buffer_info;
6984- struct sk_buff *skb;
6985- unsigned long flags = 0;
6986- uint32_t length;
6987- uint8_t last_byte;
6988- unsigned int i;
6989- boolean_t cleaned = FALSE;
6990-
6991-#ifdef IEGBE_GBE_WORKAROUND
6992- /* Need to keep track of the amount of Rx descriptors that we
6993- cleaned to ensure that we don't supply too many back to the
6994- hardware */
6995- int cleaned_count = 0;
6996-#endif
6997-
6998- i = rx_ring->next_to_clean;
6999- rx_desc = E1000_RX_DESC(*rx_ring, i);
7000-
7001- while(rx_desc->status & E1000_RXD_STAT_DD) {
7002- buffer_info = &rx_ring->buffer_info[i];
7003-#ifdef CONFIG_E1000_NAPI
7004- if(*work_done >= work_to_do) {
7005- break;
7006- }
7007- (*work_done)++;
7008-#endif
7009- cleaned = TRUE;
7010+ struct iegbe_hw *hw = &adapter->hw;
7011+ struct net_device *netdev = adapter->netdev;
7012+ struct pci_dev *pdev = adapter->pdev;
7013+ struct iegbe_rx_desc *rx_desc, *next_rxd;
7014+ struct iegbe_buffer *buffer_info, *next_buffer;
7015+ unsigned long flags;
7016+ u32 length;
7017+ u8 last_byte;
7018+ unsigned int i;
7019+ int cleaned_count = 0;
7020+ bool cleaned = false;
7021+ unsigned int total_rx_bytes=0, total_rx_packets=0;
7022 
7023-#ifdef IEGBE_GBE_WORKAROUND
7024- cleaned_count++;
7025-#endif
7026+ i = rx_ring->next_to_clean;
7027+ rx_desc = E1000_RX_DESC(*rx_ring, i);
7028+ buffer_info = &rx_ring->buffer_info[i];
7029 
7030- pci_unmap_single(pdev,
7031- buffer_info->dma,
7032- buffer_info->length,
7033- PCI_DMA_FROMDEVICE);
7034+ while(rx_desc->status & E1000_RXD_STAT_DD) {
7035+ struct sk_buff *skb;
7036+ u8 status;
7037+ if (*work_done >= work_to_do)
7038+ break;
7039+ (*work_done)++;
7040 
7041+ status = rx_desc->status;
7042         skb = buffer_info->skb;
7043- length = le16_to_cpu(rx_desc->length);
7044+ buffer_info->skb = NULL;
7045+ prefetch(skb->data - NET_IP_ALIGN);
7046+ if (++i == rx_ring->count) i = 0;
7047+ next_rxd = E1000_RX_DESC(*rx_ring, i);
7048+ prefetch(next_rxd);
7049+ next_buffer = &rx_ring->buffer_info[i];
7050+ cleaned = true;
7051+ cleaned_count++;
7052+ pci_unmap_single(pdev,
7053+ buffer_info->dma,
7054+ buffer_info->length,
7055+ PCI_DMA_FROMDEVICE);
7056+
7057+ length = le16_to_cpu(rx_desc->length);
7058+
7059+ if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
7060+ /* All receives must fit into a single buffer */
7061+ E1000_DBG("%s: Receive packet consumed multiple"
7062+ " buffers\n", netdev->name);
7063+ buffer_info->skb = skb;
7064+ goto next_desc;
7065+ }
7066 
7067- if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
7068- /* All receives must fit into a single buffer */
7069- E1000_DBG("%s: Receive packet consumed multiple"
7070- " buffers\n", netdev->name);
7071- dev_kfree_skb_irq(skb);
7072- goto next_desc;
7073- }
7074+ if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
7075+ last_byte = *(skb->data + length - 1);
7076+ if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
7077+ last_byte)) {
7078+ spin_lock_irqsave(&adapter->stats_lock, flags);
7079+ iegbe_tbi_adjust_stats(hw, &adapter->stats,
7080+ length, skb->data);
7081+ spin_unlock_irqrestore(&adapter->stats_lock,
7082+ flags);
7083+ length--;
7084+ } else {
7085+ buffer_info->skb = skb;
7086+ goto next_desc;
7087+ }
7088+ }
7089 
7090- if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
7091- last_byte = *(skb->data + length - 0x1);
7092- if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
7093- rx_desc->errors, length, last_byte)) {
7094- spin_lock_irqsave(&adapter->stats_lock, flags);
7095- iegbe_tbi_adjust_stats(&adapter->hw,
7096- &adapter->stats,
7097- length, skb->data);
7098- spin_unlock_irqrestore(&adapter->stats_lock,
7099- flags);
7100- length--;
7101- } else {
7102- dev_kfree_skb_irq(skb);
7103- goto next_desc;
7104+ /* adjust length to remove Ethernet CRC, this must be
7105+ * done after the TBI_ACCEPT workaround above */
7106+ length -= 4;
7107+
7108+ /* probably a little skewed due to removing CRC */
7109+ total_rx_bytes += length;
7110+ total_rx_packets++;
7111+
7112+ /* code added for copybreak, this should improve
7113+ * performance for small packets with large amounts
7114+ * of reassembly being done in the stack */
7115+ if (length < copybreak) {
7116+ struct sk_buff *new_skb =
7117+ netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
7118+ if (new_skb) {
7119+ skb_reserve(new_skb, NET_IP_ALIGN);
7120+ skb_copy_to_linear_data_offset(new_skb,
7121+ -NET_IP_ALIGN,
7122+ (skb->data -
7123+ NET_IP_ALIGN),
7124+ (length +
7125+ NET_IP_ALIGN));
7126+ /* save the skb in buffer_info as good */
7127+ buffer_info->skb = skb;
7128+ skb = new_skb;
7129             }
7130+ /* else just continue with the old one */
7131         }
7132-
7133- /* Good Receive */
7134- skb_put(skb, length - ETHERNET_FCS_SIZE);
7135+ /* Good Receive */
7136+ skb_put(skb, length);
7137 
7138         /* Receive Checksum Offload */
7139         iegbe_rx_checksum(adapter,
7140- (uint32_t)(rx_desc->status) |
7141- ((uint32_t)(rx_desc->errors) << 0x18),
7142- rx_desc->csum, skb);
7143+ (u32)(status) |
7144+ ((u32)(rx_desc->errors) << 24),
7145+ le16_to_cpu(rx_desc->csum), skb);
7146+
7147         skb->protocol = eth_type_trans(skb, netdev);
7148-#ifdef CONFIG_E1000_NAPI
7149-#ifdef NETIF_F_HW_VLAN_TX
7150- if(unlikely(adapter->vlgrp &&
7151- (rx_desc->status & E1000_RXD_STAT_VP))) {
7152+
7153+ if (unlikely(adapter->vlgrp &&
7154+ (status & E1000_RXD_STAT_VP))) {
7155             vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
7156- le16_to_cpu(rx_desc->special) &
7157- E1000_RXD_SPC_VLAN_MASK);
7158+ le16_to_cpu(rx_desc->special));
7159         } else {
7160             netif_receive_skb(skb);
7161         }
7162-#else
7163- netif_receive_skb(skb);
7164-#endif
7165-#else /* CONFIG_E1000_NAPI */
7166-#ifdef NETIF_F_HW_VLAN_TX
7167- if(unlikely(adapter->vlgrp &&
7168- (rx_desc->status & E1000_RXD_STAT_VP))) {
7169- vlan_hwaccel_rx(skb, adapter->vlgrp,
7170- le16_to_cpu(rx_desc->special) &
7171- E1000_RXD_SPC_VLAN_MASK);
7172- } else {
7173- netif_rx(skb);
7174- }
7175-#else
7176- netif_rx(skb);
7177-#endif
7178-#endif /* CONFIG_E1000_NAPI */
7179+
7180         netdev->last_rx = jiffies;
7181- rx_ring->pkt++;
7182 
7183 next_desc:
7184         rx_desc->status = 0;
7185- buffer_info->skb = NULL;
7186- if(unlikely(++i == rx_ring->count)) { i = 0; }
7187 
7188- rx_desc = E1000_RX_DESC(*rx_ring, i);
7189+ /* return some buffers to hardware, one at a time is too slow */
7190+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
7191+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
7192+ cleaned_count = 0;
7193+ }
7194+
7195+ /* use prefetched values */
7196+ rx_desc = next_rxd;
7197+ buffer_info = next_buffer;
7198     }
7199     rx_ring->next_to_clean = i;
7200 
7201-#ifdef IEGBE_GBE_WORKAROUND
7202- /* Only allocate the number of buffers that we have actually
7203- cleaned! */
7204- if (cleaned_count) {
7205- adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
7206- }
7207-#else
7208- adapter->alloc_rx_buf(adapter, rx_ring);
7209-#endif
7210-
7211+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
7212+ if (cleaned_count)
7213+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
7214+
7215+ adapter->total_rx_packets += total_rx_packets;
7216+ adapter->total_rx_bytes += total_rx_bytes;
7217+ adapter->net_stats.rx_bytes += total_rx_bytes;
7218+ adapter->net_stats.rx_packets += total_rx_packets;
7219     return cleaned;
7220 }
7221 
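The copybreak path above trades a memcpy for buffer reuse: frames shorter than `copybreak` are copied into a right-sized skb and the original full-size DMA buffer stays in the ring for the next descriptor. A userspace model of the decision; 256 is the default the sibling e1000 driver uses for its copybreak module parameter, and NET_IP_ALIGN handling is omitted for brevity. Note the 4-byte CRC strip happens before the length check, matching the code above:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned int copybreak = 256;

    struct buf { unsigned char *data; size_t len; };

    /* returns the buffer handed to the stack; *recycled tells the caller
     * whether the original DMA buffer can be reused in the ring */
    static struct buf rx_finish(struct buf dma_buf, size_t pkt_len, int *recycled)
    {
        struct buf out = dma_buf;

        pkt_len -= 4;                   /* strip hardware-appended CRC */
        *recycled = 0;
        if (pkt_len < copybreak) {
            unsigned char *copy = malloc(pkt_len);
            if (copy) {
                memcpy(copy, dma_buf.data, pkt_len);
                out.data = copy;        /* small skb goes up the stack */
                *recycled = 1;          /* big buffer stays in the ring */
            }
        }
        out.len = pkt_len;
        return out;
    }

    int main(void)
    {
        unsigned char frame[2048] = "tiny packet";
        struct buf dma = { frame, sizeof(frame) };
        int recycled;
        struct buf up = rx_finish(dma, 64 + 4, &recycled);
        printf("len=%zu recycled=%d\n", up.len, recycled);
        if (up.data != frame)
            free(up.data);
        return 0;
    }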
7222@@ -4107,161 +3884,153 @@ next_desc:
7223  * @adapter: board private structure
7224  **/
7225 
7226-static boolean_t
7227-#ifdef CONFIG_E1000_NAPI
7228-iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
7229+static bool iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
7230                       struct iegbe_rx_ring *rx_ring,
7231                       int *work_done, int work_to_do)
7232-#else
7233-iegbe_clean_rx_irq_ps(struct iegbe_adapter *adapter,
7234- struct iegbe_rx_ring *rx_ring)
7235-#endif
7236 {
7237- union iegbe_rx_desc_packet_split *rx_desc;
7238- struct net_device *netdev = adapter->netdev;
7239- struct pci_dev *pdev = adapter->pdev;
7240- struct iegbe_buffer *buffer_info;
7241- struct iegbe_ps_page *ps_page;
7242- struct iegbe_ps_page_dma *ps_page_dma;
7243- struct sk_buff *skb;
7244- unsigned int i, j;
7245- uint32_t length, staterr;
7246- boolean_t cleaned = FALSE;
7247-
7248-#ifdef IEGBE_GBE_WORKAROUND
7249- /* Need to keep track of the amount of Rx descriptors that we
7250- cleaned to ensure that we don't supply too many back to the
7251- hardware */
7252- int cleaned_count = 0;
7253-#endif
7254-
7255- i = rx_ring->next_to_clean;
7256- rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
7257- staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
7258-
7259- while(staterr & E1000_RXD_STAT_DD) {
7260- buffer_info = &rx_ring->buffer_info[i];
7261- ps_page = &rx_ring->ps_page[i];
7262- ps_page_dma = &rx_ring->ps_page_dma[i];
7263-#ifdef CONFIG_E1000_NAPI
7264- if(unlikely(*work_done >= work_to_do)) {
7265- break;
7266- }
7267- (*work_done)++;
7268-#endif
7269- cleaned = TRUE;
7270-
7271-#ifdef IEGBE_GBE_WORKAROUND
7272- cleaned_count++;
7273-#endif
7274+ union iegbe_rx_desc_packet_split *rx_desc, *next_rxd;
7275+ struct net_device *netdev = adapter->netdev;
7276+ struct pci_dev *pdev = adapter->pdev;
7277+ struct iegbe_buffer *buffer_info, *next_buffer;
7278+ struct iegbe_ps_page *ps_page;
7279+ struct iegbe_ps_page_dma *ps_page_dma;
7280+ struct sk_buff *skb;
7281+ unsigned int i, j;
7282+ u32 length, staterr;
7283+ int cleaned_count = 0;
7284+ bool cleaned = false;
7285+ unsigned int total_rx_bytes=0, total_rx_packets=0;
7286+
7287+ i = rx_ring->next_to_clean;
7288+ rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
7289+ staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
7290+ buffer_info = &rx_ring->buffer_info[i];
7291 
7292- pci_unmap_single(pdev, buffer_info->dma,
7293- buffer_info->length,
7294- PCI_DMA_FROMDEVICE);
7295+ while(staterr & E1000_RXD_STAT_DD) {
7296+ ps_page = &rx_ring->ps_page[i];
7297+ ps_page_dma = &rx_ring->ps_page_dma[i];
7298+
7299+ if (unlikely(*work_done >= work_to_do))
7300+ break;
7301+ (*work_done)++;
7302 
7303         skb = buffer_info->skb;
7304+ prefetch(skb->data - NET_IP_ALIGN);
7305+ if (++i == rx_ring->count) i = 0;
7306+ next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
7307+ prefetch(next_rxd);
7308+ next_buffer = &rx_ring->buffer_info[i];
7309+ cleaned = true;
7310+ cleaned_count++;
7311+ pci_unmap_single(pdev, buffer_info->dma,
7312+ buffer_info->length,
7313+ PCI_DMA_FROMDEVICE);
7314+
7315+ if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
7316+ E1000_DBG("%s: Packet Split buffers didn't pick up"
7317+ " the full packet\n", netdev->name);
7318+ dev_kfree_skb_irq(skb);
7319+ goto next_desc;
7320+ }
7321 
7322- if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
7323- E1000_DBG("%s: Packet Split buffers didn't pick up"
7324- " the full packet\n", netdev->name);
7325- dev_kfree_skb_irq(skb);
7326- goto next_desc;
7327- }
7328-
7329- if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
7330- dev_kfree_skb_irq(skb);
7331- goto next_desc;
7332- }
7333-
7334- length = le16_to_cpu(rx_desc->wb.middle.length0);
7335+ if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
7336+ dev_kfree_skb_irq(skb);
7337+ goto next_desc;
7338+ }
7339 
7340- if(unlikely(!length)) {
7341- E1000_DBG("%s: Last part of the packet spanning"
7342- " multiple descriptors\n", netdev->name);
7343- dev_kfree_skb_irq(skb);
7344- goto next_desc;
7345- }
7346+ length = le16_to_cpu(rx_desc->wb.middle.length0);
7347 
7348- /* Good Receive */
7349- skb_put(skb, length);
7350-
7351- for(j = 0; j < adapter->rx_ps_pages; j++) {
7352- if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) {
7353- break;
7354- }
7355- pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
7356- PAGE_SIZE, PCI_DMA_FROMDEVICE);
7357- ps_page_dma->ps_page_dma[j] = 0;
7358- skb_shinfo(skb)->frags[j].page =
7359- ps_page->ps_page[j];
7360- ps_page->ps_page[j] = NULL;
7361- skb_shinfo(skb)->frags[j].page_offset = 0;
7362- skb_shinfo(skb)->frags[j].size = length;
7363- skb_shinfo(skb)->nr_frags++;
7364- skb->len += length;
7365- skb->data_len += length;
7366- }
7367+ if(unlikely(!length)) {
7368+ E1000_DBG("%s: Last part of the packet spanning"
7369+ " multiple descriptors\n", netdev->name);
7370+ dev_kfree_skb_irq(skb);
7371+ goto next_desc;
7372+ }
7373 
7374- iegbe_rx_checksum(adapter, staterr,
7375- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
7376- skb->protocol = eth_type_trans(skb, netdev);
7377+ /* Good Receive */
7378+ skb_put(skb, length);
7379 
7380- if(likely(rx_desc->wb.upper.header_status &
7381- E1000_RXDPS_HDRSTAT_HDRSP)) {
7382- adapter->rx_hdr_split++;
7383-#ifdef HAVE_RX_ZERO_COPY
7384- skb_shinfo(skb)->zero_copy = TRUE;
7385-#endif
7386- }
7387-#ifdef CONFIG_E1000_NAPI
7388-#ifdef NETIF_F_HW_VLAN_TX
7389- if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
7390- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
7391- le16_to_cpu(rx_desc->wb.middle.vlan) &
7392- E1000_RXD_SPC_VLAN_MASK);
7393- } else {
7394- netif_receive_skb(skb);
7395- }
7396-#else
7397- netif_receive_skb(skb);
7398-#endif
7399-#else /* CONFIG_E1000_NAPI */
7400-#ifdef NETIF_F_HW_VLAN_TX
7401- if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
7402- vlan_hwaccel_rx(skb, adapter->vlgrp,
7403- le16_to_cpu(rx_desc->wb.middle.vlan) &
7404- E1000_RXD_SPC_VLAN_MASK);
7405- } else {
7406- netif_rx(skb);
7407- }
7408-#else
7409- netif_rx(skb);
7410-#endif
7411-#endif /* CONFIG_E1000_NAPI */
7412- netdev->last_rx = jiffies;
7413- rx_ring->pkt++;
7414+ {
7415+ int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
7416+ if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
7417+ u8 *vaddr;
7418+ pci_dma_sync_single_for_cpu(pdev,
7419+ ps_page_dma->ps_page_dma[0],
7420+ PAGE_SIZE,
7421+ PCI_DMA_FROMDEVICE);
7422+ vaddr = kmap_atomic(ps_page->ps_page[0],
7423+ KM_SKB_DATA_SOFTIRQ);
7424+ memcpy(skb_tail_pointer(skb), vaddr, l1);
7425+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
7426+ pci_dma_sync_single_for_device(pdev,
7427+ ps_page_dma->ps_page_dma[0],
7428+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
7429+ l1 -= 4;
7430+ skb_put(skb, l1);
7431+ goto copydone;
7432+ } /* if */
7433+ }
7434+ for (j = 0; j < adapter->rx_ps_pages; j++) {
7435+ length = le16_to_cpu(rx_desc->wb.upper.length[j]);
7436+ if (!length)
7437+ break;
7438+ pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
7439+ PAGE_SIZE, PCI_DMA_FROMDEVICE);
7440+ ps_page_dma->ps_page_dma[j] = 0;
7441+ skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
7442+ length);
7443+ ps_page->ps_page[j] = NULL;
7444+ skb->len += length;
7445+ skb->data_len += length;
7446+ skb->truesize += length;
7447+ }
7448 
7449-next_desc:
7450- rx_desc->wb.middle.status_error &= ~0xFF;
7451- buffer_info->skb = NULL;
7452- if(unlikely(++i == rx_ring->count)) { i = 0; }
7453+ pskb_trim(skb, skb->len - 4);
7454+copydone:
7455+ total_rx_bytes += skb->len;
7456+ total_rx_packets++;
7457+ iegbe_rx_checksum(adapter, staterr,
7458+ le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
7459+ skb->protocol = eth_type_trans(skb, netdev);
7460+
7461+ if(likely(rx_desc->wb.upper.header_status &
7462+ cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
7463+ adapter->rx_hdr_split++;
7464+
7465+ if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
7466+ vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
7467+ le16_to_cpu(rx_desc->wb.middle.vlan));
7468+ } else {
7469+ netif_receive_skb(skb);
7470+ }
7471 
7472- rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
7473- staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
7474- }
7475- rx_ring->next_to_clean = i;
7476+ netdev->last_rx = jiffies;
7477 
7478-#ifdef IEGBE_GBE_WORKAROUND
7479- /* Only allocate the number of buffers that we have actually
7480- cleaned! */
7481- if (cleaned_count) {
7482- adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
7483- }
7484-#else
7485- adapter->alloc_rx_buf(adapter, rx_ring);
7486-#endif
7487+next_desc:
7488+ rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
7489+ buffer_info->skb = NULL;
7490 
7491- return cleaned;
7492+ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
7493+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
7494+ cleaned_count = 0;
7495+ }
7496+
7497+ /* use prefetched values */
7498+ rx_desc = next_rxd;
7499+ buffer_info = next_buffer;
7500+ staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
7501+ }
7502+ rx_ring->next_to_clean = i;
7503+
7504+ cleaned_count = E1000_DESC_UNUSED(rx_ring);
7505+ if (cleaned_count)
7506+ adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
7507+
7508+ adapter->total_rx_packets += total_rx_packets;
7509+ adapter->total_rx_bytes += total_rx_bytes;
7510+ adapter->net_stats.rx_bytes += total_rx_bytes;
7511+ adapter->net_stats.rx_packets += total_rx_packets;
7512+ return cleaned;
7513 }
7514 
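In the packet-split path the header (and, via the small-l1 copy above, short payloads) lands in buffer 0, while each nonzero length[j] describes a full page that becomes an skb fragment; a zero length terminates the chain, and pskb_trim() then drops the trailing CRC. A simplified model of that assembly, with page and frag bookkeeping reduced to counters:

    #include <stdio.h>

    #define PS_PAGE_BUFFERS 4

    int main(void)
    {
        /* lengths as written back by the NIC: header, then page payloads */
        unsigned int length0 = 114;
        unsigned int page_len[PS_PAGE_BUFFERS] = { 4096, 1210, 0, 0 };
        unsigned int skb_len = length0, frags = 0;

        for (unsigned int j = 0; j < PS_PAGE_BUFFERS; j++) {
            if (!page_len[j])
                break;                 /* a zero length ends the chain */
            skb_len += page_len[j];    /* page becomes frag j of the skb */
            frags++;
        }
        skb_len -= 4;                  /* pskb_trim() drops the trailing CRC */
        printf("frags=%u total_len=%u\n", frags, skb_len);
        return 0;
    }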
7515 /**
7516@@ -4269,142 +4038,115 @@ next_desc:
7517  * @adapter: address of board private structure
7518  **/
7519 
7520-#ifdef IEGBE_GBE_WORKAROUND
7521-static void
7522-iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
7523+
7524+static void iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
7525                        struct iegbe_rx_ring *rx_ring,
7526                        int cleaned_count)
7527-#else
7528-static void
7529-iegbe_alloc_rx_buffers(struct iegbe_adapter *adapter,
7530- struct iegbe_rx_ring *rx_ring)
7531-#endif
7532 {
7533- struct net_device *netdev = adapter->netdev;
7534- struct pci_dev *pdev = adapter->pdev;
7535- struct iegbe_rx_desc *rx_desc;
7536- struct iegbe_buffer *buffer_info;
7537- struct sk_buff *skb;
7538- unsigned int i;
7539- unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
7540-
7541- i = rx_ring->next_to_use;
7542- buffer_info = &rx_ring->buffer_info[i];
7543+ struct iegbe_hw *hw = &adapter->hw;
7544+ struct net_device *netdev = adapter->netdev;
7545+ struct pci_dev *pdev = adapter->pdev;
7546+ struct iegbe_rx_desc *rx_desc;
7547+ struct iegbe_buffer *buffer_info;
7548+ struct sk_buff *skb;
7549+ unsigned int i;
7550+ unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
7551 
7552-#ifdef IEGBE_GBE_WORKAROUND
7553- if (cleaned_count > IEGBE_GBE_WORKAROUND_NUM_RX_DESCRIPTORS) {
7554- adapter->stats.cc_gt_num_rx++;
7555- }
7556- while(cleaned_count-- && !buffer_info->skb) {
7557-#else
7558- while(!buffer_info->skb) {
7559-#endif
7560- skb = dev_alloc_skb(bufsz);
7561+ i = rx_ring->next_to_use;
7562+ buffer_info = &rx_ring->buffer_info[i];
7563 
7564- if(unlikely(!skb)) {
7565- /* Better luck next round */
7566- break;
7567- }
7568+ while (cleaned_count--) {
7569+ skb = buffer_info->skb;
7570+ if (skb) {
7571+ skb_trim(skb, 0);
7572+ goto map_skb;
7573+ }
7574+ skb = netdev_alloc_skb(netdev, bufsz);
7575+
7576+ if(unlikely(!skb)) {
7577+ /* Better luck next round */
7578+ adapter->alloc_rx_buff_failed++;
7579+ break;
7580+ }
7581 
7582- /* Fix for errata 23, can't cross 64kB boundary */
7583- if(!iegbe_check_64k_bound(adapter, skb->data, bufsz)) {
7584- struct sk_buff *oldskb = skb;
7585- DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
7586- "at %p\n", bufsz, skb->data);
7587- /* Try again, without freeing the previous */
7588- skb = dev_alloc_skb(bufsz);
7589- /* Failed allocation, critical failure */
7590- if(!skb) {
7591- dev_kfree_skb(oldskb);
7592- break;
7593- }
7594+ /* Fix for errata 23, can't cross 64kB boundary */
7595+ if(!iegbe_check_64k_bound(adapter, skb->data, bufsz)) {
7596+ struct sk_buff *oldskb = skb;
7597+ DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
7598+ "at %p\n", bufsz, skb->data);
7599+ /* Try again, without freeing the previous */
7600+ skb = netdev_alloc_skb(netdev, bufsz);
7601+ /* Failed allocation, critical failure */
7602+ if(!skb) {
7603+ dev_kfree_skb(oldskb);
7604+ break;
7605+ }
7606 
7607- if(!iegbe_check_64k_bound(adapter, skb->data, bufsz)) {
7608- /* give up */
7609- dev_kfree_skb(skb);
7610- dev_kfree_skb(oldskb);
7611- break; /* while !buffer_info->skb */
7612- } else {
7613- /* Use new allocation */
7614- dev_kfree_skb(oldskb);
7615+ if(!iegbe_check_64k_bound(adapter, skb->data, bufsz)) {
7616+ /* give up */
7617+ dev_kfree_skb(skb);
7618+ dev_kfree_skb(oldskb);
7619+ break; /* while !buffer_info->skb */
7620             }
7621- }
7622- /* Make buffer alignment 2 beyond a 16 byte boundary
7623- * this will result in a 16 byte aligned IP header after
7624- * the 14 byte MAC header is removed
7625- */
7626- skb_reserve(skb, NET_IP_ALIGN);
7627-
7628- skb->dev = netdev;
7629-
7630- buffer_info->skb = skb;
7631- buffer_info->length = adapter->rx_buffer_len;
7632- buffer_info->dma = pci_map_single(pdev,
7633- skb->data,
7634- adapter->rx_buffer_len,
7635- PCI_DMA_FROMDEVICE);
7636-
7637- /* Fix for errata 23, can't cross 64kB boundary */
7638- if(!iegbe_check_64k_bound(adapter,
7639- (void *)(unsigned long)buffer_info->dma,
7640- adapter->rx_buffer_len)) {
7641- DPRINTK(RX_ERR, ERR,
7642- "dma align check failed: %u bytes at %p\n",
7643- adapter->rx_buffer_len,
7644- (void *)(unsigned long)buffer_info->dma);
7645- dev_kfree_skb(skb);
7646- buffer_info->skb = NULL;
7647-
7648- pci_unmap_single(pdev, buffer_info->dma,
7649- adapter->rx_buffer_len,
7650- PCI_DMA_FROMDEVICE);
7651-
7652- break; /* while !buffer_info->skb */
7653- }
7654- rx_desc = E1000_RX_DESC(*rx_ring, i);
7655- rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
7656-
7657-#ifdef IEGBE_GBE_WORKAROUND_DISABLED
7658- adapter->stats.num_rx_buf_alloc++;
7659+ /* Use new allocation */
7660+ dev_kfree_skb(oldskb);
7661+ }
7662+ /* Make buffer alignment 2 beyond a 16 byte boundary
7663+ * this will result in a 16 byte aligned IP header after
7664+ * the 14 byte MAC header is removed
7665+ */
7666+ skb_reserve(skb, NET_IP_ALIGN);
7667+
7668+
7669+ buffer_info->skb = skb;
7670+ buffer_info->length = adapter->rx_buffer_len;
7671+map_skb:
7672+ buffer_info->dma = pci_map_single(pdev,
7673+ skb->data,
7674+ adapter->rx_buffer_len,
7675+ PCI_DMA_FROMDEVICE);
7676+
7677+ /* Fix for errata 23, can't cross 64kB boundary */
7678+ if(!iegbe_check_64k_bound(adapter,
7679+ (void *)(unsigned long)buffer_info->dma,
7680+ adapter->rx_buffer_len)) {
7681+ DPRINTK(RX_ERR, ERR,
7682+ "dma align check failed: %u bytes at %p\n",
7683+ adapter->rx_buffer_len,
7684+ (void *)(unsigned long)buffer_info->dma);
7685+ dev_kfree_skb(skb);
7686+ buffer_info->skb = NULL;
7687+
7688+ pci_unmap_single(pdev, buffer_info->dma,
7689+ adapter->rx_buffer_len,
7690+ PCI_DMA_FROMDEVICE);
7691 
7692- /* Force memory writes to complete before letting h/w
7693- * know there are new descriptors to fetch. (Only
7694- * applicable for weak-ordered memory model archs,
7695- * such as IA-64). */
7696- wmb();
7697- writel(i, adapter->hw.hw_addr + rx_ring->rdt);
7698+ break; /* while !buffer_info->skb */
7699+ }
7700+ rx_desc = E1000_RX_DESC(*rx_ring, i);
7701+ rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
7702 
7703-#endif
7704-#ifndef IEGBE_GBE_WORKAROUND
7705- if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 0x1)) == i)) {
7706- /* Force memory writes to complete before letting h/w
7707- * know there are new descriptors to fetch. (Only
7708- * applicable for weak-ordered memory model archs,
7709- * such as IA-64). */
7710- wmb();
7711- writel(i, adapter->hw.hw_addr + rx_ring->rdt);
7712- }
7713-#endif
7714- if(unlikely(++i == rx_ring->count)) { i = 0; }
7715- buffer_info = &rx_ring->buffer_info[i];
7716- }
7721+ if (unlikely(++i == rx_ring->count))
7722+ i = 0;
7723+ buffer_info = &rx_ring->buffer_info[i];
7724+ }
7725 
7726-#ifdef IEGBE_GBE_WORKAROUND
7727     if (likely(rx_ring->next_to_use != i)) {
7728- rx_ring->next_to_use = i;
7729- if (unlikely(i-- == 0)) {
7730- i = (rx_ring->count - 0x1);
7731- }
7732+ rx_ring->next_to_use = i;
7733+ if (unlikely(i-- == 0))
7734+ i = (rx_ring->count - 1);
7735+
7736         /* Force memory writes to complete before letting h/w
7737          * know there are new descriptors to fetch. (Only
7738          * applicable for weak-ordered memory model archs,
7739          * such as IA-64). */
7740         wmb();
7741- writel(i, adapter->hw.hw_addr + rx_ring->rdt);
7742+ writel(i, hw->hw_addr + rx_ring->rdt);
7743     }
7744-#else
7745- rx_ring->next_to_use = i;
7746-#endif
7747 }
7748 
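The refill tail update above carries a subtlety: after next_to_use is advanced, the index is stepped back one (with wraparound) before being written to RDT, so the tail register points at the last descriptor actually handed to hardware rather than the next empty slot. The wraparound arithmetic in isolation:

    #include <stdio.h>

    static unsigned int rdt_value(unsigned int next_to_use, unsigned int count)
    {
        /* tail = one behind the next slot the driver will fill */
        return next_to_use ? next_to_use - 1 : count - 1;
    }

    int main(void)
    {
        printf("next_to_use=5 -> RDT=%u\n", rdt_value(5, 256));
        printf("next_to_use=0 -> RDT=%u\n", rdt_value(0, 256)); /* wraps */
        return 0;
    }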
7749 /**
7750@@ -4412,49 +4154,41 @@ iegbe_alloc_rx_buffers(struct iegbe_adap
7751  * @adapter: address of board private structure
7752  **/
7753 
7754-#ifdef IEGBE_GBE_WORKAROUND
7755-static void
7756-iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
7757+
7758+static void iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
7759                           struct iegbe_rx_ring *rx_ring,
7760                           int cleaned_count)
7761-#else
7762-static void
7763-iegbe_alloc_rx_buffers_ps(struct iegbe_adapter *adapter,
7764- struct iegbe_rx_ring *rx_ring)
7765-#endif
7766 {
7767- struct net_device *netdev = adapter->netdev;
7768- struct pci_dev *pdev = adapter->pdev;
7769- union iegbe_rx_desc_packet_split *rx_desc;
7770- struct iegbe_buffer *buffer_info;
7771- struct iegbe_ps_page *ps_page;
7772- struct iegbe_ps_page_dma *ps_page_dma;
7773- struct sk_buff *skb;
7774- unsigned int i, j;
7775-
7776- i = rx_ring->next_to_use;
7777- buffer_info = &rx_ring->buffer_info[i];
7778- ps_page = &rx_ring->ps_page[i];
7779- ps_page_dma = &rx_ring->ps_page_dma[i];
7780+ struct iegbe_hw *hw = &adapter->hw;
7781+ struct net_device *netdev = adapter->netdev;
7782+ struct pci_dev *pdev = adapter->pdev;
7783+ union iegbe_rx_desc_packet_split *rx_desc;
7784+ struct iegbe_buffer *buffer_info;
7785+ struct iegbe_ps_page *ps_page;
7786+ struct iegbe_ps_page_dma *ps_page_dma;
7787+ struct sk_buff *skb;
7788+ unsigned int i, j;
7789+
7790+ i = rx_ring->next_to_use;
7791+ buffer_info = &rx_ring->buffer_info[i];
7792+ ps_page = &rx_ring->ps_page[i];
7793+ ps_page_dma = &rx_ring->ps_page_dma[i];
7794 
7795-#ifdef IEGBE_GBE_WORKAROUND
7796- while(cleaned_count-- && !buffer_info->skb) {
7797-#else
7798- while(!buffer_info->skb) {
7799-#endif
7800- rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
7801+ while (cleaned_count--) {
7802+ rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
7803 
7804         for (j = 0; j < PS_PAGE_BUFFERS; j++) {
7805- if (j < adapter->rx_ps_pages) {
7806- if (likely(!ps_page->ps_page[j])) {
7807- ps_page->ps_page[j] =
7808- alloc_page(GFP_ATOMIC);
7809+ if (j < adapter->rx_ps_pages) {
7810+ if (likely(!ps_page->ps_page[j])) {
7811+ ps_page->ps_page[j] =
7812+ alloc_page(GFP_ATOMIC);
7813                     if (unlikely(!ps_page->ps_page[j])) {
7814- goto no_buffers;
7815+ adapter->alloc_rx_buff_failed++;
7816+ goto no_buffers;
7817                     }
7818- ps_page_dma->ps_page_dma[j] =
7819- pci_map_page(pdev,
7820- ps_page->ps_page[j],
7821+ ps_page_dma->ps_page_dma[j] =
7822+ pci_map_page(pdev,
7823+ ps_page->ps_page[j],
7824                                 0, PAGE_SIZE,
7825                                 PCI_DMA_FROMDEVICE);
7826                 }
7827@@ -4462,26 +4196,26 @@ iegbe_alloc_rx_buffers_ps(struct iegbe_a
7828                  * change because each write-back erases
7829                  * this info.
7830                  */
7831- rx_desc->read.buffer_addr[j+0x1] =
7832+ rx_desc->read.buffer_addr[j+1] =
7833                      cpu_to_le64(ps_page_dma->ps_page_dma[j]);
7834- } else {
7835- rx_desc->read.buffer_addr[j+0x1] = ~0;
7836- }
7837+ } else
7838+ rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
7839         }
7840 
7841- skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
7842+ skb = netdev_alloc_skb(netdev,
7843+ adapter->rx_ps_bsize0 + NET_IP_ALIGN);
7844 
7845- if (unlikely(!skb)) {
7846+ if (unlikely(!skb)) {
7847+ adapter->alloc_rx_buff_failed++;
7848             break;
7849- }
7850+ }
7851+
7852         /* Make buffer alignment 2 beyond a 16 byte boundary
7853          * this will result in a 16 byte aligned IP header after
7854          * the 14 byte MAC header is removed
7855          */
7856         skb_reserve(skb, NET_IP_ALIGN);
7857 
7858- skb->dev = netdev;
7859-
7860         buffer_info->skb = skb;
7861         buffer_info->length = adapter->rx_ps_bsize0;
7862         buffer_info->dma = pci_map_single(pdev, skb->data,
7863@@ -4490,27 +4224,28 @@ iegbe_alloc_rx_buffers_ps(struct iegbe_a
7864 
7865         rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
7866 
7867- if (unlikely((i & ~(E1000_RX_BUFFER_WRITE - 0x1)) == i)) {
7868- /* Force memory writes to complete before letting h/w
7869- * know there are new descriptors to fetch. (Only
7870- * applicable for weak-ordered memory model archs,
7871- * such as IA-64). */
7872- wmb();
7873- /* Hardware increments by 16 bytes, but packet split
7874- * descriptors are 32 bytes...so we increment tail
7875- * twice as much.
7876- */
7877- writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
7878- }
7879-
7880- if (unlikely(++i == rx_ring->count)) { i = 0; }
7881+ if (unlikely(++i == rx_ring->count)) i = 0;
7882         buffer_info = &rx_ring->buffer_info[i];
7883         ps_page = &rx_ring->ps_page[i];
7884         ps_page_dma = &rx_ring->ps_page_dma[i];
7885     }
7886 
7887 no_buffers:
7888- rx_ring->next_to_use = i;
7889+ if (likely(rx_ring->next_to_use != i)) {
7890+ rx_ring->next_to_use = i;
7891+ if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
7892+
7893+ /* Force memory writes to complete before letting h/w
7894+ * know there are new descriptors to fetch. (Only
7895+ * applicable for weak-ordered memory model archs,
7896+ * such as IA-64). */
7897+ wmb();
7898+ /* Hardware increments by 16 bytes, but packet split
7899+ * descriptors are 32 bytes...so we increment tail
7900+ * twice as much.
7901+ */
7902+ writel(i<<1, hw->hw_addr + rx_ring->rdt);
7903+ }
7904 }
7905 
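The packet-split refill mirrors the legacy one, with the twist called out in the comment above: the hardware tail advances in 16-byte units while packet-split descriptors are 32 bytes, so the index is doubled before the writel(). Trivially:

    #include <stdio.h>

    int main(void)
    {
        unsigned int i = 37;              /* descriptor index after refill */
        unsigned int legacy_rdt = i;      /* 16-byte legacy descriptors */
        unsigned int ps_rdt = i << 1;     /* 32-byte packet-split descriptors */
        printf("legacy RDT=%u packet-split RDT=%u\n", legacy_rdt, ps_rdt);
        return 0;
    }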
7906 /**
7907@@ -4521,52 +4256,52 @@ no_buffers:
7908 static void
7909 iegbe_smartspeed(struct iegbe_adapter *adapter)
7910 {
7911- uint16_t phy_status;
7912- uint16_t phy_ctrl;
7913+ uint16_t phy_status;
7914+ uint16_t phy_ctrl;
7915 
7916- if((adapter->hw.phy_type != iegbe_phy_igp) || !adapter->hw.autoneg ||
7917+ if((adapter->hw.phy_type != iegbe_phy_igp) || !adapter->hw.autoneg ||
7918        !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) {
7919- return;
7920+ return;
7921     }
7922- if(adapter->smartspeed == 0) {
7923- /* If Master/Slave config fault is asserted twice,
7924- * we assume back-to-back */
7925- iegbe_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
7926+ if(adapter->smartspeed == 0x0) {
7927+ /* If Master/Slave config fault is asserted twice,
7928+ * we assume back-to-back */
7929+ iegbe_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
7930         if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) { return; }
7931- iegbe_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
7932+ iegbe_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
7933         if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) { return; }
7934- iegbe_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
7935- if(phy_ctrl & CR_1000T_MS_ENABLE) {
7936- phy_ctrl &= ~CR_1000T_MS_ENABLE;
7937- iegbe_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
7938- phy_ctrl);
7939- adapter->smartspeed++;
7940- if(!iegbe_phy_setup_autoneg(&adapter->hw) &&
7941- !iegbe_read_phy_reg(&adapter->hw, PHY_CTRL,
7942- &phy_ctrl)) {
7943- phy_ctrl |= (MII_CR_AUTO_NEG_EN |
7944- MII_CR_RESTART_AUTO_NEG);
7945- iegbe_write_phy_reg(&adapter->hw, PHY_CTRL,
7946- phy_ctrl);
7947- }
7948- }
7949- return;
7950- } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
7951- /* If still no link, perhaps using 2/3 pair cable */
7952- iegbe_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
7953- phy_ctrl |= CR_1000T_MS_ENABLE;
7954- iegbe_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
7955- if(!iegbe_phy_setup_autoneg(&adapter->hw) &&
7956- !iegbe_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
7957- phy_ctrl |= (MII_CR_AUTO_NEG_EN |
7958- MII_CR_RESTART_AUTO_NEG);
7959- iegbe_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
7960- }
7961- }
7962- /* Restart process after E1000_SMARTSPEED_MAX iterations */
7963+ iegbe_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
7964+ if(phy_ctrl & CR_1000T_MS_ENABLE) {
7965+ phy_ctrl &= ~CR_1000T_MS_ENABLE;
7966+ iegbe_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
7967+ phy_ctrl);
7968+ adapter->smartspeed++;
7969+ if(!iegbe_phy_setup_autoneg(&adapter->hw) &&
7970+ !iegbe_read_phy_reg(&adapter->hw, PHY_CTRL,
7971+ &phy_ctrl)) {
7972+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
7973+ MII_CR_RESTART_AUTO_NEG);
7974+ iegbe_write_phy_reg(&adapter->hw, PHY_CTRL,
7975+ phy_ctrl);
7976+ }
7977+ }
7978+ return;
7979+ } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
7980+ /* If still no link, perhaps using 2/3 pair cable */
7981+ iegbe_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
7982+ phy_ctrl |= CR_1000T_MS_ENABLE;
7983+ iegbe_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
7984+ if(!iegbe_phy_setup_autoneg(&adapter->hw) &&
7985+ !iegbe_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
7986+ phy_ctrl |= (MII_CR_AUTO_NEG_EN |
7987+ MII_CR_RESTART_AUTO_NEG);
7988+ iegbe_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
7989+ }
7990+ }
7991+ /* Restart process after E1000_SMARTSPEED_MAX iterations */
7992     if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX) {
7993- adapter->smartspeed = 0;
7994-}
7995+ adapter->smartspeed = 0x0;
7996+ }
7997 }
7998 
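iegbe_smartspeed() is a counter-driven workaround for gigabit master/slave negotiation failures: at count 0, two consecutive config faults clear the manual master/slave enable and restart autonegotiation; at E1000_SMARTSPEED_DOWNSHIFT the enable is forced back on (the 2/3-pair-cable case); at E1000_SMARTSPEED_MAX the whole cycle resets. A model of the counter's life, assuming the thresholds 3 and 15 that the sibling e1000 driver defines for these constants:

    #include <stdio.h>

    #define SMARTSPEED_DOWNSHIFT 3
    #define SMARTSPEED_MAX 15

    int main(void)
    {
        int smartspeed = 0;
        for (int tick = 0; tick < 20; tick++) {   /* watchdog iterations */
            if (smartspeed == 0)
                printf("tick %2d: fault twice -> clear MS_ENABLE, renegotiate\n",
                       tick);
            else if (smartspeed == SMARTSPEED_DOWNSHIFT)
                printf("tick %2d: still no link -> set MS_ENABLE, renegotiate\n",
                       tick);
            if (smartspeed++ == SMARTSPEED_MAX) {
                smartspeed = 0;                   /* restart the cycle */
                printf("tick %2d: give up, restart cycle\n", tick);
            }
        }
        return 0;
    }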
7999 /**
8000@@ -4576,23 +4311,22 @@ iegbe_smartspeed(struct iegbe_adapter *a
8001  * @cmd:
8002  **/
8003 
8004-static int
8005-iegbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8006+static int iegbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8007 {
8008- switch (cmd) {
8009+ switch (cmd) {
8010 #ifdef SIOCGMIIPHY
8011- case SIOCGMIIPHY:
8012- case SIOCGMIIREG:
8013- case SIOCSMIIREG:
8014- return iegbe_mii_ioctl(netdev, ifr, cmd);
8015+ case SIOCGMIIPHY:
8016+ case SIOCGMIIREG:
8017+ case SIOCSMIIREG:
8018+ return iegbe_mii_ioctl(netdev, ifr, cmd);
8019 #endif
8020 #ifdef ETHTOOL_OPS_COMPAT
8021- case SIOCETHTOOL:
8022- return ethtool_ioctl(ifr);
8023+ case SIOCETHTOOL:
8024+ return ethtool_ioctl(ifr);
8025 #endif
8026- default:
8027- return -EOPNOTSUPP;
8028- }
8029+ default:
8030+ return -EOPNOTSUPP;
8031+ }
8032 }
8033 
8034 #ifdef SIOCGMIIPHY
8035@@ -4603,534 +4337,510 @@ iegbe_ioctl(struct net_device *netdev, s
8036  * @cmd:
8037  **/
8038 
8039-static int
8040-iegbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
8041+static int iegbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
8042+ int cmd)
8043 {
8044- struct iegbe_adapter *adapter = netdev_priv(netdev);
8045- struct mii_ioctl_data *data = if_mii(ifr);
8046- int retval;
8047- uint16_t mii_reg;
8048- uint16_t spddplx;
8049- unsigned long flags;
8050-
8051- if((adapter->hw.media_type == iegbe_media_type_oem &&
8052- !iegbe_oem_phy_is_copper(&adapter->hw)) ||
8053- adapter->hw.media_type == iegbe_media_type_fiber ||
8054- adapter->hw.media_type == iegbe_media_type_internal_serdes ) {
8055- return -EOPNOTSUPP;
8056- }
8057- switch (cmd) {
8058- case SIOCGMIIPHY:
8059- data->phy_id = adapter->hw.phy_addr;
8060- break;
8061- case SIOCGMIIREG:
8062+ struct iegbe_adapter *adapter = netdev_priv(netdev);
8063+ struct mii_ioctl_data *data = if_mii(ifr);
8064+ int retval;
8065+ uint16_t mii_reg;
8066+ uint16_t spddplx;
8067+ unsigned long flags = 0;
8068+
8069+ if((adapter->hw.media_type == iegbe_media_type_oem
8070+ && !iegbe_oem_phy_is_copper(&adapter->hw))
8071+ ||adapter->hw.media_type != iegbe_media_type_copper) {
8072+ return -EOPNOTSUPP;
8073+ }
8074+ switch (cmd) {
8075+ case SIOCGMIIPHY:
8076+ data->phy_id = adapter->hw.phy_addr;
8077+ break;
8078+ case SIOCGMIIREG:
8079         if(!capable(CAP_NET_ADMIN)) {
8080- return -EPERM;
8081+ return -EPERM;
8082         }
8083- spin_lock_irqsave(&adapter->stats_lock, flags);
8084- if(iegbe_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8085- &data->val_out)) {
8086- spin_unlock_irqrestore(&adapter->stats_lock, flags);
8087- return -EIO;
8088- }
8089- spin_unlock_irqrestore(&adapter->stats_lock, flags);
8090- break;
8091- case SIOCSMIIREG:
8092+ spin_lock_irqsave(&adapter->stats_lock, flags);
8093+ if(iegbe_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
8094+ &data->val_out)) {
8095+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
8096+ return -EIO;
8097+ }
8098+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
8099+ break;
8100+ case SIOCSMIIREG:
8101         if(!capable(CAP_NET_ADMIN)){
8102- return -EPERM;
8103+ return -EPERM;
8104         }
8105         if(data->reg_num & ~(0x1F)) {
8106- return -EFAULT;
8107+ return -EFAULT;
8108         }
8109- mii_reg = data->val_in;
8110- spin_lock_irqsave(&adapter->stats_lock, flags);
8111- if(iegbe_write_phy_reg(&adapter->hw, data->reg_num,
8112- mii_reg)) {
8113- spin_unlock_irqrestore(&adapter->stats_lock, flags);
8114- return -EIO;
8115- }
8116- switch(adapter->hw.phy_type) {
8117- case iegbe_phy_m88:
8118- switch (data->reg_num) {
8119- case PHY_CTRL:
8120+ mii_reg = data->val_in;
8121+ spin_lock_irqsave(&adapter->stats_lock, flags);
8122+ if(iegbe_write_phy_reg(&adapter->hw, data->reg_num,
8123+ mii_reg)) {
8124+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
8125+ return -EIO;
8126+ }
8127+ switch(adapter->hw.phy_type) {
8128+ case iegbe_phy_m88:
8129+ switch (data->reg_num) {
8130+ case PHY_CTRL:
8131                 if(mii_reg & MII_CR_POWER_DOWN) {
8132- break;
8133+ break;
8134                 }
8135- if(mii_reg & MII_CR_AUTO_NEG_EN) {
8136- adapter->hw.autoneg = 1;
8137- adapter->hw.autoneg_advertised = 0x2F;
8138- } else {
8139+ if(mii_reg & MII_CR_AUTO_NEG_EN) {
8140+ adapter->hw.autoneg = 1;
8141+ adapter->hw.autoneg_advertised = 0x2F;
8142+ } else {
8143                     if(mii_reg & 0x40){
8144- spddplx = SPEED_1000;
8145+ spddplx = SPEED_1000;
8146                     } else if(mii_reg & 0x2000) {
8147- spddplx = SPEED_100;
8148+ spddplx = SPEED_100;
8149                     } else {
8150- spddplx = SPEED_10;
8151+ spddplx = SPEED_10;
8152                           }
8153- spddplx += (mii_reg & 0x100)
8154- ? FULL_DUPLEX :
8155- HALF_DUPLEX;
8156- retval = iegbe_set_spd_dplx(adapter,
8157- spddplx);
8158- if(retval) {
8159- spin_unlock_irqrestore(
8160- &adapter->stats_lock,
8161- flags);
8162- return retval;
8163- }
8164- }
8165- if(netif_running(adapter->netdev)) {
8166- iegbe_down(adapter);
8167- iegbe_up(adapter);
8168+ spddplx += (mii_reg & 0x100)
8169+ ? FULL_DUPLEX :
8170+ HALF_DUPLEX;
8171+ retval = iegbe_set_spd_dplx(adapter,
8172+ spddplx);
8173+ if(retval) {
8174+ spin_unlock_irqrestore(
8175+ &adapter->stats_lock,
8176+ flags);
8177+ return retval;
8178+ }
8179+ }
8180+ if(netif_running(adapter->netdev)) {
8181+ iegbe_down(adapter);
8182+ iegbe_up(adapter);
8183                 } else {
8184- iegbe_reset(adapter);
8185+ iegbe_reset(adapter);
8186                 }
8187- break;
8188- case M88E1000_PHY_SPEC_CTRL:
8189- case M88E1000_EXT_PHY_SPEC_CTRL:
8190- if(iegbe_phy_reset(&adapter->hw)) {
8191- spin_unlock_irqrestore(
8192- &adapter->stats_lock, flags);
8193- return -EIO;
8194- }
8195- break;
8196- }
8197- break;
8198+ break;
8199+ case M88E1000_PHY_SPEC_CTRL:
8200+ case M88E1000_EXT_PHY_SPEC_CTRL:
8201+ if(iegbe_phy_reset(&adapter->hw)) {
8202+ spin_unlock_irqrestore(
8203+ &adapter->stats_lock, flags);
8204+ return -EIO;
8205+ }
8206+ break;
8207+ }
8208+ break;
8209 
8210- case iegbe_phy_oem:
8211- retval = iegbe_oem_mii_ioctl(adapter, flags, ifr, cmd);
8212- if(retval) {
8213- spin_unlock_irqrestore(
8214- &adapter->stats_lock, flags);
8215- return retval;
8216- }
8217- break;
8218+ case iegbe_phy_oem:
8219+ retval = iegbe_oem_mii_ioctl(adapter, flags, ifr, cmd);
8220+ if(retval) {
8221+ spin_unlock_irqrestore(
8222+ &adapter->stats_lock, flags);
8223+ return retval;
8224+ }
8225+ break;
8226 
8227- default:
8228- switch (data->reg_num) {
8229- case PHY_CTRL:
8230+ default:
8231+ switch (data->reg_num) {
8232+ case PHY_CTRL:
8233                 if(mii_reg & MII_CR_POWER_DOWN) {
8234- break;
8235+ break;
8236                 }
8237- if(netif_running(adapter->netdev)) {
8238- iegbe_down(adapter);
8239- iegbe_up(adapter);
8240+ if(netif_running(adapter->netdev)) {
8241+ iegbe_down(adapter);
8242+ iegbe_up(adapter);
8243                 } else {
8244- iegbe_reset(adapter);
8245+ iegbe_reset(adapter);
8246                 }
8247- break;
8248- }
8249- }
8250- spin_unlock_irqrestore(&adapter->stats_lock, flags);
8251- break;
8252- default:
8253- return -EOPNOTSUPP;
8254- }
8255- return E1000_SUCCESS;
8256+ break;
8257+ }
8258+ }
8259+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
8260+ break;
8261+ default:
8262+ return -EOPNOTSUPP;
8263+ }
8264+ return E1000_SUCCESS;
8265 }
8266 #endif
8267 
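Editorial sketch for the hunk above: the SIOCSMIIREG block it re-indents decodes a forced speed/duplex setting from the raw MII control word written by userspace, testing the IEEE BMCR bits the driver uses (0x40 = speed-1000 select, 0x2000 = speed-100 select, 0x100 = full duplex). A minimal sketch of that decode follows; the helper name is hypothetical and the SPEED_x / FULL_DUPLEX / HALF_DUPLEX constants are assumed to come from the driver's headers. Note, hedged: this path still adds the driver's own HALF_DUPLEX/FULL_DUPLEX constants, while the iegbe_set_spd_dplx() hunk further down switches its cases to ethtool's DUPLEX_HALF/DUPLEX_FULL; if the two pairs have different values, a mode forced through this ioctl would fall through to the unsupported-configuration default.

    /* Hypothetical helper: decode a BMCR-style control word into the
     * speed + duplex sum that iegbe_set_spd_dplx() expects. */
    static uint16_t mii_reg_to_spddplx(uint16_t mii_reg)
    {
        uint16_t spddplx;

        if (mii_reg & 0x40)          /* BMCR speed-1000 select */
            spddplx = SPEED_1000;
        else if (mii_reg & 0x2000)   /* BMCR speed-100 select */
            spddplx = SPEED_100;
        else
            spddplx = SPEED_10;

        spddplx += (mii_reg & 0x100) ? FULL_DUPLEX : HALF_DUPLEX;
        return spddplx;
    }
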
8268-void
8269-iegbe_pci_set_mwi(struct iegbe_hw *hw)
8270+void iegbe_pci_set_mwi(struct iegbe_hw *hw)
8271 {
8272- struct iegbe_adapter *adapter = hw->back;
8273-#ifdef HAVE_PCI_SET_MWI
8274- int ret_val = pci_set_mwi(adapter->pdev);
8275-
8276- if(ret_val) {
8277- DPRINTK(PROBE, ERR, "Error in setting MWI\n");
8278- }
8279-#else
8280- pci_write_config_word(adapter->pdev, PCI_COMMAND,
8281- adapter->hw.pci_cmd_word |
8282- PCI_COMMAND_INVALIDATE);
8283-#endif
8284+ struct iegbe_adapter *adapter = hw->back;
8285+ int ret_val = pci_set_mwi(adapter->pdev);
8286+
8287+ if (ret_val)
8288+ DPRINTK(PROBE, ERR, "Error in setting MWI\n");
8289 }
8290 
8291-void
8292-iegbe_pci_clear_mwi(struct iegbe_hw *hw)
8293+void iegbe_pci_clear_mwi(struct iegbe_hw *hw)
8294 {
8295- struct iegbe_adapter *adapter = hw->back;
8296+ struct iegbe_adapter *adapter = hw->back;
8297 
8298-#ifdef HAVE_PCI_SET_MWI
8299- pci_clear_mwi(adapter->pdev);
8300-#else
8301- pci_write_config_word(adapter->pdev, PCI_COMMAND,
8302- adapter->hw.pci_cmd_word &
8303- ~PCI_COMMAND_INVALIDATE);
8304-#endif
8305+ pci_clear_mwi(adapter->pdev);
8306 }
8307 
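Editorial note: the two hunks above drop the HAVE_PCI_SET_MWI fallback that poked PCI_COMMAND_INVALIDATE by hand; on the kernels this patch targets, pci_set_mwi() and pci_clear_mwi() are always available from the PCI core. A minimal sketch of the resulting pattern (the function name is hypothetical):

    #include <linux/pci.h>

    /* Hypothetical helper: toggle Memory-Write-Invalidate via the PCI core. */
    static void example_toggle_mwi(struct pci_dev *pdev, int enable)
    {
        if (enable) {
            /* pci_set_mwi() can fail, e.g. if the cache line size
             * register cannot be programmed, so check the result. */
            if (pci_set_mwi(pdev))
                dev_err(&pdev->dev, "Error in setting MWI\n");
        } else {
            pci_clear_mwi(pdev);   /* returns void; cannot fail */
        }
    }
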
8308 void
8309 iegbe_read_pci_cfg(struct iegbe_hw *hw, uint32_t reg, uint16_t *value)
8310 {
8311- struct iegbe_adapter *adapter = hw->back;
8312+ struct iegbe_adapter *adapter = hw->back;
8313 
8314- pci_read_config_word(adapter->pdev, reg, value);
8315+ pci_read_config_word(adapter->pdev, reg, value);
8316 }
8317 
8318 void
8319 iegbe_write_pci_cfg(struct iegbe_hw *hw, uint32_t reg, uint16_t *value)
8320 {
8321- struct iegbe_adapter *adapter = hw->back;
8322+ struct iegbe_adapter *adapter = hw->back;
8323 
8324- pci_write_config_word(adapter->pdev, reg, *value);
8325+ pci_write_config_word(adapter->pdev, reg, *value);
8326 }
8327 
8328 uint32_t
8329 iegbe_io_read(struct iegbe_hw *hw, unsigned long port)
8330 {
8331- return inl(port);
8332+ return inl(port);
8333 }
8334 
8335 void
8336 iegbe_io_write(struct iegbe_hw *hw, unsigned long port, uint32_t value)
8337 {
8338- outl(value, port);
8339+ outl(value, port);
8340 }
8341 
8342-#ifdef NETIF_F_HW_VLAN_TX
8343-static void
8344-iegbe_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
8345+static void iegbe_vlan_rx_register(struct net_device *netdev,
8346+ struct vlan_group *grp)
8347 {
8348- struct iegbe_adapter *adapter = netdev_priv(netdev);
8349- uint32_t ctrl, rctl;
8350-
8351- iegbe_irq_disable(adapter);
8352- adapter->vlgrp = grp;
8353-
8354- if(grp) {
8355- /* enable VLAN tag insert/strip */
8356- ctrl = E1000_READ_REG(&adapter->hw, CTRL);
8357- ctrl |= E1000_CTRL_VME;
8358- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
8359-
8360- /* enable VLAN receive filtering */
8361- rctl = E1000_READ_REG(&adapter->hw, RCTL);
8362- rctl |= E1000_RCTL_VFE;
8363- rctl &= ~E1000_RCTL_CFIEN;
8364- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
8365- iegbe_update_mng_vlan(adapter);
8366- } else {
8367- /* disable VLAN tag insert/strip */
8368- ctrl = E1000_READ_REG(&adapter->hw, CTRL);
8369- ctrl &= ~E1000_CTRL_VME;
8370- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
8371+ struct iegbe_adapter *adapter = netdev_priv(netdev);
8372+ uint32_t ctrl, rctl;
8373 
8374- /* disable VLAN filtering */
8375- rctl = E1000_READ_REG(&adapter->hw, RCTL);
8376- rctl &= ~E1000_RCTL_VFE;
8377- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
8378- if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
8379- iegbe_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
8380- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
8381- }
8382- }
8383+ if (!test_bit(__E1000_DOWN, &adapter->flags))
8384+ iegbe_irq_disable(adapter);
8385+ adapter->vlgrp = grp;
8386+
8387+ if(grp) {
8388+ /* enable VLAN tag insert/strip */
8389+ ctrl = E1000_READ_REG(&adapter->hw, CTRL);
8390+ ctrl |= E1000_CTRL_VME;
8391+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
8392+
8393+ /* enable VLAN receive filtering */
8394+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
8395+ rctl |= E1000_RCTL_VFE;
8396+ rctl &= ~E1000_RCTL_CFIEN;
8397+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
8398+ iegbe_update_mng_vlan(adapter);
8399+ } else {
8400+ /* disable VLAN tag insert/strip */
8401+ ctrl = E1000_READ_REG(&adapter->hw, CTRL);
8402+ ctrl &= ~E1000_CTRL_VME;
8403+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
8404+
8405+ /* disable VLAN filtering */
8406+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
8407+ rctl &= ~E1000_RCTL_VFE;
8408+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
8409+ if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
8410+ iegbe_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
8411+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
8412+ }
8413+ }
8414 
8415- iegbe_irq_enable(adapter);
8416+ if (!test_bit(__E1000_DOWN, &adapter->flags))
8417+ iegbe_irq_enable(adapter);
8418 }
8419 
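Editorial note: the vlan_rx_register() hunk above wraps both IRQ transitions in test_bit(__E1000_DOWN, &adapter->flags) checks, so a VLAN registration racing with iegbe_down() cannot re-enable interrupts on an adapter that is being torn down. The pattern, reduced to a sketch (function name hypothetical):

    /* Hypothetical sketch of the guard pattern used above: only touch
     * the IRQ state when the interface is not mid-teardown. */
    static void example_guarded_reconfig(struct iegbe_adapter *adapter)
    {
        if (!test_bit(__E1000_DOWN, &adapter->flags))
            iegbe_irq_disable(adapter);

        /* ... reconfigure CTRL/RCTL VLAN bits here ... */

        if (!test_bit(__E1000_DOWN, &adapter->flags))
            iegbe_irq_enable(adapter);
    }
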
8420-static void
8421-iegbe_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
8422+static void iegbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
8423 {
8424- struct iegbe_adapter *adapter = netdev_priv(netdev);
8425- uint32_t vfta, index;
8426- if((adapter->hw.mng_cookie.status &
8427- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
8428+ struct iegbe_adapter *adapter = netdev_priv(netdev);
8429+ uint32_t vfta, index;
8430+ if((adapter->hw.mng_cookie.status &
8431+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
8432         (vid == adapter->mng_vlan_id)) {
8433- return;
8434+ return;
8435     }
8436- /* add VID to filter table */
8437+ /* add VID to filter table */
8438     index = (vid >> 0x5) & 0x7F;
8439- vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
8440+ vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
8441     vfta |= (0x1 << (vid & 0x1F));
8442- iegbe_write_vfta(&adapter->hw, index, vfta);
8443+ iegbe_write_vfta(&adapter->hw, index, vfta);
8444 }
8445 
8446-static void
8447-iegbe_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
8448+static void iegbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
8449 {
8450     struct iegbe_adapter *adapter = netdev_priv(netdev);
8451- uint32_t vfta, index;
8452+ u32 vfta, index;
8453 
8454+ if (!test_bit(__E1000_DOWN, &adapter->flags))
8455     iegbe_irq_disable(adapter);
8456-
8457- if(adapter->vlgrp) {
8458- adapter->vlgrp->vlan_devices[vid] = NULL;
8459- }
8460+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
8461+ if (!test_bit(__E1000_DOWN, &adapter->flags))
8462     iegbe_irq_enable(adapter);
8463 
8464- if((adapter->hw.mng_cookie.status &
8465- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
8466- (vid == adapter->mng_vlan_id)) {
8467- return;
8468- }
8469     /* remove VID from filter table */
8470- index = (vid >> 0x5) & 0x7F;
8471+ index = (vid >> 0x5) & 0x7F;
8472     vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
8473- vfta &= ~(0x1 << (vid & 0x1F));
8474+ vfta &= ~(0x1 << (vid & 0x1F));
8475     iegbe_write_vfta(&adapter->hw, index, vfta);
8476 }
8477 
8478-static void
8479-iegbe_restore_vlan(struct iegbe_adapter *adapter)
8480+static void iegbe_restore_vlan(struct iegbe_adapter *adapter)
8481 {
8482     iegbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
8483 
8484- if(adapter->vlgrp) {
8485- uint16_t vid;
8486- for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
8487- if(!adapter->vlgrp->vlan_devices[vid]) {
8488+ if (adapter->vlgrp) {
8489+ u16 vid;
8490+ for (vid = 0x0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
8491+ if (!vlan_group_get_device(adapter->vlgrp, vid))
8492                 continue;
8493- }
8494             iegbe_vlan_rx_add_vid(adapter->netdev, vid);
8495         }
8496     }
8497 }
8498-#endif
8499 
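Editorial note: the kill_vid and restore_vlan hunks above replace direct indexing of vlgrp->vlan_devices[] with the vlan_group_set_device()/vlan_group_get_device() accessors introduced when struct vlan_group became a two-level table (around 2.6.21); the flat array field no longer exists. vlan_group_set_device() returns quietly when the group pointer is NULL, which is why the explicit if(adapter->vlgrp) test could be dropped. A sketch, with a hypothetical helper name:

    #include <linux/if_vlan.h>

    /* Hypothetical sketch: clear a VID binding and test another via the
     * accessors that replace grp->vlan_devices[vid]. */
    static int example_vid_bound(struct vlan_group *grp, u16 vid)
    {
        vlan_group_set_device(grp, vid, NULL);   /* NULL-safe clear */

        return grp && vlan_group_get_device(grp, vid) != NULL;
    }
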
8500-int
8501-iegbe_set_spd_dplx(struct iegbe_adapter *adapter, uint16_t spddplx)
8502+
8503+int iegbe_set_spd_dplx(struct iegbe_adapter *adapter, u16 spddplx)
8504 {
8505- adapter->hw.autoneg = 0;
8506+ adapter->hw.autoneg = 0x0;
8507 
8508- /* Fiber NICs only allow 1000 gbps Full duplex */
8509- if((adapter->hw.media_type == iegbe_media_type_fiber
8510+ /* Fiber NICs only allow 1000 gbps Full duplex */
8511+ if((adapter->hw.media_type == iegbe_media_type_fiber
8512         || (adapter->hw.media_type == iegbe_media_type_oem
8513             && !iegbe_oem_phy_is_copper(&adapter->hw)))
8514- && spddplx != (SPEED_1000 + FULL_DUPLEX)) {
8515- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
8516- return -EINVAL;
8517- }
8518-
8519- switch(spddplx) {
8520- case SPEED_10 + HALF_DUPLEX:
8521- adapter->hw.forced_speed_duplex = iegbe_10_half;
8522- break;
8523- case SPEED_10 + FULL_DUPLEX:
8524- adapter->hw.forced_speed_duplex = iegbe_10_full;
8525- break;
8526- case SPEED_100 + HALF_DUPLEX:
8527- adapter->hw.forced_speed_duplex = iegbe_100_half;
8528- break;
8529- case SPEED_100 + FULL_DUPLEX:
8530- adapter->hw.forced_speed_duplex = iegbe_100_full;
8531- break;
8532- case SPEED_1000 + FULL_DUPLEX:
8533+ && spddplx != (SPEED_1000 + DUPLEX_FULL)) {
8534+ DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
8535+ return -EINVAL;
8536+ }
8537+
8538+ switch(spddplx) {
8539+ case SPEED_10 + DUPLEX_HALF:
8540+ adapter->hw.forced_speed_duplex = iegbe_10_half;
8541+ break;
8542+ case SPEED_10 + DUPLEX_FULL:
8543+ adapter->hw.forced_speed_duplex = iegbe_10_full;
8544+ break;
8545+ case SPEED_100 + DUPLEX_HALF:
8546+ adapter->hw.forced_speed_duplex = iegbe_100_half;
8547+ break;
8548+ case SPEED_100 + DUPLEX_FULL:
8549+ adapter->hw.forced_speed_duplex = iegbe_100_full;
8550+ break;
8551+ case SPEED_1000 + DUPLEX_FULL:
8552         adapter->hw.autoneg = 0x1;
8553- adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
8554- break;
8555- case SPEED_1000 + HALF_DUPLEX: /* not supported */
8556- default:
8557- DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
8558- return -EINVAL;
8559- }
8560- return 0;
8561+ adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
8562+ break;
8563+ case SPEED_1000 + DUPLEX_HALF: /* not supported */
8564+ default:
8565+ DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
8566+ return -EINVAL;
8567+ }
8568+ return 0x0;
8569 }
8570 
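Editorial note: iegbe_set_spd_dplx() above now switches on ethtool's DUPLEX_HALF/DUPLEX_FULL instead of the driver-private HALF_DUPLEX/FULL_DUPLEX, so the cases line up with the SPEED_x + duplex sums that ethtool-originated callers pass in. With SPEED_10/100/1000 equal to 10/100/1000 and DUPLEX_HALF/DUPLEX_FULL equal to 0/1, every supported combination is a distinct integer:

    /* The five values the switch above distinguishes:
     *   SPEED_10   + DUPLEX_HALF = 10
     *   SPEED_10   + DUPLEX_FULL = 11
     *   SPEED_100  + DUPLEX_HALF = 100
     *   SPEED_100  + DUPLEX_FULL = 101
     *   SPEED_1000 + DUPLEX_FULL = 1001   (1000/half is rejected)
     */
    u16 spddplx = SPEED_100 + DUPLEX_FULL;   /* 101 -> iegbe_100_full */
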
8571 static int
8572 iegbe_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
8573 {
8574- struct pci_dev *pdev = NULL;
8575+ struct pci_dev *pdev = NULL;
8576     pm_message_t state = {0x3};
8577 
8578 
8579- switch(event) {
8580- case SYS_DOWN:
8581- case SYS_HALT:
8582- case SYS_POWER_OFF:
8583- while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
8584+ switch(event) {
8585+ case SYS_DOWN:
8586+ case SYS_HALT:
8587+ case SYS_POWER_OFF:
8588+ while((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
8589             if(pci_dev_driver(pdev) == &iegbe_driver) {
8590- iegbe_suspend(pdev, state);
8591- }
8592- }
8593+ iegbe_suspend(pdev, state);
8594+ }
8595+ }
8596     }
8597- return NOTIFY_DONE;
8598+ return NOTIFY_DONE;
8599 }
8600 
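Editorial note: the reboot notifier above moves from pci_find_device(), which returned an unreferenced pointer and was removed from later kernels, to pci_get_device(), which takes a reference on the device it returns and drops the reference on the device passed in as its third argument. The while-loop idiom therefore stays balanced as long as it runs to completion; a sketch with a hypothetical function name:

    /* Sketch: refcount-safe walk over all PCI devices, as in the
     * notifier above. An early break out of the loop would need an
     * explicit pci_dev_put(pdev) to drop the held reference. */
    static void example_suspend_all_iegbe(pm_message_t state)
    {
        struct pci_dev *pdev = NULL;

        while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
            if (pci_dev_driver(pdev) == &iegbe_driver)
                iegbe_suspend(pdev, state);
        }
    }
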
8601 static int
8602 iegbe_suspend(struct pci_dev *pdev, pm_message_t state)
8603 {
8604- struct net_device *netdev = pci_get_drvdata(pdev);
8605- struct iegbe_adapter *adapter = netdev_priv(netdev);
8606- uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
8607- uint32_t wufc = adapter->wol;
8608- uint16_t cmd_word;
8609+ struct net_device *netdev = pci_get_drvdata(pdev);
8610+ struct iegbe_adapter *adapter = netdev_priv(netdev);
8611+ uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
8612+ uint32_t wufc = adapter->wol;
8613+ uint16_t cmd_word;
8614 
8615- netif_device_detach(netdev);
8616+ netif_device_detach(netdev);
8617 
8618     if(netif_running(netdev)) {
8619- iegbe_down(adapter);
8620+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
8621+ iegbe_down(adapter);
8622     }
8623- /*
8624- * ICP_XXXX style MACs do not have a link up bit in
8625- * the STATUS register, query the PHY directly
8626- */
8627- if(adapter->hw.mac_type != iegbe_icp_xxxx) {
8628- status = E1000_READ_REG(&adapter->hw, STATUS);
8629+ /*
8630+ * ICP_XXXX style MACs do not have a link up bit in
8631+ * the STATUS register, query the PHY directly
8632+ */
8633+ if(adapter->hw.mac_type != iegbe_icp_xxxx) {
8634+ status = E1000_READ_REG(&adapter->hw, STATUS);
8635         if(status & E1000_STATUS_LU) {
8636- wufc &= ~E1000_WUFC_LNKC;
8637+ wufc &= ~E1000_WUFC_LNKC;
8638         }
8639- } else {
8640- int isUp = 0;
8641+ } else {
8642+ int isUp = 0x0;
8643         if(iegbe_oem_phy_is_link_up(&adapter->hw, &isUp) != E1000_SUCCESS) {
8644- isUp = 0;
8645+ isUp = 0x0;
8646         }
8647         if(isUp) {
8648- wufc &= ~E1000_WUFC_LNKC;
8649- }
8650+ wufc &= ~E1000_WUFC_LNKC;
8651+ }
8652     }
8653 
8654- if(wufc) {
8655- iegbe_setup_rctl(adapter);
8656- iegbe_set_multi(netdev);
8657-
8658- /* turn on all-multi mode if wake on multicast is enabled */
8659- if(adapter->wol & E1000_WUFC_MC) {
8660- rctl = E1000_READ_REG(&adapter->hw, RCTL);
8661- rctl |= E1000_RCTL_MPE;
8662- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
8663- }
8664+ if(wufc) {
8665+ iegbe_setup_rctl(adapter);
8666+ iegbe_set_rx_mode(netdev);
8667+
8668+ /* turn on all-multi mode if wake on multicast is enabled */
8669+ if(adapter->wol & E1000_WUFC_MC) {
8670+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
8671+ rctl |= E1000_RCTL_MPE;
8672+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
8673+ }
8674 
8675- if(adapter->hw.mac_type >= iegbe_82540) {
8676- ctrl = E1000_READ_REG(&adapter->hw, CTRL);
8677- /* advertise wake from D3Cold */
8678- #define E1000_CTRL_ADVD3WUC 0x00100000
8679- /* phy power management enable */
8680- ctrl |= E1000_CTRL_ADVD3WUC |
8681- (adapter->hw.mac_type != iegbe_icp_xxxx
8682- ? E1000_CTRL_EN_PHY_PWR_MGMT : 0);
8683+ if(adapter->hw.mac_type >= iegbe_82540) {
8684+ ctrl = E1000_READ_REG(&adapter->hw, CTRL);
8685+ /* advertise wake from D3Cold */
8686+ #define E1000_CTRL_ADVD3WUC 0x00100000
8687+ /* phy power management enable */
8688+ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8689+ ctrl |= E1000_CTRL_ADVD3WUC |
8690+ (adapter->hw.mac_type != iegbe_icp_xxxx
8691+ ? E1000_CTRL_EN_PHY_PWR_MGMT : 0x0);
8692 
8693- E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
8694- }
8695+ E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
8696+ }
8697 
8698- if(adapter->hw.media_type == iegbe_media_type_fiber ||
8699- adapter->hw.media_type == iegbe_media_type_internal_serdes) {
8700- /* keep the laser running in D3 */
8701- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
8702- ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
8703- E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
8704- }
8705+ if(adapter->hw.media_type == iegbe_media_type_fiber ||
8706+ adapter->hw.media_type == iegbe_media_type_internal_serdes) {
8707+ /* keep the laser running in D3 */
8708+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
8709+ ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
8710+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
8711+ }
8712 
8713         /* Allow OEM PHYs (if any exist) to keep the laser
8714          * running in D3 */
8715         iegbe_oem_fiber_live_in_suspend(&adapter->hw);
8716 
8717- /* Allow time for pending master requests to run */
8718- iegbe_disable_pciex_master(&adapter->hw);
8719+ /* Allow time for pending master requests to run */
8720+ iegbe_disable_pciex_master(&adapter->hw);
8721 
8722- E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
8723- E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
8724+ E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
8725+ E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
8726         pci_enable_wake(pdev, 0x3, 0x1);
8727         pci_enable_wake(pdev, 0x4, 0x1); /* 4 == D3 cold */
8728- } else {
8729- E1000_WRITE_REG(&adapter->hw, WUC, 0);
8730- E1000_WRITE_REG(&adapter->hw, WUFC, 0);
8731- pci_enable_wake(pdev, 0x3, 0);
8732- pci_enable_wake(pdev, 0x4, 0); /* 4 == D3 cold */
8733- }
8734+ } else {
8735+ E1000_WRITE_REG(&adapter->hw, WUC, 0x0);
8736+ E1000_WRITE_REG(&adapter->hw, WUFC, 0x0);
8737+ pci_enable_wake(pdev, 0x3, 0x0);
8738+ pci_enable_wake(pdev, 0x4, 0x0); /* 4 == D3 cold */
8739+ }
8740 
8741- pci_save_state(pdev);
8742-
8743- if(adapter->hw.mac_type >= iegbe_82540
8744- && adapter->hw.mac_type != iegbe_icp_xxxx
8745- && adapter->hw.media_type == iegbe_media_type_copper) {
8746- manc = E1000_READ_REG(&adapter->hw, MANC);
8747- if(manc & E1000_MANC_SMBUS_EN) {
8748- manc |= E1000_MANC_ARP_EN;
8749- E1000_WRITE_REG(&adapter->hw, MANC, manc);
8750+ pci_save_state(pdev);
8751+
8752+ if(adapter->hw.mac_type >= iegbe_82540
8753+ && adapter->hw.mac_type != iegbe_icp_xxxx
8754+ && adapter->hw.media_type == iegbe_media_type_copper) {
8755+ manc = E1000_READ_REG(&adapter->hw, MANC);
8756+ if(manc & E1000_MANC_SMBUS_EN) {
8757+ manc |= E1000_MANC_ARP_EN;
8758+ E1000_WRITE_REG(&adapter->hw, MANC, manc);
8759             pci_enable_wake(pdev, 0x3, 0x1);
8760             pci_enable_wake(pdev, 0x4, 0x1); /* 4 == D3 cold */
8761- }
8762- }
8763+ }
8764+ }
8765 
8766- switch(adapter->hw.mac_type) {
8767- case iegbe_82571:
8768- case iegbe_82572:
8769- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
8770- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
8771- ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
8772- break;
8773- case iegbe_82573:
8774- swsm = E1000_READ_REG(&adapter->hw, SWSM);
8775- E1000_WRITE_REG(&adapter->hw, SWSM,
8776- swsm & ~E1000_SWSM_DRV_LOAD);
8777- break;
8778- default:
8779- break;
8780- }
8781+ switch(adapter->hw.mac_type) {
8782+ case iegbe_82571:
8783+ case iegbe_82572:
8784+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
8785+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
8786+ ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
8787+ break;
8788+ case iegbe_82573:
8789+ swsm = E1000_READ_REG(&adapter->hw, SWSM);
8790+ E1000_WRITE_REG(&adapter->hw, SWSM,
8791+ swsm & ~E1000_SWSM_DRV_LOAD);
8792+ break;
8793+ default:
8794+ break;
8795+ }
8796 
8797- pci_disable_device(pdev);
8798- if(adapter->hw.mac_type == iegbe_icp_xxxx) {
8799- /*
8800- * ICP xxxx devices are not true PCI devices, in the context
8801- * of power management, disabling the bus mastership is not
8802- * sufficient to disable the device, it is also necessary to
8803- * disable IO, Memory, and Interrupts if they are enabled.
8804- */
8805- pci_read_config_word(pdev, PCI_COMMAND, &cmd_word);
8806+ pci_disable_device(pdev);
8807+ if(adapter->hw.mac_type == iegbe_icp_xxxx) {
8808+ /*
8809+ * ICP xxxx devices are not true PCI devices, in the context
8810+ * of power management, disabling the bus mastership is not
8811+ * sufficient to disable the device, it is also necessary to
8812+ * disable IO, Memory, and Interrupts if they are enabled.
8813+ */
8814+ pci_read_config_word(pdev, PCI_COMMAND, &cmd_word);
8815         if(cmd_word & PCI_COMMAND_IO) {
8816- cmd_word &= ~PCI_COMMAND_IO;
8817+ cmd_word &= ~PCI_COMMAND_IO;
8818         }
8819         if(cmd_word & PCI_COMMAND_MEMORY) {
8820- cmd_word &= ~PCI_COMMAND_MEMORY;
8821+ cmd_word &= ~PCI_COMMAND_MEMORY;
8822         }
8823         if(cmd_word & PCI_COMMAND_INTX_DISABLE) {
8824- cmd_word &= ~PCI_COMMAND_INTX_DISABLE;
8825+ cmd_word &= ~PCI_COMMAND_INTX_DISABLE;
8826         }
8827- pci_write_config_word(pdev, PCI_COMMAND, cmd_word);
8828- }
8829+ pci_write_config_word(pdev, PCI_COMMAND, cmd_word);
8830+ }
8831 
8832- state.event = (state.event > 0) ? 0x3 : 0;
8833- pci_set_power_state(pdev, state.event);
8834- if(gcu_suspend == 0)
8835+ state.event = (state.event > 0x0) ? 0x3 : 0x0;
8836+ pci_set_power_state(pdev, state.event);
8837+ if(gcu_suspend == 0x0)
8838      {
8839          if(gcu == NULL) {
8840- gcu = pci_find_device(PCI_VENDOR_ID_INTEL, GCU_DEVID, NULL);
8841- }
8842+ gcu = pci_get_device(PCI_VENDOR_ID_INTEL, GCU_DEVID, NULL);
8843+ }
8844          gcu_iegbe_suspend(gcu, 0x3);
8845- gcu_suspend = 1;
8846- gcu_resume = 0;
8847+ gcu_suspend = 0x1;
8848+ gcu_resume = 0x0;
8849      }
8850- return 0;
8851+ return 0x0;
8852 }
8853 
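Editorial note: the suspend hunk above follows the usual e1000-style wake-on-LAN sequence: detach and stop the interface, decide whether link-change wake still applies, program the Wake Up Filter Control (WUFC) and assert PME enable in WUC, arm wake for D3hot and the driver's "4 == D3 cold" encoding, then save config space and power down. The arming branch, reduced to a sketch (function name hypothetical; wufc is assumed to already hold the desired E1000_WUFC_* bits):

    /* Hypothetical sketch of the WoL arming branch above. */
    static void example_arm_wol(struct iegbe_adapter *adapter,
                                struct pci_dev *pdev, uint32_t wufc)
    {
        if (wufc) {
            E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
            E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
            pci_enable_wake(pdev, 0x3, 0x1);   /* D3hot */
            pci_enable_wake(pdev, 0x4, 0x1);   /* driver's D3cold encoding */
        } else {
            E1000_WRITE_REG(&adapter->hw, WUC, 0x0);
            E1000_WRITE_REG(&adapter->hw, WUFC, 0x0);
            pci_enable_wake(pdev, 0x3, 0x0);
            pci_enable_wake(pdev, 0x4, 0x0);
        }
    }
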
8854 #ifdef CONFIG_PM
8855 static int
8856 iegbe_resume(struct pci_dev *pdev)
8857 {
8858- struct net_device *netdev = pci_get_drvdata(pdev);
8859- struct iegbe_adapter *adapter = netdev_priv(netdev);
8860- uint32_t manc, ret_val, swsm;
8861- uint32_t ctrl_ext;
8862+ struct net_device *netdev = pci_get_drvdata(pdev);
8863+ struct iegbe_adapter *adapter = netdev_priv(netdev);
8864+ uint32_t manc, ret_val, swsm;
8865+ uint32_t ctrl_ext;
8866      int offset;
8867     uint32_t vdid;
8868 
8869- if(gcu_resume == 0)
8870+ if(gcu_resume == 0x0)
8871      {
8872          if(gcu == NULL) {
8873- gcu = pci_find_device(PCI_VENDOR_ID_INTEL, GCU_DEVID, NULL);
8874+ gcu = pci_get_device(PCI_VENDOR_ID_INTEL, GCU_DEVID, NULL);
8875            pci_read_config_dword(gcu, 0x00, &vdid);
8876- }
8877-
8878+ }
8879+
8880          if(gcu) {
8881             gcu_iegbe_resume(gcu);
8882- gcu_resume = 1;
8883- gcu_suspend = 0;
8884+ gcu_resume = 0x1;
8885+ gcu_suspend = 0x0;
8886         } else {
8887             printk("Unable to resume GCU!\n");
8888- }
8889+ }
8890      }
8891     pci_set_power_state(pdev, 0x0);
8892- pci_restore_state(pdev);
8893- ret_val = pci_enable_device(pdev);
8894- pci_set_master(pdev);
8895+ pci_restore_state(pdev);
8896+ ret_val = pci_enable_device(pdev);
8897+ pci_set_master(pdev);
8898 
8899     pci_enable_wake(pdev, 0x3, 0x0);
8900     pci_enable_wake(pdev, 0x4, 0x0); /* 4 == D3 cold */
8901 
8902- iegbe_reset(adapter);
8903- E1000_WRITE_REG(&adapter->hw, WUS, ~0);
8904+ iegbe_reset(adapter);
8905+ E1000_WRITE_REG(&adapter->hw, WUS, ~0);
8906     offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_ST)
8907                  + PCI_ST_SMIA_OFFSET;
8908     pci_write_config_dword(adapter->pdev, offset, 0x00000006);
8909@@ -5138,51 +4848,52 @@ iegbe_resume(struct pci_dev *pdev)
8910     E1000_WRITE_REG(&adapter->hw, IMC2, ~0UL);
8911 
8912     if(netif_running(netdev)) {
8913- iegbe_up(adapter);
8914+ iegbe_up(adapter);
8915     }
8916- netif_device_attach(netdev);
8917-
8918- if(adapter->hw.mac_type >= iegbe_82540
8919- && adapter->hw.mac_type != iegbe_icp_xxxx
8920- && adapter->hw.media_type == iegbe_media_type_copper) {
8921- manc = E1000_READ_REG(&adapter->hw, MANC);
8922- manc &= ~(E1000_MANC_ARP_EN);
8923- E1000_WRITE_REG(&adapter->hw, MANC, manc);
8924- }
8925+ netif_device_attach(netdev);
8926 
8927- switch(adapter->hw.mac_type) {
8928- case iegbe_82571:
8929- case iegbe_82572:
8930- ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
8931- E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
8932- ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
8933- break;
8934- case iegbe_82573:
8935- swsm = E1000_READ_REG(&adapter->hw, SWSM);
8936- E1000_WRITE_REG(&adapter->hw, SWSM,
8937- swsm | E1000_SWSM_DRV_LOAD);
8938- break;
8939- default:
8940- break;
8941- }
8942+ if(adapter->hw.mac_type >= iegbe_82540
8943+ && adapter->hw.mac_type != iegbe_icp_xxxx
8944+ && adapter->hw.media_type == iegbe_media_type_copper) {
8945+ manc = E1000_READ_REG(&adapter->hw, MANC);
8946+ manc &= ~(E1000_MANC_ARP_EN);
8947+ E1000_WRITE_REG(&adapter->hw, MANC, manc);
8948+ }
8949+
8950+ switch(adapter->hw.mac_type) {
8951+ case iegbe_82571:
8952+ case iegbe_82572:
8953+ ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
8954+ E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
8955+ ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
8956+ break;
8957+ case iegbe_82573:
8958+ swsm = E1000_READ_REG(&adapter->hw, SWSM);
8959+ E1000_WRITE_REG(&adapter->hw, SWSM,
8960+ swsm | E1000_SWSM_DRV_LOAD);
8961+ break;
8962+ default:
8963+ break;
8964+ }
8965+#endif
8966 
8967- return 0;
8968+ return 0x0;
8969 }
8970-#endif
8971+
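Editorial note: iegbe_resume() above keeps the canonical ordering for bringing a PCI device back: return to D0, restore the config space saved at suspend, re-enable the device, restore bus mastering, and only then touch MMIO (iegbe_reset(), clearing WUS). Note also that the hunk moves the CONFIG_PM #endif above the final return; if no other conditional closes there, that leaves the tail of the function outside #ifdef CONFIG_PM when the option is unset, which is reproduced here exactly as the patch ships. A sketch of the ordering, with a hypothetical helper name:

    /* Hypothetical sketch: resume ordering used above. */
    static int example_resume_order(struct pci_dev *pdev)
    {
        int err;

        pci_set_power_state(pdev, 0x0);   /* back to D0 first */
        pci_restore_state(pdev);          /* replay saved config space */
        err = pci_enable_device(pdev);    /* the driver above ignores this */
        if (err)
            return err;
        pci_set_master(pdev);             /* re-arm bus-master DMA */
        return 0x0;
    }
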
8972 #ifdef CONFIG_NET_POLL_CONTROLLER
8973 /*
8974  * Polling 'interrupt' - used by things like netconsole to send skbs
8975  * without having to re-enable interrupts. It's not called while
8976  * the interrupt routine is executing.
8977  */
8978-static void
8979-iegbe_netpoll(struct net_device *netdev)
8980+static void iegbe_netpoll(struct net_device *netdev)
8981 {
8982- struct iegbe_adapter *adapter = netdev_priv(netdev);
8983- disable_irq(adapter->pdev->irq);
8984- iegbe_intr(adapter->pdev->irq, netdev, NULL);
8985- enable_irq(adapter->pdev->irq);
8986+ struct iegbe_adapter *adapter = netdev_priv(netdev);
8987+ disable_irq(adapter->pdev->irq);
8988+ iegbe_intr(adapter->pdev->irq, netdev);
8989+ enable_irq(adapter->pdev->irq);
8990 }
8991 #endif
8992 
8993+
8994 /* iegbe_main.c */
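Editorial note: the netpoll hunk above reflects the 2.6.19 change that dropped struct pt_regs * from interrupt handlers, so the ISR is now invoked as iegbe_intr(irq, dev_id). The surrounding pattern is standard: mask the line, run one synchronous pass of the handler, unmask. Sketch (function name hypothetical):

    /* Hypothetical sketch of the post-2.6.19 netpoll pattern. */
    static void example_netpoll(struct net_device *netdev)
    {
        struct iegbe_adapter *adapter = netdev_priv(netdev);

        disable_irq(adapter->pdev->irq);          /* keep the real ISR out */
        iegbe_intr(adapter->pdev->irq, netdev);   /* one synchronous poll */
        enable_irq(adapter->pdev->irq);
    }
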
8995--- a/Embedded/src/GbE/iegbe_oem_phy.c
8996+++ b/Embedded/src/GbE/iegbe_oem_phy.c
8997@@ -2,31 +2,31 @@
8998 
8999 GPL LICENSE SUMMARY
9000 
9001- Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
9002+ Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
9003 
9004- This program is free software; you can redistribute it and/or modify
9005+ This program is free software; you can redistribute it and/or modify
9006   it under the terms of version 2 of the GNU General Public License as
9007   published by the Free Software Foundation.
9008 
9009- This program is distributed in the hope that it will be useful, but
9010- WITHOUT ANY WARRANTY; without even the implied warranty of
9011- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9012+ This program is distributed in the hope that it will be useful, but
9013+ WITHOUT ANY WARRANTY; without even the implied warranty of
9014+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9015   General Public License for more details.
9016 
9017- You should have received a copy of the GNU General Public License
9018- along with this program; if not, write to the Free Software
9019+ You should have received a copy of the GNU General Public License
9020+ along with this program; if not, write to the Free Software
9021   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
9022- The full GNU General Public License is included in this distribution
9023+ The full GNU General Public License is included in this distribution
9024   in the file called LICENSE.GPL.
9025 
9026   Contact Information:
9027   Intel Corporation
9028 
9029- version: Embedded.L.1.0.34
9030+ version: Embedded.Release.Patch.L.1.0.7-5
9031 
9032   Contact Information:
9033 
9034- Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
9035+ Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
9036 
9037 *****************************************************************************/
9038 /**************************************************************************
9039@@ -65,11 +65,6 @@ static int32_t iegbe_oem_link_m88_setup(
9040 static int32_t iegbe_oem_set_phy_mode(struct iegbe_hw *hw);
9041 static int32_t iegbe_oem_detect_phy(struct iegbe_hw *hw);
9042 
9043-/* Define specific BCM functions */
9044-static int32_t iegbe_oem_link_bcm5481_setup(struct iegbe_hw *hw);
9045-static int32_t bcm5481_read_18sv (struct iegbe_hw *hw, int sv, uint16_t *data);
9046-static int32_t oi_phy_setup (struct iegbe_hw *hw);
9047-
9048 /**
9049  * iegbe_oem_setup_link
9050  * @hw: iegbe_hw struct containing device specific information
9051@@ -84,7 +79,7 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
9052 {
9053 #ifdef EXTERNAL_MDIO
9054 
9055- /*
9056+ /*
9057      * see iegbe_setup_copper_link() as the primary example. Look at both
9058      * the M88 and IGP functions that are called for ideas, possibly for
9059      * power management.
9060@@ -102,14 +97,14 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
9061     }
9062     /* AFU: add test to exit out if improper phy type
9063      */
9064- /* relevent parts of iegbe_copper_link_preconfig */
9065- ctrl = E1000_READ_REG(hw, CTRL);
9066- ctrl |= E1000_CTRL_SLU;
9067- ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
9068- E1000_WRITE_REG(hw, CTRL, ctrl);
9069-
9070+ /* relevant parts of iegbe_copper_link_preconfig */
9071+ ctrl = E1000_READ_REG(hw, CTRL);
9072+ ctrl |= E1000_CTRL_SLU;
9073+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
9074+ E1000_WRITE_REG(hw, CTRL, ctrl);
9075+
9076     /* this is required for *hw init */
9077- ret_val = iegbe_oem_detect_phy(hw);
9078+ ret_val = iegbe_oem_detect_phy(hw);
9079     if(ret_val) {
9080         return ret_val;
9081     }
9082@@ -119,23 +114,13 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
9083     }
9084 
9085     switch (hw->phy_id) {
9086- case BCM5395S_PHY_ID:
9087- return E1000_SUCCESS;
9088- break;
9089-
9090         case M88E1000_I_PHY_ID:
9091         case M88E1141_E_PHY_ID:
9092             ret_val = iegbe_oem_link_m88_setup(hw);
9093- if(ret_val) {
9094- return ret_val;
9095- }
9096- break;
9097- case BCM5481_PHY_ID:
9098- ret_val = iegbe_oem_link_bcm5481_setup(hw);
9099- if(ret_val) {
9100- return ret_val;
9101+ if(ret_val) {
9102+ return ret_val;
9103             }
9104- break;
9105+ break;
9106         default:
9107             DEBUGOUT("Invalid PHY ID\n");
9108             return -E1000_ERR_PHY_TYPE;
9109@@ -143,16 +128,16 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
9110 
9111      if(hw->autoneg) {
9112          ret_val = iegbe_copper_link_autoneg(hw);
9113- if(ret_val) {
9114- return ret_val;
9115- }
9116+ if(ret_val) {
9117+ return ret_val;
9118      }
9119+ }
9120      else {
9121          DEBUGOUT("Forcing speed and duplex\n");
9122          ret_val = iegbe_phy_force_speed_duplex(hw);
9123      }
9124-
9125- /*
9126+
9127+ /*
9128       * Check link status. Wait up to 100 microseconds for link to become
9129       * valid.
9130       */
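Editorial note: besides dropping the BCM5395S/BCM5481 cases, the hunks above fix the brace nesting of the autoneg branch: the error check now closes inside if(hw->autoneg), so a failure from iegbe_copper_link_autoneg() returns immediately, and the else branch forces speed/duplex only when autoneg is off. The corrected flow, restated from the patched lines:

    if (hw->autoneg) {
        ret_val = iegbe_copper_link_autoneg(hw);
        if (ret_val)
            return ret_val;
    } else {
        DEBUGOUT("Forcing speed and duplex\n");
        ret_val = iegbe_phy_force_speed_duplex(hw);
    }
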
9131@@ -194,51 +179,6 @@ iegbe_oem_setup_link(struct iegbe_hw *hw
9132 #endif /* ifdef EXTERNAL_MDIO */
9133 }
9134 
9135-/**
9136- * iegbe_oem_link_bcm5481_setup
9137- * @hw: iegbe_hw struct containing device specific information
9138- *
9139- * Returns E1000_SUCCESS, negative E1000 error code on failure
9140- *
9141- * copied verbatim from iegbe_oem_link_m88_setup
9142- **/
9143-static int32_t
9144-iegbe_oem_link_bcm5481_setup(struct iegbe_hw *hw)
9145-{
9146- int32_t ret_val;
9147- uint16_t phy_data;
9148-
9149- //DEBUGFUNC(__func__);
9150-
9151- if(!hw)
9152- return -1;
9153-
9154- /* phy_reset_disable is set in iegbe_oem_set_phy_mode */
9155- if(hw->phy_reset_disable)
9156- return E1000_SUCCESS;
9157-
9158- // Enable MDIX in extended control reg.
9159- ret_val = iegbe_oem_read_phy_reg_ex(hw, BCM5481_ECTRL, &phy_data);
9160- if(ret_val)
9161- {
9162- DEBUGOUT("Unable to read BCM5481_ECTRL register\n");
9163- return ret_val;
9164- }
9165-
9166- phy_data &= ~BCM5481_ECTRL_DISMDIX;
9167- ret_val = iegbe_oem_write_phy_reg_ex(hw, BCM5481_ECTRL, phy_data);
9168- if(ret_val)
9169- {
9170- DEBUGOUT("Unable to write BCM5481_ECTRL register\n");
9171- return ret_val;
9172- }
9173-
9174- ret_val = oi_phy_setup (hw);
9175- if (ret_val)
9176- return ret_val;
9177-
9178- return E1000_SUCCESS;
9179-}
9180 
9181 /**
9182  * iegbe_oem_link_m88_setup
9183@@ -253,7 +193,7 @@ static int32_t
9184 iegbe_oem_link_m88_setup(struct iegbe_hw *hw)
9185 {
9186     int32_t ret_val;
9187- uint16_t phy_data;
9188+ uint16_t phy_data = 0;
9189 
9190     DEBUGFUNC1("%s",__func__);
9191 
9192@@ -261,7 +201,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
9193         return -1;
9194     }
9195 
9196- ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
9197+ ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
9198                                               &phy_data);
9199     phy_data |= 0x00000008;
9200     ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
9201@@ -279,7 +219,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
9202 
9203     phy_data &= ~M88E1000_PSCR_ASSERT_CRS_ON_TX;
9204 
9205- /*
9206+ /*
9207      * Options:
9208      * MDI/MDI-X = 0 (default)
9209      * 0 - Auto for all speeds
9210@@ -305,7 +245,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
9211     break;
9212     }
9213 
9214- /*
9215+ /*
9216      * Options:
9217      * disable_polarity_correction = 0 (default)
9218      * Automatic Correction for Reversed Cable Polarity
9219@@ -316,25 +256,25 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
9220 
9221     if(hw->disable_polarity_correction == 1) {
9222         phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
9223- }
9224+ }
9225     ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
9226     if(ret_val) {
9227         DEBUGOUT("Unable to write M88E1000_PHY_SPEC_CTRL register\n");
9228         return ret_val;
9229     }
9230 
9231- /*
9232+ /*
9233      * Force TX_CLK in the Extended PHY Specific Control Register
9234      * to 25MHz clock.
9235      */
9236- ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_EXT_PHY_SPEC_CTRL,
9237+ ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_EXT_PHY_SPEC_CTRL,
9238                                         &phy_data);
9239     if(ret_val) {
9240         DEBUGOUT("Unable to read M88E1000_EXT_PHY_SPEC_CTRL register\n");
9241         return ret_val;
9242     }
9243 
9244- /*
9245+ /*
9246      * For Truxton, it is necessary to add RGMII tx and rx
9247      * timing delay though the EXT_PHY_SPEC_CTRL register
9248      */
9249@@ -350,13 +290,13 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
9250         phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
9251                      M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
9252     }
9253- ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_EXT_PHY_SPEC_CTRL,
9254+ ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_EXT_PHY_SPEC_CTRL,
9255                                          phy_data);
9256     if(ret_val) {
9257             DEBUGOUT("Unable to read M88E1000_EXT_PHY_SPEC_CTRL register\n");
9258         return ret_val;
9259     }
9260-
9261+
9262 
9263     /* SW Reset the PHY so all changes take effect */
9264     ret_val = iegbe_phy_hw_reset(hw);
9265@@ -371,7 +311,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
9266 /**
9267  * iegbe_oem_force_mdi
9268  * @hw: iegbe_hw struct containing device specific information
9269- * @resetPhy: returns true if after calling this function the
9270+ * @resetPhy: returns true if after calling this function the
9271  * PHY requires a reset
9272  *
9273  * Returns E1000_SUCCESS, negative E1000 error code on failure
9274@@ -379,7 +319,7 @@ iegbe_oem_link_m88_setup(struct iegbe_hw
9275  * This is called from iegbe_phy_force_speed_duplex, which is
9276  * called from iegbe_oem_setup_link.
9277  **/
9278-int32_t
9279+int32_t
9280 iegbe_oem_force_mdi(struct iegbe_hw *hw, int *resetPhy)
9281 {
9282 #ifdef EXTERNAL_MDIO
9283@@ -393,35 +333,30 @@ iegbe_oem_force_mdi(struct iegbe_hw *hw,
9284         return -1;
9285     }
9286 
9287- /*
9288+ /*
9289      * a boolean to indicate if the phy needs to be reset
9290- *
9291+ *
9292      * Make note that the M88 phy is what'll be used on Truxton
9293      * see iegbe_phy_force_speed_duplex, which does the following for M88
9294      */
9295       switch (hw->phy_id) {
9296- case BCM5395S_PHY_ID:
9297- case BCM5481_PHY_ID:
9298- DEBUGOUT("WARNING: An empty iegbe_oem_force_mdi() has been called!\n");
9299- break;
9300-
9301           case M88E1000_I_PHY_ID:
9302           case M88E1141_E_PHY_ID:
9303- ret_val = iegbe_oem_read_phy_reg_ex(hw,
9304- M88E1000_PHY_SPEC_CTRL,
9305+ ret_val = iegbe_oem_read_phy_reg_ex(hw,
9306+ M88E1000_PHY_SPEC_CTRL,
9307                                                    &phy_data);
9308               if(ret_val) {
9309                   DEBUGOUT("Unable to read M88E1000_PHY_SPEC_CTRL register\n");
9310                   return ret_val;
9311                }
9312-
9313+
9314                /*
9315- * Clear Auto-Crossover to force MDI manually. M88E1000 requires
9316+ * Clear Auto-Crossover to force MDI manually. M88E1000 requires
9317                 * MDI forced whenever speed and duplex are forced.
9318                 */
9319-
9320+
9321               phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
9322- ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
9323+ ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
9324                                                     phy_data);
9325               if(ret_val) {
9326                   DEBUGOUT("Unable to write M88E1000_PHY_SPEC_CTRL register\n");
9327@@ -458,7 +393,7 @@ iegbe_oem_force_mdi(struct iegbe_hw *hw,
9328  * This is called from iegbe_phy_force_speed_duplex, which is
9329  * called from iegbe_oem_setup_link.
9330  **/
9331-int32_t
9332+int32_t
9333 iegbe_oem_phy_reset_dsp(struct iegbe_hw *hw)
9334 {
9335 #ifdef EXTERNAL_MDIO
9336@@ -478,10 +413,8 @@ iegbe_oem_phy_reset_dsp(struct iegbe_hw
9337      * no-op.
9338      */
9339      switch (hw->phy_id) {
9340- case M88E1000_I_PHY_ID:
9341- case M88E1141_E_PHY_ID:
9342- case BCM5481_PHY_ID:
9343- case BCM5395S_PHY_ID:
9344+ case M88E1000_I_PHY_ID:
9345+ case M88E1141_E_PHY_ID:
9346              DEBUGOUT("No DSP to reset on OEM PHY\n");
9347          break;
9348          default:
9349@@ -508,7 +441,7 @@ iegbe_oem_phy_reset_dsp(struct iegbe_hw
9350  * This is called from iegbe_phy_force_speed_duplex, which is
9351  * called from iegbe_oem_setup_link.
9352  **/
9353-int32_t
9354+int32_t
9355 iegbe_oem_cleanup_after_phy_reset(struct iegbe_hw *hw)
9356 {
9357 #ifdef EXTERNAL_MDIO
9358@@ -520,29 +453,24 @@ iegbe_oem_cleanup_after_phy_reset(struct
9359 
9360     if(!hw) {
9361         return -1;
9362- }
9363+ }
9364 
9365- /*
9366+ /*
9367      * Make note that the M88 phy is what'll be used on Truxton.
9368      * see iegbe_phy_force_speed_duplex, which does the following for M88
9369      */
9370     switch (hw->phy_id) {
9371- case BCM5395S_PHY_ID:
9372- case BCM5481_PHY_ID:
9373- DEBUGOUT("WARNING: An empty iegbe_oem_cleanup_after_phy_reset() has been called!\n");
9374- break;
9375-
9376         case M88E1000_I_PHY_ID:
9377         case M88E1141_E_PHY_ID:
9378             /*
9379- * Because we reset the PHY above, we need to re-force
9380+ * Because we reset the PHY above, we need to re-force
9381              * TX_CLK in the Extended PHY Specific Control Register to
9382              * 25MHz clock. This value defaults back to a 2.5MHz clock
9383              * when the PHY is reset.
9384              */
9385 
9386              ret_val = iegbe_oem_read_phy_reg_ex(hw,
9387- M88E1000_EXT_PHY_SPEC_CTRL,
9388+ M88E1000_EXT_PHY_SPEC_CTRL,
9389                                                  &phy_data);
9390              if(ret_val) {
9391                  DEBUGOUT("Unable to read M88E1000_EXT_SPEC_CTRL register\n");
9392@@ -550,22 +478,23 @@ iegbe_oem_cleanup_after_phy_reset(struct
9393              }
9394 
9395              phy_data |= M88E1000_EPSCR_TX_CLK_25;
9396- ret_val = iegbe_oem_write_phy_reg_ex(hw,
9397- M88E1000_EXT_PHY_SPEC_CTRL,
9398+ ret_val = iegbe_oem_write_phy_reg_ex(hw,
9399+ M88E1000_EXT_PHY_SPEC_CTRL,
9400                                                    phy_data);
9401              if(ret_val) {
9402- DEBUGOUT("Unable to write M88E1000_EXT_PHY_SPEC_CTRL register\n");
9403+ DEBUGOUT("Unable to write M88E1000_EXT_PHY_SPEC_CTRL "
9404+ "register\n");
9405                  return ret_val;
9406              }
9407 
9408              /*
9409               * In addition, because of the s/w reset above, we need to enable
9410- * CRX on TX. This must be set for both full and half duplex
9411+ * CRX on TX. This must be set for both full and half duplex
9412               * operation.
9413               */
9414 
9415- ret_val = iegbe_oem_read_phy_reg_ex(hw,
9416- M88E1000_PHY_SPEC_CTRL,
9417+ ret_val = iegbe_oem_read_phy_reg_ex(hw,
9418+ M88E1000_PHY_SPEC_CTRL,
9419                                                    &phy_data);
9420               if(ret_val) {
9421                   DEBUGOUT("Unable to read M88E1000_PHY_SPEC_CTRL register\n");
9422@@ -573,12 +502,12 @@ iegbe_oem_cleanup_after_phy_reset(struct
9423               }
9424 
9425           phy_data &= ~M88E1000_PSCR_ASSERT_CRS_ON_TX;
9426- ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
9427+ ret_val = iegbe_oem_write_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
9428                                                     phy_data);
9429               if(ret_val) {
9430                   DEBUGOUT("Unable to write M88E1000_PHY_SPEC_CTRL register\n");
9431                   return ret_val;
9432- }
9433+ }
9434         break;
9435         default:
9436             DEBUGOUT("Invalid PHY ID\n");
9437@@ -604,12 +533,12 @@ iegbe_oem_cleanup_after_phy_reset(struct
9438  * This is called from iegbe_oem_setup_link which is
9439  * called from iegbe_setup_link.
9440  **/
9441-static int32_t
9442+static int32_t
9443 iegbe_oem_set_phy_mode(struct iegbe_hw *hw)
9444 {
9445     /*
9446      * it is unclear if it is necessary to set the phy mode. Right now only
9447- * one MAC 82545 Rev 3 does it, but the other MACs like Tolapai do not.
9448+ * one MAC 82545 Rev 3 does it, but the other MACs like tola do not.
9449      * Leave the functionality off for now until it is determined that Tolapai
9450      * needs it as well.
9451      */
9452@@ -638,41 +567,37 @@ iegbe_oem_set_phy_mode(struct iegbe_hw *
9453 #ifndef skip_set_mode
9454     DEBUGOUT("No need to call oem_set_phy_mode on Truxton\n");
9455 #else
9456- /*
9457+ /*
9458      * Make note that the M88 phy is what'll be used on Truxton.
9459      *
9460      * use iegbe_set_phy_mode as example
9461      */
9462     switch (hw->phy_id) {
9463- case BCM5395S_PHY_ID:
9464- case BCM5481_PHY_ID:
9465- DEBUGOUT("WARNING: An empty iegbe_oem_set_phy_mode() has been called!\n");
9466- break;
9467-
9468          case M88E1000_I_PHY_ID:
9469          case M88E1141_E_PHY_ID:
9470- ret_val = iegbe_read_eeprom(hw,
9471- EEPROM_PHY_CLASS_WORD,
9472- 1,
9473+ ret_val = iegbe_read_eeprom(hw,
9474+ EEPROM_PHY_CLASS_WORD,
9475+ 1,
9476                                           &eeprom_data);
9477               if(ret_val) {
9478                   return ret_val;
9479               }
9480 
9481- if((eeprom_data != EEPROM_RESERVED_WORD) &&
9482- (eeprom_data & EEPROM_PHY_CLASS_A))
9483+ if((eeprom_data != EEPROM_RESERVED_WORD) &&
9484+ (eeprom_data & EEPROM_PHY_CLASS_A))
9485               {
9486- ret_val = iegbe_oem_write_phy_reg_ex(hw,
9487- M88E1000_PHY_PAGE_SELECT,
9488- 0x000B);
9489+ ret_val = iegbe_oem_write_phy_reg_ex(hw,
9490+ M88E1000_PHY_PAGE_SELECT,
9491+ 0x000B);
9492                   if(ret_val) {
9493- DEBUGOUT("Unable to write to M88E1000_PHY_PAGE_SELECT register on PHY\n");
9494+ DEBUGOUT("Unable to write to M88E1000_PHY_PAGE_SELECT "
9495+ "register on PHY\n");
9496                       return ret_val;
9497                   }
9498 
9499- ret_val = iegbe_oem_write_phy_reg_ex(hw,
9500- M88E1000_PHY_GEN_CONTROL,
9501- 0x8104);
9502+ ret_val = iegbe_oem_write_phy_reg_ex(hw,
9503+ M88E1000_PHY_GEN_CONTROL,
9504+ 0x8104);
9505                   if(ret_val) {
9506                       DEBUGOUT("Unable to write to M88E1000_PHY_GEN_CONTROL"
9507                                "register on PHY\n");
9508@@ -687,11 +612,12 @@ iegbe_oem_set_phy_mode(struct iegbe_hw *
9509             return -E1000_ERR_PHY_TYPE;
9510     }
9511 #endif
9512-
9513+
9514     return E1000_SUCCESS;
9515 
9516 }
9517 
9518+
9519 /**
9520  * iegbe_oem_detect_phy
9521  * @hw: iegbe_hw struct containing device specific information
9522@@ -702,7 +628,7 @@ iegbe_oem_set_phy_mode(struct iegbe_hw *
9523  *
9524  * This borrows heavily from iegbe_detect_gig_phy
9525  **/
9526-static int32_t
9527+static int32_t
9528 iegbe_oem_detect_phy(struct iegbe_hw *hw)
9529 {
9530     int32_t ret_val;
9531@@ -715,33 +641,20 @@ iegbe_oem_detect_phy(struct iegbe_hw *hw
9532     }
9533     hw->phy_type = iegbe_phy_oem;
9534 
9535-{
9536- // If MAC2 (BCM5395 switch), manually detect the phy
9537- struct iegbe_adapter *adapter;
9538- uint32_t device_number;
9539- adapter = (struct iegbe_adapter *) hw->back;
9540- device_number = PCI_SLOT(adapter->pdev->devfn);
9541- if (device_number == ICP_XXXX_MAC_2) {
9542- hw->phy_id = BCM5395S_PHY_ID;
9543- hw->phy_revision = 0;
9544- return E1000_SUCCESS;
9545- }
9546-}
9547-
9548-
9549     ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_ID1, &phy_id_high);
9550     if(ret_val) {
9551         DEBUGOUT("Unable to read PHY register PHY_ID1\n");
9552         return ret_val;
9553     }
9554-
9555+
9556     usec_delay(0x14);
9557     ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_ID2, &phy_id_low);
9558     if(ret_val) {
9559         DEBUGOUT("Unable to read PHY register PHY_ID2\n");
9560         return ret_val;
9561     }
9562- hw->phy_id = (uint32_t) ((phy_id_high << 0x10) + phy_id_low);
9563+ hw->phy_id = (uint32_t) ((phy_id_high << 0x10) +
9564+ (phy_id_low & PHY_REVISION_MASK));
9565     hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
9566 
9567     return E1000_SUCCESS;
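Editorial note: the detect hunk above stops storing the revision nibble inside phy_id. PHY_ID2's low four bits are the silicon revision, so masking them with PHY_REVISION_MASK makes the later switch(hw->phy_id) comparisons stable across PHY revisions, while the revision is still kept separately. Since the two halves occupy disjoint bit ranges, the patch's '+' is equivalent to OR-ing them, as in this sketch:

    /* Sketch: phy_id assembly as patched above. phy_id_high / phy_id_low
     * are assumed to hold fresh PHY_ID1 / PHY_ID2 reads. */
    uint16_t phy_id_high = 0, phy_id_low = 0;
    uint32_t phy_id, phy_revision;

    phy_id       = ((uint32_t)phy_id_high << 16) |
                   (phy_id_low & PHY_REVISION_MASK);      /* OUI + model */
    phy_revision = phy_id_low & ~PHY_REVISION_MASK;       /* low nibble  */
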
9568@@ -753,15 +666,15 @@ iegbe_oem_detect_phy(struct iegbe_hw *hw
9569  * @hw: iegbe_hw struct containing device specific information
9570  *
9571  * Returns the value of the Inter Packet Gap (IPG) Transmit Time (IPGT) in the
9572- * Transmit IPG register appropriate for the given PHY. This field is only 10
9573+ * Transmit IPG register appropriate for the given PHY. This field is only 10
9574  * bits wide.
9575  *
9576  * In the original iegbe code, only the IPGT field varied between media types.
9577- * If the OEM phy requires setting IPG Receive Time 1 & 2 Registers, it would
9578+ * If the OEM phy requires setting IPG Receive Time 1 & 2 Registers, it would
9579  * be required to modify the iegbe_config_tx() function to accommodate the change
9580  *
9581  **/
9582-uint32_t
9583+uint32_t
9584 iegbe_oem_get_tipg(struct iegbe_hw *hw)
9585 {
9586 #ifdef EXTERNAL_MDIO
9587@@ -777,15 +690,13 @@ iegbe_oem_get_tipg(struct iegbe_hw *hw)
9588     switch (hw->phy_id) {
9589          case M88E1000_I_PHY_ID:
9590          case M88E1141_E_PHY_ID:
9591- case BCM5481_PHY_ID:
9592- case BCM5395S_PHY_ID:
9593              phy_num = DEFAULT_ICP_XXXX_TIPG_IPGT;
9594          break;
9595          default:
9596             DEBUGOUT("Invalid PHY ID\n");
9597             return DEFAULT_ICP_XXXX_TIPG_IPGT;
9598     }
9599-
9600+
9601     return phy_num;
9602 
9603 #else /* ifdef EXTERNAL_MDIO */
9604@@ -803,15 +714,15 @@ iegbe_oem_get_tipg(struct iegbe_hw *hw)
9605  * iegbe_oem_phy_is_copper
9606  * @hw: iegbe_hw struct containing device specific information
9607  *
9608- * Test for media type within the iegbe driver is common, so this is a simple
9609- * test for copper PHYs. The ICP_XXXX family of controllers initially only
9610- * supported copper interconnects (no TBI (ten bit interface) for Fiber
9611- * existed). If future revs support either Fiber or an internal SERDES, it
9612- * may become necessary to evaluate where this function is used to go beyond
9613+ * Test for media type within the iegbe driver is common, so this is a simple
9614+ * test for copper PHYs. The ICP_XXXX family of controllers initially only
9615+ * supported copper interconnects (no TBI (ten bit interface) for Fiber
9616+ * existed). If future revs support either Fiber or an internal SERDES, it
9617+ * may become necessary to evaluate where this function is used to go beyond
9618  * determining whether or not media type is just copper.
9619  *
9620  **/
9621-int
9622+int
9623 iegbe_oem_phy_is_copper(struct iegbe_hw *hw)
9624 {
9625 #ifdef EXTERNAL_MDIO
9626@@ -827,23 +738,21 @@ iegbe_oem_phy_is_copper(struct iegbe_hw
9627     switch (hw->phy_id) {
9628         case M88E1000_I_PHY_ID:
9629         case M88E1141_E_PHY_ID:
9630- case BCM5481_PHY_ID:
9631- case BCM5395S_PHY_ID:
9632             isCopper = TRUE;
9633         break;
9634         default:
9635             DEBUGOUT("Invalid PHY ID\n");
9636             return -E1000_ERR_PHY_TYPE;
9637     }
9638-
9639+
9640     return isCopper;
9641 
9642 #else /* ifdef EXTERNAL_MDIO */
9643 
9644- /*
9645+ /*
9646      * caught between returning true or false. True allows it to
9647      * be entered into && statements w/o ill effect, but false
9648- * would make more sense
9649+ * would make more sense
9650      */
9651     DEBUGOUT("Invalid value for transceiver type, return FALSE\n");
9652     return FALSE;
9653@@ -856,19 +765,19 @@ iegbe_oem_phy_is_copper(struct iegbe_hw
9654  * iegbe_oem_get_phy_dev_number
9655  * @hw: iegbe_hw struct containing device specific information
9656  *
9657- * For ICP_XXXX family of devices, there are 3 MACs, each of which may
9658- * have a different PHY (and indeed a different media interface). This
9659- * function is used to indicate which of the MAC/PHY pairs we are interested
9660+ * For ICP_XXXX family of devices, there are 3 MACs, each of which may
9661+ * have a different PHY (and indeed a different media interface). This
9662+ * function is used to indicate which of the MAC/PHY pairs we are interested
9663  * in.
9664- *
9665+ *
9666  **/
9667-uint32_t
9668+uint32_t
9669 iegbe_oem_get_phy_dev_number(struct iegbe_hw *hw)
9670 {
9671 #ifdef EXTERNAL_MDIO
9672 
9673- /*
9674- * for ICP_XXXX family of devices, the three network interfaces are
9675+ /*
9676+ * for ICP_XXXX family of devices, the three network interfaces are
9677      * differentiated by their PCI device number, where the three share
9678      * the same PCI bus
9679      */
9680@@ -886,15 +795,15 @@ iegbe_oem_get_phy_dev_number(struct iegb
9681 
9682     switch(device_number)
9683     {
9684- case ICP_XXXX_MAC_0:
9685+ case ICP_XXXX_MAC_0:
9686+ hw->phy_addr = 0x00;
9687+ break;
9688+ case ICP_XXXX_MAC_1:
9689           hw->phy_addr = 0x01;
9690       break;
9691- case ICP_XXXX_MAC_1:
9692+ case ICP_XXXX_MAC_2:
9693           hw->phy_addr = 0x02;
9694       break;
9695- case ICP_XXXX_MAC_2:
9696- hw->phy_addr = 0x00;
9697- break;
9698       default: hw->phy_addr = 0x00;
9699     }
9700      return hw->phy_addr;
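Editorial note: with the Broadcom switch support removed, the hunk above also swaps the MDIO address assignments back, so that MAC 0 answers at PHY address 0x00 and MAC 2 at 0x02 (the BCM5395 variant had mapped MAC 2 to address 0x00). The resulting mapping, keyed off the PCI device number shared by the three EP80579 MACs:

    /* Sketch: PCI slot -> MDIO PHY address after this hunk. */
    switch (PCI_SLOT(adapter->pdev->devfn)) {
    case ICP_XXXX_MAC_0: hw->phy_addr = 0x00; break;
    case ICP_XXXX_MAC_1: hw->phy_addr = 0x01; break;
    case ICP_XXXX_MAC_2: hw->phy_addr = 0x02; break;
    default:             hw->phy_addr = 0x00; break;
    }
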
9701@@ -915,7 +824,7 @@ iegbe_oem_get_phy_dev_number(struct iegb
9702  * @cmd: the original IOCTL command that instigated the call chain.
9703  *
9704  * This function abstracts out the code necessary to service the
9705- * SIOCSMIIREG case within the iegbe_mii_ioctl() for oem PHYs.
9706+ * SIOCSMIIREG case within the iegbe_mii_ioctl() for oem PHYs.
9707  * iegbe_mii_ioctl() was implemented for copper phy's only and this
9708  * function will only be called if iegbe_oem_phy_is_copper() returns true for
9709  * a given MAC. Note that iegbe_mii_ioctl() has a compile flag
9710@@ -924,14 +833,14 @@ iegbe_oem_get_phy_dev_number(struct iegb
9711  * NOTE: a spinlock is in effect for the duration of this call. It is
9712  * imperative that a negative value be returned on any error, so
9713  * the spinlock can be released properly.
9714- *
9715+ *
9716  **/
9717 int
9718 iegbe_oem_mii_ioctl(struct iegbe_adapter *adapter, unsigned long flags,
9719                     struct ifreq *ifr, int cmd)
9720 {
9721 #ifdef EXTERNAL_MDIO
9722-
9723+
9724     struct mii_ioctl_data *data = if_mii(ifr);
9725     uint16_t mii_reg = data->val_in;
9726     uint16_t spddplx;
9727@@ -942,12 +851,6 @@ iegbe_oem_mii_ioctl(struct iegbe_adapter
9728     if(!adapter || !ifr) {
9729         return -1;
9730     }
9731-
9732- // If MAC2 (BCM5395 switch) then leave now
9733- if ((PCI_SLOT(adapter->pdev->devfn)) == ICP_XXXX_MAC_2) {
9734- return -1;
9735- }
9736-
9737     switch (data->reg_num) {
9738         case PHY_CTRL:
9739             if(mii_reg & MII_CR_POWER_DOWN) {
9740@@ -956,7 +859,7 @@ iegbe_oem_mii_ioctl(struct iegbe_adapter
9741             if(mii_reg & MII_CR_AUTO_NEG_EN) {
9742                 adapter->hw.autoneg = 1;
9743                 adapter->hw.autoneg_advertised = ICP_XXXX_AUTONEG_ADV_DEFAULT;
9744- }
9745+ }
9746             else {
9747                 if(mii_reg & 0x40) {
9748                     spddplx = SPEED_1000;
9749@@ -976,7 +879,7 @@ iegbe_oem_mii_ioctl(struct iegbe_adapter
9750             if(netif_running(adapter->netdev)) {
9751                 iegbe_down(adapter);
9752                 iegbe_up(adapter);
9753- }
9754+ }
9755             else {
9756                 iegbe_reset(adapter);
9757             }
9758@@ -1043,10 +946,10 @@ void iegbe_oem_fiber_live_in_suspend(str
9759  * Note: The call to iegbe_get_regs() assumed an array of 24 elements
9760  * where the last 11 are passed to this function. If the array
9761  * that is passed to the calling function has its size or element
9762- * defintions changed, this function becomes broken.
9763+ * definitions changed, this function becomes broken.
9764  *
9765  **/
9766-void iegbe_oem_get_phy_regs(struct iegbe_adapter *adapter, uint32_t *data,
9767+void iegbe_oem_get_phy_regs(struct iegbe_adapter *adapter, uint32_t *data,
9768                             uint32_t data_len)
9769 {
9770 #define EXPECTED_ARRAY_LEN 11
9771@@ -1062,13 +965,13 @@ void iegbe_oem_get_phy_regs(struct iegbe
9772      * Use the corrected_length variable to make sure we don't exceed that
9773      * length
9774      */
9775- corrected_len = data_len>EXPECTED_ARRAY_LEN
9776+ corrected_len = data_len>EXPECTED_ARRAY_LEN
9777                     ? EXPECTED_ARRAY_LEN : data_len;
9778     memset(data, 0, corrected_len*sizeof(uint32_t));
9779 
9780 #ifdef EXTERNAL_MDIO
9781 
9782- /*
9783+ /*
9784      * Fill data[] with...
9785      *
9786      * [0] = cable length
9787@@ -1084,16 +987,11 @@ void iegbe_oem_get_phy_regs(struct iegbe
9788      * [10] = mdix mode
9789      */
9790     switch (adapter->hw.phy_id) {
9791- case BCM5395S_PHY_ID:
9792- case BCM5481_PHY_ID:
9793- DEBUGOUT("WARNING: An empty iegbe_oem_get_phy_regs() has been called!\n");
9794- break;
9795-
9796         case M88E1000_I_PHY_ID:
9797         case M88E1141_E_PHY_ID:
9798             if(corrected_len > 0) {
9799- iegbe_oem_read_phy_reg_ex(&adapter->hw,
9800- M88E1000_PHY_SPEC_STATUS,
9801+ iegbe_oem_read_phy_reg_ex(&adapter->hw,
9802+ M88E1000_PHY_SPEC_STATUS,
9803                                           (uint16_t *) &data[0]);
9804             }
9805           if(corrected_len > 0x1){
9806@@ -1106,7 +1004,7 @@ void iegbe_oem_get_phy_regs(struct iegbe
9807               data[0x3] = 0x0; /* Dummy (to align w/ IGP phy reg dump) */
9808             }
9809           if(corrected_len > 0x4) {
9810- iegbe_oem_read_phy_reg_ex(&adapter->hw, M88E1000_PHY_SPEC_CTRL,
9811+ iegbe_oem_read_phy_reg_ex(&adapter->hw, M88E1000_PHY_SPEC_CTRL,
9812                                  (uint16_t *) &data[0x4]);
9813             }
9814           if(corrected_len > 0x5) {
9815@@ -1144,7 +1042,7 @@ void iegbe_oem_get_phy_regs(struct iegbe
9816  * This is called from iegbe_set_phy_loopback in response from call from
9817  * ethtool to place the PHY into loopback mode.
9818  **/
9819-int
9820+int
9821 iegbe_oem_phy_loopback(struct iegbe_adapter *adapter)
9822 {
9823 #ifdef EXTERNAL_MDIO
9824@@ -1165,23 +1063,18 @@ iegbe_oem_phy_loopback(struct iegbe_adap
9825      * was that nonintegrated called iegbe_phy_reset_clk_and_crs(),
9826      * hopefully this won't matter as CRS required for half-duplex
9827      * operation and this is set to full duplex.
9828- *
9829+ *
9830      * Make note that the M88 phy is what'll be used on Truxton
9831      * Loopback configuration is the same for each of the supported PHYs.
9832      */
9833     switch (adapter->hw.phy_id) {
9834- case BCM5395S_PHY_ID:
9835- DEBUGOUT("WARNING: An empty iegbe_oem_phy_loopback() has been called!\n");
9836- break;
9837-
9838         case M88E1000_I_PHY_ID:
9839         case M88E1141_E_PHY_ID:
9840- case BCM5481_PHY_ID:
9841 
9842           adapter->hw.autoneg = FALSE;
9843 
9844           /* turn off Auto-MDI/MDIX */
9845- /*ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw,
9846+ /*ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw,
9847                                                M88E1000_PHY_SPEC_CTRL, 0x0808);
9848           if(ret_val)
9849           {
9850@@ -1206,10 +1099,10 @@ iegbe_oem_phy_loopback(struct iegbe_adap
9851               DEBUGOUT("Unable to write to register PHY_CTRL\n");
9852               return ret_val;
9853           }
9854-
9855-
9856+
9857+
9858           /* force 1000, set loopback */
9859- /*ret_val =
9860+ /*ret_val =
9861                  iegbe_oem_write_phy_reg_ex(&adapter->hw, PHY_CTRL, 0x4140); */
9862           ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw, PHY_CTRL, 0x6100);
9863           if(ret_val) {
9864@@ -1228,21 +1121,21 @@ iegbe_oem_phy_loopback(struct iegbe_adap
9865           E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);
9866 
9867           /*
9868- * Write out to PHY registers 29 and 30 to disable the Receiver.
9869+ * Write out to PHY registers 29 and 30 to disable the Receiver.
9870            * This directly lifted from iegbe_phy_disable_receiver().
9871- *
9872+ *
9873            * The code is currently commented out as for the M88 used in
9874            * Truxton, registers 29 and 30 are unutilized. Leave in, just
9875- * in case we are on the receiving end of an 'undocumented'
9876+ * in case we are on the receiving end of an 'undocumented'
9877            * feature
9878            */
9879- /*
9880+ /*
9881            * iegbe_oem_write_phy_reg_ex(&adapter->hw, 29, 0x001F);
9882            * iegbe_oem_write_phy_reg_ex(&adapter->hw, 30, 0x8FFC);
9883            * iegbe_oem_write_phy_reg_ex(&adapter->hw, 29, 0x001A);
9884            * iegbe_oem_write_phy_reg_ex(&adapter->hw, 30, 0x8FF0);
9885            */
9886-
9887+
9888           break;
9889         default:
9890             DEBUGOUT("Invalid PHY ID\n");
9891@@ -1268,15 +1161,15 @@ iegbe_oem_phy_loopback(struct iegbe_adap
9892  * ethtool to place the PHY out of loopback mode. This handles the OEM
9893  * specific part of loopback cleanup.
9894  **/
9895-void
9896+void
9897 iegbe_oem_loopback_cleanup(struct iegbe_adapter *adapter)
9898 {
9899 #ifdef EXTERNAL_MDIO
9900 
9901- /*
9902- * This borrows liberally from iegbe_loopback_cleanup().
9903+ /*
9904+ * This borrows liberally from iegbe_loopback_cleanup().
9905      * making note that the M88 phy is what'll be used on Truxton
9906- *
9907+ *
9908      * Loopback cleanup is the same for all supported PHYs.
9909      */
9910     int32_t ret_val;
9911@@ -1289,38 +1182,32 @@ iegbe_oem_loopback_cleanup(struct iegbe_
9912     }
9913 
9914     switch (adapter->hw.phy_id) {
9915- case BCM5395S_PHY_ID:
9916- DEBUGOUT("WARNING: An empty iegbe_oem_loopback_cleanup() has been called!\n");
9917- return;
9918- break;
9919-
9920         case M88E1000_I_PHY_ID:
9921         case M88E1141_E_PHY_ID:
9922- case BCM5481_PHY_ID:
9923         default:
9924             adapter->hw.autoneg = TRUE;
9925-
9926- ret_val = iegbe_oem_read_phy_reg_ex(&adapter->hw, PHY_CTRL,
9927+
9928+ ret_val = iegbe_oem_read_phy_reg_ex(&adapter->hw, PHY_CTRL,
9929                                                 &phy_reg);
9930             if(ret_val) {
9931                 DEBUGOUT("Unable to read to register PHY_CTRL\n");
9932                 return;
9933             }
9934-
9935+
9936             if(phy_reg & MII_CR_LOOPBACK) {
9937                 phy_reg &= ~MII_CR_LOOPBACK;
9938-
9939- ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw, PHY_CTRL,
9940+
9941+ ret_val = iegbe_oem_write_phy_reg_ex(&adapter->hw, PHY_CTRL,
9942                                                      phy_reg);
9943                 if(ret_val) {
9944                     DEBUGOUT("Unable to write to register PHY_CTRL\n");
9945                     return;
9946                 }
9947-
9948+
9949                 iegbe_phy_reset(&adapter->hw);
9950             }
9951     }
9952-
9953+
9954 #endif /* ifdef EXTERNAL_MDIO */
9955     return;
9956 
9957@@ -1336,7 +1223,7 @@ iegbe_oem_loopback_cleanup(struct iegbe_
9958  * Called by iegbe_check_downshift(), checks the PHY to see if it running
9959  * at as speed slower than its maximum.
9960  **/
9961-uint32_t
9962+uint32_t
9963 iegbe_oem_phy_speed_downgraded(struct iegbe_hw *hw, uint16_t *isDowngraded)
9964 {
9965 #ifdef EXTERNAL_MDIO
9966@@ -1356,24 +1243,19 @@ iegbe_oem_phy_speed_downgraded(struct ie
9967      */
9968 
9969     switch (hw->phy_id) {
9970- case BCM5395S_PHY_ID:
9971- case BCM5481_PHY_ID:
9972- *isDowngraded = 0;
9973- break;
9974-
9975         case M88E1000_I_PHY_ID:
9976         case M88E1141_E_PHY_ID:
9977- ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
9978+            ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
9979                                                 &phy_data);
9980           if(ret_val) {
9981                 DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
9982                 return ret_val;
9983             }
9984-
9985- *isDowngraded = (phy_data & M88E1000_PSSR_DOWNSHIFT)
9986+
9987+            *isDowngraded = (phy_data & M88E1000_PSSR_DOWNSHIFT)
9988                              >> M88E1000_PSSR_DOWNSHIFT_SHIFT;
9989-
9990- break;
9991+
9992+ break;
9993         default:
9994             DEBUGOUT("Invalid PHY ID\n");
9995             return 1;
9996@@ -1388,7 +1270,7 @@ iegbe_oem_phy_speed_downgraded(struct ie
9997     }
9998 
9999     *isDowngraded = 0;
10000- return 0;
10001+ return 0;
10002 
10003 #endif /* ifdef EXTERNAL_MDIO */
10004 }
10005@@ -1403,7 +1285,7 @@ iegbe_oem_phy_speed_downgraded(struct ie
10006  * Called by iegbe_check_downshift(), checks the PHY to see if it running
10007  * at as speed slower than its maximum.
10008  **/
10009-int32_t
10010+int32_t
10011 iegbe_oem_check_polarity(struct iegbe_hw *hw, uint16_t *polarity)
10012 {
10013 #ifdef EXTERNAL_MDIO
10014@@ -1417,33 +1299,27 @@ iegbe_oem_check_polarity(struct iegbe_hw
10015         return -1;
10016     }
10017 
10018- /*
10019+ /*
10020      * borrow liberally from iegbe_check_polarity.
10021      * Make note that the M88 phy is what'll be used on Truxton
10022      */
10023 
10024     switch (hw->phy_id) {
10025- case BCM5395S_PHY_ID:
10026- case BCM5481_PHY_ID:
10027- *polarity = 0;
10028- break;
10029-
10030         case M88E1000_I_PHY_ID:
10031         case M88E1141_E_PHY_ID:
10032             /* return the Polarity bit in the Status register. */
10033- ret_val = iegbe_oem_read_phy_reg_ex(hw,
10034- M88E1000_PHY_SPEC_STATUS,
10035+            ret_val = iegbe_oem_read_phy_reg_ex(hw,
10036+                                                M88E1000_PHY_SPEC_STATUS,
10037                                                 &phy_data);
10038             if(ret_val) {
10039               DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
10040               return ret_val;
10041             }
10042 
10043- *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY)
10044+            *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY)
10045                          >> M88E1000_PSSR_REV_POLARITY_SHIFT;
10046-
10047- break;
10048-
10049+
10050+ break;
10051          default:
10052               DEBUGOUT("Invalid PHY ID\n");
10053               return -E1000_ERR_PHY_TYPE;
10054@@ -1472,7 +1348,7 @@ iegbe_oem_check_polarity(struct iegbe_hw
10055  * the MAC with the PHY. It turns out on ICP_XXXX, this is not
10056  * done automagically.
10057  **/
10058-int32_t
10059+int32_t
10060 iegbe_oem_phy_is_full_duplex(struct iegbe_hw *hw, int *isFD)
10061 {
10062 #ifdef EXTERNAL_MDIO
10063@@ -1485,40 +1361,22 @@ iegbe_oem_phy_is_full_duplex(struct iegb
10064     if(!hw || !isFD) {
10065         return -1;
10066     }
10067- /*
10068+ /*
10069      * Make note that the M88 phy is what'll be used on Truxton
10070      * see iegbe_config_mac_to_phy
10071      */
10072-
10073+
10074       switch (hw->phy_id) {
10075- case BCM5395S_PHY_ID:
10076- /* Always full duplex */
10077- *isFD = 1;
10078- break;
10079-
10080- case BCM5481_PHY_ID:
10081- ret_val = iegbe_read_phy_reg(hw, BCM5481_ASTAT, &phy_data);
10082- if(ret_val) return ret_val;
10083-
10084- switch (BCM5481_ASTAT_HCD(phy_data)) {
10085- case BCM5481_ASTAT_1KBTFD:
10086- case BCM5481_ASTAT_100BTXFD:
10087- *isFD = 1;
10088- break;
10089- default:
10090- *isFD = 0;
10091- }
10092- break;
10093-
10094           case M88E1000_I_PHY_ID:
10095           case M88E1141_E_PHY_ID:
10096- ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
10097- if(ret_val) {
10098- DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
10099- return ret_val;
10100- }
10101+              ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
10102+                                                  &phy_data);
10103+              if(ret_val) {
10104+                  DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
10105+                  return ret_val;
10106+              }
10107               *isFD = (phy_data & M88E1000_PSSR_DPLX) != 0;
10108-
10109+
10110            break;
10111            default:
10112                DEBUGOUT("Invalid PHY ID\n");
10113@@ -1546,7 +1404,7 @@ iegbe_oem_phy_is_full_duplex(struct iegb
10114  * the MAC with the PHY. It turns out on ICP_XXXX, this is not
10115  * done automagically.
10116  **/
10117-int32_t
10118+int32_t
10119 iegbe_oem_phy_is_speed_1000(struct iegbe_hw *hw, int *is1000)
10120 {
10121 #ifdef EXTERNAL_MDIO
10122@@ -1565,28 +1423,10 @@ iegbe_oem_phy_is_speed_1000(struct iegbe
10123      */
10124 
10125     switch (hw->phy_id) {
10126- case BCM5395S_PHY_ID:
10127- /* Always 1000mb */
10128- *is1000 = 1;
10129- break;
10130-
10131- case BCM5481_PHY_ID:
10132- ret_val = iegbe_read_phy_reg(hw, BCM5481_ASTAT, &phy_data);
10133- if(ret_val) return ret_val;
10134-
10135- switch (BCM5481_ASTAT_HCD(phy_data)) {
10136- case BCM5481_ASTAT_1KBTFD:
10137- case BCM5481_ASTAT_1KBTHD:
10138- *is1000 = 1;
10139- break;
10140- default:
10141- *is1000 = 0;
10142- }
10143- break;
10144-
10145         case M88E1000_I_PHY_ID:
10146         case M88E1141_E_PHY_ID:
10147- ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
10148+            ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
10149+                                                &phy_data);
10150             if(ret_val) {
10151                 DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
10152                 return ret_val;
10153@@ -1638,28 +1478,9 @@ iegbe_oem_phy_is_speed_100(struct iegbe_
10154      * see iegbe_config_mac_to_phy
10155      */
10156     switch (hw->phy_id) {
10157- case BCM5395S_PHY_ID:
10158- /* Always 1000Mb, never 100mb */
10159- *is100 = 0;
10160- break;
10161-
10162- case BCM5481_PHY_ID:
10163- ret_val = iegbe_read_phy_reg(hw, BCM5481_ASTAT, &phy_data);
10164- if(ret_val) return ret_val;
10165-
10166- switch (BCM5481_ASTAT_HCD(phy_data)) {
10167- case BCM5481_ASTAT_100BTXFD:
10168- case BCM5481_ASTAT_100BTXHD:
10169- *is100 = 1;
10170- break;
10171- default:
10172- *is100 = 0;
10173- }
10174- break;
10175-
10176         case M88E1000_I_PHY_ID:
10177         case M88E1141_E_PHY_ID:
10178- ret_val = iegbe_oem_read_phy_reg_ex(hw,
10179+            ret_val = iegbe_oem_read_phy_reg_ex(hw,
10180                                                 M88E1000_PHY_SPEC_STATUS,
10181                                                 &phy_data);
10182             if(ret_val) {
10183@@ -1714,29 +1535,24 @@ iegbe_oem_phy_get_info(struct iegbe_hw *
10184      * see iegbe_phy_m88_get_info
10185      */
10186     switch (hw->phy_id) {
10187- case BCM5395S_PHY_ID:
10188- case BCM5481_PHY_ID:
10189- DEBUGOUT("WARNING: An empty iegbe_oem_phy_get_info() has been called!\n");
10190- break;
10191-
10192         case M88E1000_I_PHY_ID:
10193         case M88E1141_E_PHY_ID:
10194- /* The downshift status is checked only once, after link is
10195- * established and it stored in the hw->speed_downgraded parameter.*/
10196+ /* The downshift status is checked only once, after link is
10197+ * established and it is stored in the hw->speed_downgraded parameter. */
10198             phy_info->downshift = (iegbe_downshift)hw->speed_downgraded;
10199-
10200- ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
10201+
10202+            ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_CTRL,
10203                                                 &phy_data);
10204             if(ret_val) {
10205                 DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_CTRL\n");
10206                 return ret_val;
10207             }
10208 
10209- phy_info->extended_10bt_distance =
10210- (phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE)
10211+            phy_info->extended_10bt_distance =
10212+                (phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE)
10213                  >> M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT;
10214             phy_info->polarity_correction =
10215- (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
10216+                (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
10217                  >> M88E1000_PSCR_POLARITY_REVERSAL_SHIFT;
10218 
10219             /* Check polarity status */
10220@@ -1747,11 +1563,11 @@ iegbe_oem_phy_get_info(struct iegbe_hw *
10221 
10222             phy_info->cable_polarity = polarity;
10223 
10224- ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
10225+            ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
10226                                                 &phy_data);
10227             if(ret_val) {
10228- DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
10229- return ret_val;
10230+ DEBUGOUT("Unable to read register M88E1000_PHY_SPEC_STATUS\n");
10231+ return ret_val;
10232             }
10233 
10234             phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX)
10235@@ -1761,24 +1577,24 @@ iegbe_oem_phy_get_info(struct iegbe_hw *
10236                 /* Cable Length Estimation and Local/Remote Receiver Information
10237                  * are only valid at 1000 Mbps.
10238                  */
10239- phy_info->cable_length =
10240+                phy_info->cable_length =
10241                     (phy_data & M88E1000_PSSR_CABLE_LENGTH)
10242                      >> M88E1000_PSSR_CABLE_LENGTH_SHIFT;
10243 
10244- ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_1000T_STATUS,
10245+                ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_1000T_STATUS,
10246                                                     &phy_data);
10247                 if(ret_val) {
10248                     DEBUGOUT("Unable to read register PHY_1000T_STATUS\n");
10249                     return ret_val;
10250                 }
10251 
10252- phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
10253+                phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
10254                                       >> SR_1000T_LOCAL_RX_STATUS_SHIFT;
10255-
10256- phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
10257+
10258+                phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
10259                                       >> SR_1000T_REMOTE_RX_STATUS_SHIFT;
10260             }
10261-
10262+
10263         break;
10264         default:
10265             DEBUGOUT("Invalid PHY ID\n");
10266@@ -1801,7 +1617,7 @@ iegbe_oem_phy_get_info(struct iegbe_hw *
10267  * This function will perform a software initiated reset of
10268  * the PHY
10269  **/
10270-int32_t
10271+int32_t
10272 iegbe_oem_phy_hw_reset(struct iegbe_hw *hw)
10273 {
10274 #ifdef EXTERNAL_MDIO
10275@@ -1815,18 +1631,13 @@ iegbe_oem_phy_hw_reset(struct iegbe_hw *
10276         return -1;
10277     }
10278     /*
10279- * This code pretty much copies the default case from
10280+ * This code pretty much copies the default case from
10281      * iegbe_phy_reset() as that is what is appropriate for
10282- * the M88 used in truxton.
10283+ * the M88 used in truxton.
10284      */
10285     switch (hw->phy_id) {
10286- case BCM5395S_PHY_ID:
10287- DEBUGOUT("WARNING: An empty iegbe_oem_phy_hw_reset() has been called!\n");
10288- break;
10289-
10290         case M88E1000_I_PHY_ID:
10291         case M88E1141_E_PHY_ID:
10292- case BCM5481_PHY_ID:
10293             ret_val = iegbe_oem_read_phy_reg_ex(hw, PHY_CTRL, &phy_data);
10294             if(ret_val) {
10295                 DEBUGOUT("Unable to read register PHY_CTRL\n");
10296@@ -1864,7 +1675,7 @@ iegbe_oem_phy_hw_reset(struct iegbe_hw *
10297  * to perform and post reset initialiation. Not all PHYs require
10298  * this, which is why it was split off as a seperate function.
10299  **/
10300-void
10301+void
10302 iegbe_oem_phy_init_script(struct iegbe_hw *hw)
10303 {
10304 #ifdef EXTERNAL_MDIO
10305@@ -1877,19 +1688,17 @@ iegbe_oem_phy_init_script(struct iegbe_h
10306 
10307     /* call the GCU func that can do any phy specific init
10308      * functions after a reset
10309- *
10310+ *
10311      * Make note that the M88 phy is what'll be used on Truxton
10312      *
10313- * The closest thing is in iegbe_phy_init_script, however this is
10314+ * The closest thing is in iegbe_phy_init_script, however this is
10315      * for the IGP style of phy. This is probably a no-op for truxton
10316      * but may be needed by OEM's later on
10317- *
10318+ *
10319      */
10320     switch (hw->phy_id) {
10321         case M88E1000_I_PHY_ID:
10322         case M88E1141_E_PHY_ID:
10323- case BCM5481_PHY_ID:
10324- case BCM5395S_PHY_ID:
10325             DEBUGOUT("Nothing to do for OEM PHY Init");
10326         break;
10327         default:
10328@@ -1926,13 +1735,8 @@ iegbe_oem_read_phy_reg_ex(struct iegbe_h
10329         return -1;
10330     }
10331 
10332- if (hw->phy_id == BCM5395S_PHY_ID) {
10333- DEBUGOUT("WARNING: iegbe_oem_read_phy_reg_ex() has been unexpectedly called!\n");
10334- return -1;
10335- }
10336-
10337     /* call the GCU func that will read the phy
10338- *
10339+ *
10340      * Make note that the M88 phy is what'll be used on Truxton.
10341      *
10342      * The closest thing is in iegbe_read_phy_reg_ex.
10343@@ -1940,7 +1744,7 @@ iegbe_oem_read_phy_reg_ex(struct iegbe_h
10344      * NOTE: this is 1 (of 2) functions that is truly dependant on the
10345      * gcu module
10346      */
10347-
10348+
10349         ret_val = gcu_read_eth_phy(iegbe_oem_get_phy_dev_number(hw),
10350                                    reg_addr, phy_data);
10351         if(ret_val) {
10352@@ -1962,10 +1766,10 @@ iegbe_oem_read_phy_reg_ex(struct iegbe_h
10353  *
10354  * Returns E1000_SUCCESS, negative E1000 error code on failure
10355  *
10356- * This is called from iegbe_config_mac_to_phy. Various supported
10357+ * This is called from iegbe_config_mac_to_phy. Various supported
10358  * Phys may require the RGMII/RMII Translation gasket be set to RMII.
10359  **/
10360-int32_t
10361+int32_t
10362 iegbe_oem_set_trans_gasket(struct iegbe_hw *hw)
10363 {
10364 #ifdef EXTERNAL_MDIO
10365@@ -1978,17 +1782,12 @@ iegbe_oem_set_trans_gasket(struct iegbe_
10366     }
10367 
10368      switch (hw->phy_id) {
10369- case BCM5395S_PHY_ID:
10370- case BCM5481_PHY_ID:
10371- DEBUGOUT("WARNING: An empty iegbe_oem_set_trans_gasket() has been called!\n");
10372- break;
10373-
10374          case M88E1000_I_PHY_ID:
10375          case M88E1141_E_PHY_ID:
10376          /* Gasket set correctly for Marvell Phys, so nothing to do */
10377          break;
10378          /* Add your PHY_ID here if your device requires an RMII interface
10379- case YOUR_PHY_ID:
10380+ case YOUR_PHY_ID:
10381              ctrl_aux_reg = E1000_READ_REG(hw, CTRL_AUX);
10382              ctrl_aux_reg |= E1000_CTRL_AUX_ICP_xxxx_MII_TGS; // Set the RGMII_RMII bit
10383          */
10384@@ -2032,7 +1831,7 @@ iegbe_oem_write_phy_reg_ex(struct iegbe_
10385         return -1;
10386     }
10387     /* call the GCU func that will write to the phy
10388- *
10389+ *
10390      * Make note that the M88 phy is what'll be used on Truxton.
10391      *
10392      * The closest thing is in iegbe_write_phy_reg_ex
10393@@ -2062,11 +1861,11 @@ iegbe_oem_write_phy_reg_ex(struct iegbe_
10394  * @hw struct iegbe_hw hardware specific data
10395  *
10396  * iegbe_reset_hw is called to reset the MAC. If, for
10397- * some reason the PHY needs to be reset as well, this
10398+ * some reason the PHY needs to be reset as well, this
10399  * should return TRUE and then iegbe_oem_phy_hw_reset()
10400  * will be called.
10401  **/
10402-int
10403+int
10404 iegbe_oem_phy_needs_reset_with_mac(struct iegbe_hw *hw)
10405 {
10406 #ifdef EXTERNAL_MDIO
10407@@ -2079,16 +1878,14 @@ iegbe_oem_phy_needs_reset_with_mac(struc
10408         return FALSE;
10409     }
10410 
10411- /*
10412+ /*
10413      * From the original iegbe driver, the M88
10414- * PHYs did not seem to need this reset,
10415+ * PHYs did not seem to need this reset,
10416      * so returning FALSE.
10417      */
10418     switch (hw->phy_id) {
10419         case M88E1000_I_PHY_ID:
10420         case M88E1141_E_PHY_ID:
10421- case BCM5481_PHY_ID:
10422- case BCM5395S_PHY_ID:
10423             ret_val = FALSE;
10424         break;
10425         default:
10426@@ -2116,7 +1913,7 @@ iegbe_oem_phy_needs_reset_with_mac(struc
10427  * tweaking of the PHY, for PHYs that support a DSP.
10428  *
10429  **/
10430-int32_t
10431+int32_t
10432 iegbe_oem_config_dsp_after_link_change(struct iegbe_hw *hw,
10433                                        int link_up)
10434 {
10435@@ -2138,8 +1935,6 @@ iegbe_oem_config_dsp_after_link_change(s
10436     switch (hw->phy_id) {
10437         case M88E1000_I_PHY_ID:
10438         case M88E1141_E_PHY_ID:
10439- case BCM5481_PHY_ID:
10440- case BCM5395S_PHY_ID:
10441             DEBUGOUT("No DSP to configure on OEM PHY");
10442         break;
10443         default:
10444@@ -2165,7 +1960,7 @@ iegbe_oem_config_dsp_after_link_change(s
10445  *
10446  *
10447  **/
10448-int32_t
10449+int32_t
10450 iegbe_oem_get_cable_length(struct iegbe_hw *hw,
10451                            uint16_t *min_length,
10452                            uint16_t *max_length)
10453@@ -2177,21 +1972,15 @@ iegbe_oem_get_cable_length(struct iegbe_
10454     uint16_t phy_data;
10455 
10456    DEBUGFUNC1("%s",__func__);
10457-
10458+
10459     if(!hw || !min_length || !max_length) {
10460         return -1;
10461     }
10462 
10463     switch (hw->phy_id) {
10464- case BCM5395S_PHY_ID:
10465- case BCM5481_PHY_ID:
10466- *min_length = 0;
10467- *max_length = iegbe_igp_cable_length_150;
10468- break;
10469-
10470         case M88E1000_I_PHY_ID:
10471         case M88E1141_E_PHY_ID:
10472- ret_val = iegbe_oem_read_phy_reg_ex(hw,
10473+            ret_val = iegbe_oem_read_phy_reg_ex(hw,
10474                                                 M88E1000_PHY_SPEC_STATUS,
10475                                                 &phy_data);
10476             if(ret_val) {
10477@@ -2246,13 +2035,13 @@ iegbe_oem_get_cable_length(struct iegbe_
10478 /**
10479  * iegbe_oem_phy_is_link_up
10480  * @hw iegbe_hw struct containing device specific information
10481- * @isUp a boolean returning true if link is up
10482+ * @isUp a boolean returning true if link is up
10483  *
10484  * This is called as part of iegbe_config_mac_to_phy() to align
10485  * the MAC with the PHY. It turns out on ICP_XXXX, this is not
10486  * done automagically.
10487  **/
10488-int32_t
10489+int32_t
10490 iegbe_oem_phy_is_link_up(struct iegbe_hw *hw, int *isUp)
10491 {
10492 #ifdef EXTERNAL_MDIO
10493@@ -2266,35 +2055,19 @@ iegbe_oem_phy_is_link_up(struct iegbe_hw
10494     if(!hw || !isUp) {
10495         return -1;
10496     }
10497- /*
10498+ /*
10499      * Make note that the M88 phy is what'll be used on Truxton
10500      * see iegbe_config_mac_to_phy
10501      */
10502 
10503     switch (hw->phy_id) {
10504- case BCM5395S_PHY_ID:
10505- /* Link always up */
10506- *isUp = TRUE;
10507- return E1000_SUCCESS;
10508- break;
10509-
10510- case BCM5481_PHY_ID:
10511- iegbe_oem_read_phy_reg_ex(hw, BCM5481_ESTAT, &phy_data);
10512- ret_val = iegbe_oem_read_phy_reg_ex(hw, BCM5481_ESTAT, &phy_data);
10513- if(ret_val)
10514- {
10515- DEBUGOUT("Unable to read PHY register BCM5481_ESTAT\n");
10516- return ret_val;
10517- }
10518- statusMask = BCM5481_ESTAT_LINK;
10519- break;
10520-
10521- case M88E1000_I_PHY_ID:
10522+ case M88E1000_I_PHY_ID:
10523         case M88E1141_E_PHY_ID:
10524- iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
10525- ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
10526+            iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
10527+            ret_val = iegbe_oem_read_phy_reg_ex(hw, M88E1000_PHY_SPEC_STATUS,
10528+                                                &phy_data);
10529             statusMask = M88E1000_PSSR_LINK;
10530- break;
10531+ break;
10532         default:
10533             DEBUGOUT("Invalid PHY ID\n");
10534             return -E1000_ERR_PHY_TYPE;
10535@@ -2319,213 +2092,3 @@ iegbe_oem_phy_is_link_up(struct iegbe_hw
10536 #endif /* ifdef EXTERNAL_MDIO */
10537 }
10538 
10539-
10540-
10541-//-----
10542-// Read BCM5481 expansion register
10543-//
10544-int32_t
10545-bcm5481_read_ex (struct iegbe_hw *hw, uint16_t reg, uint16_t *data)
10546-{
10547- int ret;
10548- uint16_t selector;
10549- uint16_t reg_data;
10550-
10551- // Get the current value of bits 15:12
10552- ret = iegbe_oem_read_phy_reg_ex (hw, 0x15, &selector);
10553- if (ret)
10554- return ret;
10555-
10556- // Select the expansion register
10557- selector &= 0xf000;
10558- selector |= (0xf << 8) | (reg);
10559- iegbe_oem_write_phy_reg_ex (hw, 0x17, selector);
10560-
10561- // Read the expansion register
10562- ret = iegbe_oem_read_phy_reg_ex (hw, 0x15, &reg_data);
10563-
10564- // De-select the expansion registers.
10565- selector &= 0xf000;
10566- iegbe_oem_write_phy_reg_ex (hw, 0x17, selector);
10567-
10568- if (ret)
10569- return ret;
10570-
10571- *data = reg_data;
10572- return ret;
10573-}
10574-
10575-//-----
10576-// Read reg 0x18 sub-register
10577-//
10578-static int32_t
10579-bcm5481_read_18sv (struct iegbe_hw *hw, int sv, uint16_t *data)
10580-{
10581- int ret;
10582- uint16_t tmp_data;
10583-
10584- // Select reg 0x18, sv
10585- tmp_data = ((sv & BCM5481_R18H_SV_MASK) << 12) | BCM5481_R18H_SV_MCTRL;
10586- ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R18H, tmp_data);
10587- if(ret)
10588- return ret;
10589-
10590- // Read reg 0x18, sv
10591- ret = iegbe_oem_read_phy_reg_ex (hw, BCM5481_R18H, &tmp_data);
10592- if(ret)
10593- return ret;
10594-
10595- *data = tmp_data;
10596- return ret;
10597-}
10598-
10599-//-----
10600-// Read reg 0x1C sub-register
10601-//
10602-int32_t
10603-bcm5481_read_1csv (struct iegbe_hw *hw, int sv, uint16_t *data)
10604-{
10605- int ret;
10606- uint16_t tmp_data;
10607-
10608- // Select reg 0x1c, sv
10609- tmp_data = ((sv & BCM5481_R1CH_SV_MASK) << BCM5481_R1CH_SV_SHIFT);
10610-
10611- ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R1CH, tmp_data);
10612- if(ret)
10613- return ret;
10614-
10615- // Read reg 0x1c, sv
10616- ret = iegbe_oem_read_phy_reg_ex (hw, BCM5481_R1CH, &tmp_data);
10617- if(ret)
10618- return ret;
10619-
10620- *data = tmp_data;
10621- return ret;
10622-}
10623-
10624-//-----
10625-// Read-modify-write a 0x1C register.
10626-//
10627-// hw - hardware access info.
10628-// reg - 0x1C register to modify.
10629-// data - bits which should be set.
10630-// mask - the '1' bits in this argument will be cleared in the data
10631-// read from 'reg' then 'data' will be or'd in and the result
10632-// will be written to 'reg'.
10633-
10634-int32_t
10635-bcm5481_rmw_1csv (struct iegbe_hw *hw, uint16_t reg, uint16_t data, uint16_t mask)
10636-{
10637- int32_t ret;
10638- uint16_t reg_data;
10639-
10640- ret = 0;
10641-
10642- ret = bcm5481_read_1csv (hw, reg, &reg_data);
10643- if (ret)
10644- {
10645- DEBUGOUT("Unable to read BCM5481 1CH register\n");
10646- printk (KERN_ERR "Unable to read BCM5481 1CH register [0x%x]\n", reg);
10647- return ret;
10648- }
10649-
10650- reg_data &= ~mask;
10651- reg_data |= (BCM5481_R1CH_WE | data);
10652-
10653- ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R1CH, reg_data);
10654- if(ret)
10655- {
10656- DEBUGOUT("Unable to write BCM5481 1CH register\n");
10657- printk (KERN_ERR "Unable to write BCM5481 1CH register\n");
10658- return ret;
10659- }
10660-
10661- return ret;
10662-}
10663-
10664-int32_t
10665-oi_phy_setup (struct iegbe_hw *hw)
10666-{
10667- int ret;
10668- uint16_t pmii_data;
10669- uint16_t mctrl_data;
10670- uint16_t cacr_data;
10671- uint16_t sc1_data;
10672- uint16_t lctl_data;
10673-
10674- ret = 0;
10675-
10676- // Set low power mode via reg 0x18, sv010, bit 6
10677- // Do a read-modify-write on reg 0x18, sv010 register to preserve existing bits.
10678- ret = bcm5481_read_18sv (hw, BCM5481_R18H_SV_PMII, &pmii_data);
10679- if (ret)
10680- {
10681- DEBUGOUT("Unable to read BCM5481_R18H_SV_PMII register\n");
10682- printk (KERN_ERR "Unable to read BCM5481_R18H_SV_PMII register\n");
10683- return ret;
10684- }
10685-
10686- // Set the LPM bit in the data just read and write back to sv010
10687- // The shadow register select bits [2:0] are set by reading the sv010
10688- // register.
10689- pmii_data |= BCM5481_R18H_SV010_LPM;
10690- ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R18H, pmii_data);
10691- if(ret)
10692- {
10693- DEBUGOUT("Unable to write BCM5481_R18H register\n");
10694- printk (KERN_ERR "Unable to write BCM5481_R18H register\n");
10695- return ret;
10696- }
10697-
10698-
10699- // Set the RGMII RXD to RXC skew bit in reg 0x18, sv111
10700-
10701- if (bcm5481_read_18sv (hw, BCM5481_R18H_SV_MCTRL, &mctrl_data))
10702- {
10703- DEBUGOUT("Unable to read BCM5481_R18H_SV_MCTRL register\n");
10704- printk (KERN_ERR "Unable to read BCM5481_R18H_SV_MCTRL register\n");
10705- return ret;
10706- }
10707- mctrl_data |= (BCM5481_R18H_WE | BCM5481_R18H_SV111_SKEW);
10708-
10709- ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R18H, mctrl_data);
10710- if(ret)
10711- {
10712- DEBUGOUT("Unable to write BCM5481_R18H register\n");
10713- printk (KERN_ERR "Unable to write BCM5481_R18H register\n");
10714- return ret;
10715- }
10716-
10717-
10718- // Enable RGMII transmit clock delay in reg 0x1c, sv00011
10719- ret = bcm5481_read_1csv (hw, BCM5481_R1CH_CACR, &cacr_data);
10720- if (ret)
10721- {
10722- DEBUGOUT("Unable to read BCM5481_R1CH_CACR register\n");
10723- printk (KERN_ERR "Unable to read BCM5481_R1CH_CACR register\n");
10724- return ret;
10725- }
10726-
10727- cacr_data |= (BCM5481_R1CH_WE | BCM5481_R1CH_CACR_TCD);
10728-
10729- ret = iegbe_oem_write_phy_reg_ex (hw, BCM5481_R1CH, cacr_data);
10730- if(ret)
10731- {
10732- DEBUGOUT("Unable to write BCM5481_R1CH register\n");
10733- printk (KERN_ERR "Unable to write BCM5481_R1CH register\n");
10734- return ret;
10735- }
10736-
10737- // Enable dual link speed indication (0x1c, sv 00010, bit 2)
10738- ret = bcm5481_rmw_1csv (hw, BCM5481_R1CH_SC1, BCM5481_R1CH_SC1_LINK, BCM5481_R1CH_SC1_LINK);
10739- if (ret)
10740- return ret;
10741-
10742- // Enable link and activity on ACTIVITY LED (0x1c, sv 01001, bit 4=1, bit 3=0)
10743- ret = bcm5481_rmw_1csv (hw, BCM5481_R1CH_LCTRL, BCM5481_R1CH_LCTRL_ALEN, BCM5481_R1CH_LCTRL_ALEN | BCM5481_R1CH_LCTRL_AEN);
10744- if (ret)
10745- return ret;
10746-
10747- return ret;
10748-}
10749--- a/Embedded/src/GbE/iegbe_oem_phy.h
10750+++ b/Embedded/src/GbE/iegbe_oem_phy.h
10751@@ -2,31 +2,31 @@
10752 
10753 GPL LICENSE SUMMARY
10754 
10755- Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
10756+ Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
10757 
10758- This program is free software; you can redistribute it and/or modify
10759+ This program is free software; you can redistribute it and/or modify
10760   it under the terms of version 2 of the GNU General Public License as
10761   published by the Free Software Foundation.
10762 
10763- This program is distributed in the hope that it will be useful, but
10764- WITHOUT ANY WARRANTY; without even the implied warranty of
10765- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10766+ This program is distributed in the hope that it will be useful, but
10767+ WITHOUT ANY WARRANTY; without even the implied warranty of
10768+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10769   General Public License for more details.
10770 
10771- You should have received a copy of the GNU General Public License
10772- along with this program; if not, write to the Free Software
10773+ You should have received a copy of the GNU General Public License
10774+ along with this program; if not, write to the Free Software
10775   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
10776- The full GNU General Public License is included in this distribution
10777+ The full GNU General Public License is included in this distribution
10778   in the file called LICENSE.GPL.
10779 
10780   Contact Information:
10781   Intel Corporation
10782 
10783- version: Embedded.L.1.0.34
10784+ version: Embedded.Release.Patch.L.1.0.7-5
10785 
10786   Contact Information:
10787-
10788- Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
10789+
10790+ Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
10791 
10792 *******************************************************************************/
10793 #ifndef _IEGBE_OEM_PHY_H_
10794@@ -45,10 +45,10 @@ int32_t iegbe_oem_set_trans_gasket(struc
10795 uint32_t iegbe_oem_get_tipg(struct iegbe_hw *hw);
10796 int iegbe_oem_phy_is_copper(struct iegbe_hw *hw);
10797 uint32_t iegbe_oem_get_phy_dev_number(struct iegbe_hw *hw);
10798-int iegbe_oem_mii_ioctl(struct iegbe_adapter *adapter, unsigned long flags,
10799+int iegbe_oem_mii_ioctl(struct iegbe_adapter *adapter, unsigned long flags,
10800                         struct ifreq *ifr, int cmd);
10801 void iegbe_oem_fiber_live_in_suspend(struct iegbe_hw *hw);
10802-void iegbe_oem_get_phy_regs(struct iegbe_adapter *adapter, uint32_t *data,
10803+void iegbe_oem_get_phy_regs(struct iegbe_adapter *adapter, uint32_t *data,
10804                             uint32_t data_length);
10805 int iegbe_oem_phy_loopback(struct iegbe_adapter *adapter);
10806 void iegbe_oem_loopback_cleanup(struct iegbe_adapter *adapter);
10807@@ -94,81 +94,14 @@ int32_t iegbe_oem_phy_is_link_up(struct
10808 #define ICP_XXXX_MAC_2 2
10809 
10810 #define DEFAULT_ICP_XXXX_TIPG_IPGT 8 /* Inter Packet Gap Transmit Time */
10811-#define ICP_XXXX_TIPG_IPGT_MASK 0x000003FFUL
10812-#define BCM5481_PHY_ID 0x0143BCA2
10813-#define BCM5395S_PHY_ID 0x0143BCF0
10814+#define ICP_XXXX_TIPG_IPGT_MASK 0x000003FFUL
10815 
10816 /* Miscellaneous defines */
10817 #ifdef IEGBE_10_100_ONLY
10818- #define ICP_XXXX_AUTONEG_ADV_DEFAULT 0x0F
10819+ #define ICP_XXXX_AUTONEG_ADV_DEFAULT 0x0F
10820 #else
10821     #define ICP_XXXX_AUTONEG_ADV_DEFAULT 0x2F
10822 #endif
10823 
10824-//-----
10825-// BCM5481 specifics
10826-
10827-#define BCM5481_ECTRL (0x10)
10828-#define BCM5481_ESTAT (0x11)
10829-#define BCM5481_RXERR (0x12)
10830-#define BCM5481_EXPRW (0x15)
10831-#define BCM5481_EXPACC (0x17)
10832-#define BCM5481_ASTAT (0x19)
10833-#define BCM5481_R18H (0x18)
10834-#define BCM5481_R1CH (0x1c)
10835-
10836-//-----
10837-// indirect register access via register 18h
10838-
10839-#define BCM5481_R18H_SV_MASK (7) // Mask for SV bits.
10840-#define BCM5481_R18H_SV_ACTRL (0) // SV000 Aux. control
10841-#define BCM5481_R18H_SV_10BT (1) // SV001 10Base-T
10842-#define BCM5481_R18H_SV_PMII (2) // SV010 Power/MII control
10843-#define BCM5481_R18H_SV_MTEST (4) // SV100 Misc. test
10844-#define BCM5481_R18H_SV_MCTRL (7) // SV111 Misc. control
10845-
10846-#define BCM5481_R18H_SV001_POL (1 << 13) // Polarity
10847-#define BCM5481_R18H_SV010_LPM (1 << 6)
10848-#define BCM5481_R18H_SV111_SKEW (1 << 8)
10849-#define BCM5481_R18H_WE (1 << 15) // Write enable
10850-
10851-// 0x1c registers
10852-#define BCM5481_R1CH_SV_SHIFT (10)
10853-#define BCM5481_R1CH_SV_MASK (0x1f)
10854-#define BCM5481_R1CH_SC1 (0x02) // sv00010 Spare control 1
10855-#define BCM5481_R1CH_CACR (0x03) // sv00011 Clock alignment control
10856-#define BCM5481_R1CH_LCTRL (0x09) // sv01001 LED control
10857-#define BCM5481_R1CH_LEDS1 (0x0d) // sv01101 LED selector 1
10858-
10859-// 0x1c common
10860-#define BCM5481_R1CH_WE (1 << 15) // Write enable
10861-
10862-// 0x1c, sv 00010
10863-#define BCM5481_R1CH_SC1_LINK (1 << 2) // sv00010 Linkspeed
10864-
10865-// 0x1c, sv 00011
10866-#define BCM5481_R1CH_CACR_TCD (1 << 9) // sv00011 RGMII tx clock delay
10867-
10868-// 0x1c, sv 01001
10869-#define BCM5481_R1CH_LCTRL_ALEN (1 << 4) // Activity/Link enable on ACTIVITY LED
10870-#define BCM5481_R1CH_LCTRL_AEN (1 << 3) // Activity enable on ACTIVITY LED
10871-
10872-
10873-#define BCM5481_ECTRL_DISMDIX (1 <<14)
10874-
10875-#define BCM5481_MCTRL_AUTOMDIX (1 <<9)
10876-
10877-#define BCM5481_ESTAT_LINK (1 << 8)
10878-
10879-#define BCM5481_ASTAT_ANC (1 << 15)
10880-#define BCM5481_ASTAT_ANHCD (7 << 8)
10881-#define BCM5481_ASTAT_HCD(x) ((x >> 8) & 7)
10882-#define BCM5481_ASTAT_1KBTFD (0x7)
10883-#define BCM5481_ASTAT_1KBTHD (0x6)
10884-#define BCM5481_ASTAT_100BTXFD (0x5)
10885-#define BCM5481_ASTAT_100BTXHD (0x3)
10886-
10887-// end BCM5481 specifics
10888-
10889 #endif /* ifndef _IEGBE_OEM_PHY_H_ */
10890-
10891+
10892--- a/Embedded/src/GbE/iegbe_osdep.h
10893+++ b/Embedded/src/GbE/iegbe_osdep.h
10894@@ -2,7 +2,7 @@
10895 
10896 GPL LICENSE SUMMARY
10897 
10898- Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
10899+ Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
10900 
10901   This program is free software; you can redistribute it and/or modify
10902   it under the terms of version 2 of the GNU General Public License as
10903@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
10904   Contact Information:
10905   Intel Corporation
10906 
10907- version: Embedded.L.1.0.34
10908+ version: Embedded.Release.Patch.L.1.0.7-5
10909 
10910   Contact Information:
10911 
10912--- a/Embedded/src/GbE/iegbe_param.c
10913+++ b/Embedded/src/GbE/iegbe_param.c
10914@@ -2,7 +2,7 @@
10915 
10916 GPL LICENSE SUMMARY
10917 
10918- Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
10919+ Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
10920 
10921   This program is free software; you can redistribute it and/or modify
10922   it under the terms of version 2 of the GNU General Public License as
10923@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
10924   Contact Information:
10925   Intel Corporation
10926 
10927- version: Embedded.L.1.0.34
10928+ version: Embedded.Release.Patch.L.1.0.7-5
10929 
10930   Contact Information:
10931 
10932@@ -239,11 +239,7 @@ E1000_PARAM(InterruptThrottleRate, "Inte
10933 #define MAX_TXABSDELAY 0xFFFF
10934 #define MIN_TXABSDELAY 0
10935 
10936-#ifdef IEGBE_GBE_WORKAROUND
10937-#define DEFAULT_ITR 0
10938-#else
10939 #define DEFAULT_ITR 8000
10940-#endif
10941 
10942 
10943 #define MAX_ITR 100000
10944@@ -373,7 +369,7 @@ iegbe_check_options(struct iegbe_adapter
10945             tx_ring->count = opt.def;
10946         }
10947 #endif
10948- for (i = 0; i < adapter->num_queues; i++)
10949+ for (i = 0; i < adapter->num_tx_queues; i++)
10950             tx_ring[i].count = tx_ring->count;
10951     }
10952     { /* Receive Descriptor Count */
10953@@ -403,7 +399,7 @@ iegbe_check_options(struct iegbe_adapter
10954             rx_ring->count = opt.def;
10955         }
10956 #endif
10957- for (i = 0; i < adapter->num_queues; i++)
10958+ for (i = 0; i < adapter->num_rx_queues; i++)
10959             rx_ring[i].count = rx_ring->count;
10960     }
10961     { /* Checksum Offload Enable/Disable */
10962--- a/Embedded/src/GbE/kcompat.c
10963+++ b/Embedded/src/GbE/kcompat.c
10964@@ -1,8 +1,8 @@
10965-/************************************************************
10966-
10967+/************************************************************
10968+
10969 GPL LICENSE SUMMARY
10970 
10971- Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
10972+ Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
10973 
10974   This program is free software; you can redistribute it and/or modify
10975   it under the terms of version 2 of the GNU General Public License as
10976@@ -22,183 +22,192 @@ GPL LICENSE SUMMARY
10977   Contact Information:
10978   Intel Corporation
10979 
10980- version: Embedded.L.1.0.34
10981-
10982- Contact Information:
10983-
10984- Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
10985-
10986-**************************************************************/
10987-/**************************************************************************
10988- * @ingroup KCOMPAT_GENERAL
10989- *
10990- * @file kcompat.c
10991- *
10992- * @description
10993- *
10994- *
10995- **************************************************************************/
10996-#include "kcompat.h"
10997-
10998-/*************************************************************/
10999-/* 2.4.13 => 2.4.3 */
11000-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0xd) )
11001-
11002-/**************************************/
11003-/* PCI DMA MAPPING */
11004-
11005-#if defined(CONFIG_HIGHMEM)
11006-
11007-#ifndef PCI_DRAM_OFFSET
11008-#define PCI_DRAM_OFFSET 0
11009-#endif
11010-
11011-u64 _kc_pci_map_page(struct pci_dev *dev,
11012- struct page *page,
11013- unsigned long offset,
11014- size_t size,
11015- int direction)
11016-{
11017- u64 ret_val;
11018- ret_val = (((u64)(page - mem_map) << PAGE_SHIFT) + offset +
11019- PCI_DRAM_OFFSET);
11020- return ret_val;
11021-}
11022-
11023-#else /* CONFIG_HIGHMEM */
11024-
11025-u64 _kc_pci_map_page(struct pci_dev *dev,
11026- struct page *page,
11027- unsigned long offset,
11028- size_t size,
11029- int direction)
11030-{
11031- return pci_map_single(dev, (void *)page_address(page) + offset,
11032- size, direction);
11033-}
11034-
11035-#endif /* CONFIG_HIGHMEM */
11036-
11037-void _kc_pci_unmap_page(struct pci_dev *dev,
11038- u64 dma_addr,
11039- size_t size,
11040- int direction)
11041-{
11042- return pci_unmap_single(dev, dma_addr, size, direction);
11043-}
11044-
11045-#endif /* 2.4.13 => 2.4.3 */
11046-
11047-
11048-/*****************************************************************************/
11049-/* 2.4.3 => 2.4.0 */
11050-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0x3) )
11051-
11052-/**************************************/
11053-/* PCI DRIVER API */
11054-
11055-int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
11056-{
11057- if(!pci_dma_supported(dev, mask)) {
11058- return -EIO;
11059- }
11060- dev->dma_mask = mask;
11061- return 0;
11062-}
11063-
11064-int _kc_pci_request_regions(struct pci_dev *dev, char *res_name)
11065-{
11066- int i;
11067-
11068- for (i = 0; i < 0x6; i++) {
11069- if (pci_resource_len(dev, i) == 0) {
11070- continue;
11071- }
11072- if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
11073- if (!request_region(pci_resource_start(dev, i),
11074- pci_resource_len(dev, i), res_name)) {
11075- pci_release_regions(dev);
11076- return -EBUSY;
11077- }
11078- } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
11079- if (!request_mem_region(pci_resource_start(dev, i),
11080- pci_resource_len(dev, i),
11081- res_name)) {
11082- pci_release_regions(dev);
11083- return -EBUSY;
11084- }
11085- }
11086- }
11087- return 0;
11088-}
11089-
11090-void _kc_pci_release_regions(struct pci_dev *dev)
11091-{
11092- int i;
11093-
11094- for (i = 0; i < 0x6; i++) {
11095- if (pci_resource_len(dev, i) == 0) {
11096- continue;
11097- }
11098- if (pci_resource_flags(dev, i) & IORESOURCE_IO){
11099- release_region(pci_resource_start(dev, i),
11100- pci_resource_len(dev, i));
11101- } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
11102- release_mem_region(pci_resource_start(dev, i),
11103- pci_resource_len(dev, i));
11104- }
11105- }
11106-}
11107-
11108-/**************************************/
11109-/* NETWORK DRIVER API */
11110-
11111-struct net_device * _kc_alloc_etherdev(int sizeof_priv)
11112-{
11113- struct net_device *dev;
11114- int alloc_size;
11115-
11116- alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 0x1f;
11117-
11118- dev = kmalloc(alloc_size, GFP_KERNEL);
11119-
11120- if (!dev) { return NULL; }
11121-
11122- memset(dev, 0, alloc_size);
11123-
11124- if (sizeof_priv) {
11125- dev->priv = (void *) (((unsigned long)(dev + 1) + 0x1f) & ~0x1f);
11126- }
11127- dev->name[0] = '\0';
11128-
11129- ether_setup(dev);
11130-
11131- return dev;
11132-}
11133-
11134-int _kc_is_valid_ether_addr(u8 *addr)
11135-{
11136- const char zaddr[0x6] = {0,};
11137-
11138- return !(addr[0]&1) && memcmp( addr, zaddr, 0x6);
11139-}
11140-
11141-#endif /* 2.4.3 => 2.4.0 */
11142-
11143-
11144-/*****************************************************************/
11145-/* 2.4.6 => 2.4.3 */
11146-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0x6) )
11147-
11148-int _kc_pci_set_power_state(struct pci_dev *dev, int state)
11149-{ return 0; }
11150-int _kc_pci_save_state(struct pci_dev *dev, u32 *buffer)
11151-{ return 0; }
11152-int _kc_pci_restore_state(struct pci_dev *pdev, u32 *buffer)
11153-{ return 0; }
11154-int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
11155-{ return 0; }
11156-
11157-#endif /* 2.4.6 => 2.4.3 */
11158-
11159-
11160+ version: Embedded.Release.Patch.L.1.0.7-5
11161+
11162+ Contact Information:
11163+
11164+ Intel Corporation, 5000 W Chandler Blvd, Chandler, AZ 85226
11165+
11166+**************************************************************/
11167+/**************************************************************************
11168+ * @ingroup KCOMPAT_GENERAL
11169+ *
11170+ * @file kcompat.c
11171+ *
11172+ * @description
11173+ *
11174+ *
11175+ **************************************************************************/
11176+#include "kcompat.h"
11177+
11178+/*************************************************************/
11179+/* 2.4.13 => 2.4.3 */
11180+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0xd) )
11181+
11182+/**************************************/
11183+/* PCI DMA MAPPING */
11184+
11185+#if defined(CONFIG_HIGHMEM)
11186+
11187+#ifndef PCI_DRAM_OFFSET
11188+#define PCI_DRAM_OFFSET 0
11189+#endif
11190+
11191+u64 _kc_pci_map_page(struct pci_dev *dev,
11192+ struct page *page,
11193+ unsigned long offset,
11194+ size_t size,
11195+ int direction)
11196+{
11197+ u64 ret_val;
11198+ ret_val = (((u64)(page - mem_map) << PAGE_SHIFT) + offset +
11199+ PCI_DRAM_OFFSET);
11200+ return ret_val;
11201+}
11202+
11203+#else /* CONFIG_HIGHMEM */
11204+
11205+u64 _kc_pci_map_page(struct pci_dev *dev,
11206+ struct page *page,
11207+ unsigned long offset,
11208+ size_t size,
11209+ int direction)
11210+{
11211+ return pci_map_single(dev, (void *)page_address(page) + offset,
11212+ size, direction);
11213+}
11214+
11215+#endif /* CONFIG_HIGHMEM */
11216+
11217+void _kc_pci_unmap_page(struct pci_dev *dev,
11218+ u64 dma_addr,
11219+ size_t size,
11220+ int direction)
11221+{
11222+ return pci_unmap_single(dev, dma_addr, size, direction);
11223+}
11224+
11225+#endif /* 2.4.13 => 2.4.3 */
11226+
11227+
11228+/*****************************************************************************/
11229+/* 2.4.3 => 2.4.0 */
11230+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0x3) )
11231+
11232+/**************************************/
11233+/* PCI DRIVER API */
11234+
11235+int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
11236+{
11237+ if(!pci_dma_supported(dev, mask)) {
11238+ return -EIO;
11239+ }
11240+ dev->dma_mask = mask;
11241+ return 0;
11242+}
11243+
11244+int _kc_pci_request_regions(struct pci_dev *dev, char *res_name)
11245+{
11246+ int i;
11247+
11248+ for (i = 0; i < 0x6; i++) {
11249+ if (pci_resource_len(dev, i) == 0) {
11250+ continue;
11251+ }
11252+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
11253+ if (!request_region(pci_resource_start(dev, i),
11254+ pci_resource_len(dev, i), res_name)) {
11255+ pci_release_regions(dev);
11256+ return -EBUSY;
11257+ }
11258+ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
11259+ if (!request_mem_region(pci_resource_start(dev, i),
11260+ pci_resource_len(dev, i),
11261+ res_name)) {
11262+ pci_release_regions(dev);
11263+ return -EBUSY;
11264+ }
11265+ }
11266+ }
11267+ return 0;
11268+}
11269+
11270+void _kc_pci_release_regions(struct pci_dev *dev)
11271+{
11272+ int i;
11273+
11274+ for (i = 0; i < 0x6; i++) {
11275+ if (pci_resource_len(dev, i) == 0) {
11276+ continue;
11277+ }
11278+ if (pci_resource_flags(dev, i) & IORESOURCE_IO){
11279+ release_region(pci_resource_start(dev, i),
11280+ pci_resource_len(dev, i));
11281+ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
11282+ release_mem_region(pci_resource_start(dev, i),
11283+ pci_resource_len(dev, i));
11284+ }
11285+ }
11286+}
11287+
11288+/**************************************/
11289+/* NETWORK DRIVER API */
11290+
11291+struct net_device * _kc_alloc_etherdev(int sizeof_priv)
11292+{
11293+ struct net_device *dev;
11294+ int alloc_size;
11295+
11296+ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 0x1f;
11297+
11298+ dev = kmalloc(alloc_size, GFP_KERNEL);
11299+
11300+ if (!dev) { return NULL; }
11301+
11302+ memset(dev, 0, alloc_size);
11303+
11304+ if (sizeof_priv) {
11305+ dev->priv = (void *) (((unsigned long)(dev + 1) + 0x1f) & ~0x1f);
11306+ }
11307+ dev->name[0] = '\0';
11308+
11309+ ether_setup(dev);
11310+
11311+ return dev;
11312+}
11313+
11314+int _kc_is_valid_ether_addr(u8 *addr)
11315+{
11316+ const char zaddr[0x6] = {0,};
11317+
11318+ return !(addr[0]&1) && memcmp( addr, zaddr, 0x6);
11319+}
11320+
11321+#endif /* 2.4.3 => 2.4.0 */
11322+
11323+
11324+/*****************************************************************/
11325+/* 2.4.6 => 2.4.3 */
11326+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(0x2,0x4,0x6) )
11327+
11328+int _kc_pci_set_power_state(struct pci_dev *dev, int state)
11329+{ return 0; }
11330+int _kc_pci_save_state(struct pci_dev *dev, u32 *buffer)
11331+{ return 0; }
11332+int _kc_pci_restore_state(struct pci_dev *pdev, u32 *buffer)
11333+{ return 0; }
11334+int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
11335+{ return 0; }
11336+
11337+#endif /* 2.4.6 => 2.4.3 */
11338+
11339+
11340+
11341+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,24) )
11342+
11343+void dump_stack(void)
11344+{
11345+}
11346+
11347+#endif /* 2.4.24 */
11348+
11349--- a/Embedded/src/GbE/kcompat_ethtool.c
11350+++ b/Embedded/src/GbE/kcompat_ethtool.c
11351@@ -2,7 +2,7 @@
11352 /*
11353  * GPL LICENSE SUMMARY
11354  *
11355- * Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
11356+ * Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
11357  *
11358  * This program is free software; you can redistribute it and/or modify
11359  * it under the terms of version 2 of the GNU General Public License as
11360@@ -22,7 +22,7 @@
11361  * Contact Information:
11362  * Intel Corporation
11363  *
11364- * version: Embedded.L.1.0.34
11365+ * version: Embedded.Release.Patch.L.1.0.7-5
11366  */
11367   
11368 /**************************************************************************
11369@@ -779,6 +779,7 @@ static int ethtool_get_stats(struct net_
11370 }
11371 
11372 /* The main entry point in this file. Called from net/core/dev.c */
11373+
11374 #define ETHTOOL_OPS_COMPAT
11375 int ethtool_ioctl(struct ifreq *ifr)
11376 {
11377--- a/Embedded/src/GbE/kcompat.h
11378+++ b/Embedded/src/GbE/kcompat.h
11379@@ -2,7 +2,7 @@
11380 
11381 GPL LICENSE SUMMARY
11382 
11383- Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
11384+ Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
11385 
11386   This program is free software; you can redistribute it and/or modify
11387   it under the terms of version 2 of the GNU General Public License as
11388@@ -22,7 +22,7 @@ GPL LICENSE SUMMARY
11389   Contact Information:
11390   Intel Corporation
11391 
11392- version: Embedded.L.1.0.34
11393+ version: Embedded.Release.Patch.L.1.0.7-5
11394   
11395   Contact Information:
11396   
11397@@ -69,15 +69,6 @@ GPL LICENSE SUMMARY
11398 #define CONFIG_NET_POLL_CONTROLLER
11399 #endif
11400 
11401-#ifdef E1000_NAPI
11402-#undef CONFIG_E1000_NAPI
11403-#define CONFIG_E1000_NAPI
11404-#endif
11405-
11406-#ifdef E1000_NO_NAPI
11407-#undef CONFIG_E1000_NAPI
11408-#endif
11409-
11410 #ifndef module_param
11411 #define module_param(v,t,p) MODULE_PARM(v, "i");
11412 #endif
11413@@ -554,35 +545,14 @@ extern void _kc_pci_unmap_page(struct pc
11414 #endif
11415 
11416 /*****************************************************************************/
11417-/* 2.4.23 => 2.4.22 */
11418-#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
11419-#ifdef CONFIG_E1000_NAPI
11420-#ifndef netif_poll_disable
11421-#define netif_poll_disable(x) _kc_netif_poll_disable(x)
11422-static inline void _kc_netif_poll_disable(struct net_device *netdev)
11423-{
11424- while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
11425- /* No hurry */
11426- current->state = TASK_INTERRUPTIBLE;
11427- schedule_timeout(1);
11428- }
11429-}
11430-#endif
11431-#ifndef netif_poll_enable
11432-#define netif_poll_enable(x) _kc_netif_poll_enable(x)
11433-static inline void _kc_netif_poll_enable(struct net_device *netdev)
11434-{
11435- clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
11436-}
11437-#endif
11438-#endif
11439-#endif
11440-
11441-/*****************************************************************************/
11442 /* 2.5.28 => 2.4.23 */
11443 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
11444 
11445+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
11446+static inline void _kc_synchronize_irq(void) { barrier(); }
11447+#else
11448 static inline void _kc_synchronize_irq() { synchronize_irq(); }
11449+#endif /* 2.4.23 */
11450 #undef synchronize_irq
11451 #define synchronize_irq(X) _kc_synchronize_irq()
11452 
11453@@ -747,6 +717,37 @@ static inline struct mii_ioctl_data *_kc
11454 #define skb_header_cloned(x) 0
11455 #endif /* SKB_DATAREF_SHIFT not defined */
11456 
11457+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
11458+
11459+#define ioread32(addr) readl(addr)
11460+#define iowrite32(val,addr) writel(val,addr)
11461+
11462+#endif /* 2.6.10 */
11463+
11464+#ifndef DEFINE_SPINLOCK
11465+#define DEFINE_SPINLOCK(s) spinlock_t s = SPIN_LOCK_UNLOCKED
11466+#endif /* DEFINE_SPINLOCK */
11467+
11468+#ifndef PCI_COMMAND_INTX_DISABLE
11469+#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
11470+#endif /* PCI_COMMAND_INTX_DISABLE */
11471+
11472+#ifndef ETH_GSTRING_LEN
11473+#define ETH_GSTRING_LEN 32
11474+#endif /* ETH_GSTRING_LEN */
11475+
11476+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,24) )
11477+
11478+extern void dump_stack(void);
11479+
11480+#undef register_reboot_notifier
+#define register_reboot_notifier(a)
+
+#undef unregister_reboot_notifier
+#define unregister_reboot_notifier(a)
+
+#endif /* 2.4.24 */
+
 #endif /* _KCOMPAT_H_ */
 
  
--- a/Embedded/src/GbE/Makefile
+++ b/Embedded/src/GbE/Makefile
@@ -1,6 +1,6 @@
 # GPL LICENSE SUMMARY
 #
-# Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+# Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
 # Contact Information:
 # Intel Corporation
 #
-# version: Embedded.L.1.0.34
+# version: Embedded.Release.Patch.L.1.0.7-5
 
 ###########################################################################
 # Driver files
@@ -35,6 +35,8 @@ MDIO_PHONY_CFILES = gcu.c
 MDIO_CFILES = gcu_main.c gcu_if.c
 MDIO_HFILES = gcu.h gcu_if.h gcu_reg.h kcompat.h
 
+KVER=$(shell uname -r)
+
 #
 # Variables:
 # KSRC (path to kernel source to build against)
@@ -50,45 +52,16 @@ MDIO_HFILES = gcu.h gcu_if.h gcu_reg.h k
 
 # set KSRC, KOBJ, and EXTERNAL_MDIO to default values of not already set
 #
-KOBJ ?= /usr/src/kernels/linux
-KSRC ?= /usr/src/kernels/linux
+#KOBJ=/usr/src/kernels/linux
+#KSRC=/usr/src/kernels/linux
+#KSRC=$(KOBJ)
 EXTERNAL_MDIO ?= 1
 GBE_NAME = iegbe
 GCU_NAME = gcu
 
-# By default the workaround for the IEGBE writeback issue is enabled
-#
-IEGBE_GBE_WORKAROUND ?= 0
-
-# If the platform only supports 10/100 this variable needs to be set
-# so the default advertisement is set appropriately.
-# By default, this variable will be disabled.
-#
-IEGBE_10_100_ONLY ?= 0
-
-# check for version.h and autoconf.h for running kernel in /boot (SUSE)
-ifneq (,$(wildcard /boot/vmlinuz.version.h))
- VERSION_FILE := /boot/vmlinuz.version.h
- CONFIG_FILE := /boot/vmlinuz.autoconf.h
- KVER := $(shell $(CC) $(CFLAGS) -E -dM $(VERSION_FILE) | \
- grep UTS_RELEASE | awk '{ print $$3 }' | sed 's/\"//g')
- ifeq ($(KVER),$(shell uname -r))
- # set up include path to override headers from kernel source
- x:=$(shell rm -rf include)
- x:=$(shell mkdir -p include/linux)
- x:=$(shell cp /boot/vmlinuz.version.h include/linux/version.h)
- x:=$(shell cp /boot/vmlinuz.autoconf.h include/linux/autoconf.h)
- CFLAGS += -I./include
- else
- VERSION_FILE := $(KOBJ)/include/linux/version.h
- UTS_REL_FILE := $(KSRC)/include/linux/utsrelease.h
- CONFIG_FILE := $(KOBJ)/include/linux/autoconf.h
- endif
-else
- VERSION_FILE := $(KOBJ)/include/linux/version.h
- UTS_REL_FILE := $(KSRC)/include/linux/utsrelease.h
- CONFIG_FILE := $(KOBJ)/include/linux/autoconf.h
-endif
+VERSION_FILE := $(KSRC)/include/linux/version.h
+UTS_REL_FILE := $(KSRC)/include/linux/utsrelease.h
+CONFIG_FILE := $(KSRC)/include/linux/autoconf.h
 
 ifeq (,$(wildcard $(VERSION_FILE)))
   $(error Linux kernel source not configured - missing version.h)
@@ -98,83 +71,8 @@ ifeq (,$(wildcard $(CONFIG_FILE)))
   $(error Linux kernel source not configured - missing autoconf.h)
 endif
 
-# as of 2.6.16, kernel define UTS_RELEASE has been moved to utsrelease.h
-# so check that file for kernel version string instead of version.h
-USE_UTS_REL := $(shell [ -f $(UTS_REL_FILE) ] && echo "1")
-
-# pick a compiler
-ifneq (,$(findstring egcs-2.91.66, $(shell cat /proc/version)))
- CC := kgcc gcc cc
-else
- CC := gcc cc
-endif
-test_cc = $(shell $(cc) --version > /dev/null 2>&1 && echo $(cc))
-CC := $(foreach cc, $(CC), $(test_cc))
-CC := $(firstword $(CC))
-ifeq (,$(CC))
- $(error Compiler not found)
-endif
-
-# we need to know what platform the driver is being built on
-# some additional features are only built on Intel platforms
-ARCH := $(shell uname -m | sed 's/i.86/i386/')
-ifeq ($(ARCH),alpha)
- CFLAGS += -ffixed-8 -mno-fp-regs
-endif
-ifeq ($(ARCH),x86_64)
- CFLAGS += -mcmodel=kernel -mno-red-zone
-endif
-ifeq ($(ARCH),ppc)
- CFLAGS += -msoft-float
-endif
-ifeq ($(ARCH),ppc64)
- CFLAGS += -m64 -msoft-float
- LDFLAGS += -melf64ppc
-endif
-
-# standard flags for module builds
-CFLAGS += -DLINUX -D__KERNEL__ -DMODULE -O2 -pipe -Wall
-CFLAGS += -I$(KSRC)/include -I.
-CFLAGS += $(shell [ -f $(KSRC)/include/linux/modversions.h ] && \
- echo "-DMODVERSIONS -DEXPORT_SYMTAB \
- -include $(KSRC)/include/linux/modversions.h")
-
-ifeq ($(IEGBE_GBE_WORKAROUND), 1)
-CFLAGS += -DIEGBE_GBE_WORKAROUND -DE1000_NO_NAPI
-endif
-
-ifeq ($(IEGBE_10_100_ONLY), 1)
-CFLAGS += -DIEGBE_10_100_ONLY
-endif
-
-CFLAGS += $(CFLAGS_EXTRA)
-#ifeq (,$(shell echo $(CFLAGS_EXTRA) | grep NAPI))
-#CFLAGS += -DE1000_NO_NAPI
-#CFLAGS_EXTRA += -DE1000_NO_NAPI
-#endif
-
-RHC := $(KSRC)/include/linux/rhconfig.h
-ifneq (,$(wildcard $(RHC)))
- # 7.3 typo in rhconfig.h
- ifneq (,$(shell $(CC) $(CFLAGS) -E -dM $(RHC) | grep __module__bigmem))
- CFLAGS += -D__module_bigmem
- endif
-endif
-
-# get the kernel version - we use this to find the correct install path
-ifeq ($(USE_UTS_REL), 1)
- KVER := $(shell $(CC) $(CFLAGS) -E -dM $(UTS_REL_FILE) | grep UTS_RELEASE | \
- awk '{ print $$3 }' | sed 's/\"//g')
-else
- KVER := $(shell $(CC) $(CFLAGS) -E -dM $(VERSION_FILE) | grep UTS_RELEASE | \
- awk '{ print $$3 }' | sed 's/\"//g')
-endif
-
-KKVER := $(shell echo $(KVER) | \
- awk '{ if ($$0 ~ /2\.[6-9]\./) print "1"; else print "0"}')
-ifeq ($(KKVER), 0)
- $(error *** Aborting the build. \
- *** This driver is not supported on kernel versions older than 2.6.18)
+ifeq (,$(wildcard $(UTS_REL_FILE)))
+ $(error Linux kernel source not configured - missing utsrelease.h)
 endif
 
 # set the install path
@@ -202,11 +100,11 @@ ifneq ($(SMP),$(shell uname -a | grep SM
 endif
 
 ifeq ($(SMP),1)
- CFLAGS += -D__SMP__
+ EXTRA_CFLAGS += -D__SMP__
 endif
 
 ifeq ($(EXTERNAL_MDIO), 1)
- CFLAGS += -DEXTERNAL_MDIO
+ EXTRA_CFLAGS += -DEXTERNAL_MDIO
 endif
 
 ###########################################################################
@@ -223,7 +121,6 @@ MANSECTION = 7
 MANFILE = $(TARGET:.ko=.$(MANSECTION))
 
 ifneq ($(PATCHLEVEL),)
- EXTRA_CFLAGS += $(CFLAGS_EXTRA)
   obj-m += $(TARGET:.ko=.o)
   iegbe-objs := $(CFILES:.c=.o)
   ifeq ($(EXTERNAL_MDIO),1)
--- a/filelist
+++ b/filelist
@@ -1,41 +1,3 @@
-Embedded/Makefile
-Embedded/environment.mk
-Embedded/src/1588/1588.c
-Embedded/src/1588/1588.h
-Embedded/src/1588/IxTimeSyncAcc_p.h
-Embedded/src/1588/Makefile
-Embedded/src/1588/ixtimesyncacc.c
-Embedded/src/1588/ixtimesyncacc.h
-Embedded/src/1588/linux_ioctls.h
-Embedded/src/CAN/Makefile
-Embedded/src/CAN/can_fifo.c
-Embedded/src/CAN/can_fifo.h
-Embedded/src/CAN/can_ioctl.h
-Embedded/src/CAN/can_main.c
-Embedded/src/CAN/can_main.h
-Embedded/src/CAN/can_port.h
-Embedded/src/CAN/icp_can.c
-Embedded/src/CAN/icp_can.h
-Embedded/src/CAN/icp_can_regs.h
-Embedded/src/CAN/icp_can_types.h
-Embedded/src/CAN/icp_can_user.h
-Embedded/src/EDMA/Makefile
-Embedded/src/EDMA/dma.h
-Embedded/src/EDMA/dma_api.h
-Embedded/src/EDMA/dma_client_api.c
-Embedded/src/EDMA/dma_common.c
-Embedded/src/EDMA/dma_internals.h
-Embedded/src/EDMA/dma_linux.c
-Embedded/src/EDMA/os/os.c
-Embedded/src/EDMA/os/os.h
-Embedded/src/EDMA/os/os_list.c
-Embedded/src/EDMA/os/os_list.h
-Embedded/src/EDMA/os/os_types.h
-Embedded/src/GPIO/Makefile
-Embedded/src/GPIO/common.h
-Embedded/src/GPIO/gpio.h
-Embedded/src/GPIO/gpio_ref.c
-Embedded/src/GPIO/linux_ioctls.h
 Embedded/src/GbE/Makefile
 Embedded/src/GbE/gcu.h
 Embedded/src/GbE/gcu_if.c
@@ -55,16 +17,6 @@ Embedded/src/GbE/iegbe_param.c
 Embedded/src/GbE/kcompat.c
 Embedded/src/GbE/kcompat.h
 Embedded/src/GbE/kcompat_ethtool.c
-Embedded/src/WDT/Makefile
-Embedded/src/WDT/iwdt.c
-Embedded/src/WDT/iwdt.h
-Embedded/src/patches/Intel_EP80579_RHEL5.patch
-Embedded/src/patches/pci.ids_RHEL5.patch
 LICENSE.GPL
-build_system/build_files/Core/ia.mk
-build_system/build_files/OS/linux_2.6.mk
-build_system/build_files/OS/linux_2.6_kernel_space_rules.mk
-build_system/build_files/common.mk
-build_system/build_files/rules.mk
 filelist
 versionfile
--- a/versionfile
+++ b/versionfile
@@ -1,4 +1,4 @@
-PACKAGE_TYPE=Embedded
+PACKAGE_TYPE=Embedded.Release.Patch
 
 PACKAGE_OS=L
 
@@ -6,4 +6,6 @@ PACKAGE_VERSION_MAJOR_NUMBER=1
 
 PACKAGE_VERSION_MINOR_NUMBER=0
 
-PACKAGE_VERSION_PATCH_NUMBER=34
+PACKAGE_VERSION_PATCH_NUMBER=7
+
+PACKAGE_VERSION_BUILD_NUMBER=5