commit 01ffc0a7f1c1801a2354719dedbc32aff45b987d
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Sat Nov 24 12:11:21 2012 +0000

    8139cp: re-enable interrupts after tx timeout

    Recovery doesn't work too well if we leave interrupts disabled...

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Acked-by: Francois Romieu <romieu@fr.zoreil.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
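
For context, the recovery path after this fix looks roughly like the
sketch below. The function names are taken from the driver itself, but
locking and error handling are elided, so treat this as an outline
rather than the actual function body.

static void cp_tx_timeout(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);

        cp_stop_hw(cp);         /* stopping the hw also masks interrupts */
        cp_clean_rings(cp);
        cp_init_rings(cp);      /* return-value check elided */
        cp_start_hw(cp);
        cp_enable_irq(cp);      /* the fix: unmask interrupts again */

        netif_wake_queue(dev);
}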

commit 871f0d4c153e1258d4becf306eca6761bf38b629
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Thu Nov 22 03:16:58 2012 +0000

    8139cp: enable bql

    This adds support for byte queue limits on RTL8139C+.

    Tested on real hardware.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Acked-By: Dave Täht <dave.taht@bufferbloat.net>
    Signed-off-by: David S. Miller <davem@davemloft.net>
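
The byte-queue-limits pattern this commit wires up is small enough to
sketch. The netdev_sent_queue(), netdev_completed_queue() and
netdev_reset_queue() helpers are the real netdevice.h API; the
surrounding functions here are illustrative stubs, not the driver's
own code.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit side: account the bytes handed to the hardware. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        /* ... post skb to a descriptor and kick the NIC ... */
        netdev_sent_queue(dev, skb->len);
        return NETDEV_TX_OK;
}

/* Completion side: one call per reclaim pass, covering all freed skbs. */
static void example_tx_reclaim(struct net_device *dev)
{
        unsigned int pkts_compl = 0, bytes_compl = 0;

        /* ... walk the tx ring, free skbs, bump both counters ... */
        netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

/* Teardown: the accounting must be cleared whenever the rings are reset. */
static void example_stop(struct net_device *dev)
{
        netdev_reset_queue(dev);
}

BQL uses these byte counts to limit how much data sits queued in the tx
ring at once, which reduces buffering latency without starving the NIC.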

commit a9dbe40fc10cea2efe6e1ff9e03c62dd7579c5ba
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Wed Nov 21 10:27:19 2012 +0000

    8139cp: set ring address after enabling C+ mode

    This fixes (for me) a regression introduced by commit b01af457 ("8139cp:
    set ring address before enabling receiver"). That commit configured the
    descriptor ring addresses earlier in the initialisation sequence, in
    order to avoid the possibility of triggering stray DMA before the
    correct address had been set up.

    Unfortunately, it seems that the hardware will scribble garbage into the
    TxRingAddr registers when we enable "plus mode" Tx in the CpCmd
    register. Observed on a Traverse Geos router board.

    To deal with this, while not reintroducing the problem which led to the
    original commit, we augment cp_start_hw() to write the CpCmd register
    *first*, then set the descriptor ring addresses, and finally enable Rx
    and Tx in the original 8139 Cmd register. The datasheet
    actually indicates that we should enable Tx/Rx in the Cmd register
    *before* configuring the descriptor addresses, but that would appear to
    re-introduce the problem that the offending commit b01af457 was trying
    to solve. And this variant appears to work fine on real hardware.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Cc: stable@kernel.org [3.5+]
    Signed-off-by: David S. Miller <davem@davemloft.net>
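
Condensed, the ordering that cp_start_hw() ends up with is the
following; this is a sketch distilled from the full hunk in the diff
further below, not a complete function.

        cpw16(CpCmd, cp->cpcmd);        /* 1: enable C+ mode first */

        /* 2: only now program the ring addresses, since enabling C+
         *    Tx mode can clobber TxRingAddr */
        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

        /* 3: finally let the DMA engines run */
        cpw8(Cmd, RxOn | TxOn);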

commit 071e3ef4a94a021b16a2912f3885c86f4ff36b49
Author: David S. Miller <davem@davemloft.net>
Date:   Sun Nov 25 15:52:09 2012 -0500

    Revert "8139cp: revert "set ring address before enabling receiver""

    This reverts commit b26623dab7eeb1e9f5898c7a49458789dd492f20.

    This reverts the revert; in net-next we'll try another scheme
    to fix this bug using patches from David Woodhouse.

    Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -648,6 +648,7 @@ static void cp_tx (struct cp_private *cp
 {
         unsigned tx_head = cp->tx_head;
         unsigned tx_tail = cp->tx_tail;
+        unsigned bytes_compl = 0, pkts_compl = 0;
 
         while (tx_tail != tx_head) {
                 struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@ static void cp_tx (struct cp_private *cp
                                  le32_to_cpu(txd->opts1) & 0xffff,
                                  PCI_DMA_TODEVICE);
 
+                bytes_compl += skb->len;
+                pkts_compl++;
+
                 if (status & LastFrag) {
                         if (status & (TxError | TxFIFOUnder)) {
                                 netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@ static void cp_tx (struct cp_private *cp
 
         cp->tx_tail = tx_tail;
 
+        netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
         if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                 netif_wake_queue(cp->dev);
 }
@@ -843,6 +848,8 @@ static netdev_tx_t cp_start_xmit (struct
                 wmb();
         }
         cp->tx_head = entry;
+
+        netdev_sent_queue(dev, skb->len);
         netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
                   entry, skb->len);
         if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@ static void cp_stop_hw (struct cp_privat
 
         cp->rx_tail = 0;
         cp->tx_head = cp->tx_tail = 0;
+
+        netdev_reset_queue(cp->dev);
 }
 
 static void cp_reset_hw (struct cp_private *cp)
@@ -957,8 +966,38 @@ static void cp_reset_hw (struct cp_priva
 
 static inline void cp_start_hw (struct cp_private *cp)
 {
+        dma_addr_t ring_dma;
+
         cpw16(CpCmd, cp->cpcmd);
+
+        /*
+         * These (at least TxRingAddr) need to be configured after the
+         * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
+         * (C+ Command Register) recommends that these and more be configured
+         * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
+         * it's been observed that the TxRingAddr is actually reset to garbage
+         * when C+ mode Tx is enabled in CpCmd.
+         */
+        cpw32_f(HiTxRingAddr, 0);
+        cpw32_f(HiTxRingAddr + 4, 0);
+
+        ring_dma = cp->ring_dma;
+        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+        /*
+         * Strictly speaking, the datasheet says this should be enabled
+         * *before* setting the descriptor addresses. But what, then, would
+         * prevent it from doing DMA to random unconfigured addresses?
+         * This variant appears to work fine.
+         */
         cpw8(Cmd, RxOn | TxOn);
+
+        netdev_reset_queue(cp->dev);
 }
 
 static void cp_enable_irq(struct cp_private *cp)
@@ -969,7 +1008,6 @@ static void cp_enable_irq(struct cp_priv
 static void cp_init_hw (struct cp_private *cp)
 {
         struct net_device *dev = cp->dev;
-        dma_addr_t ring_dma;
 
         cp_reset_hw(cp);
 
@@ -992,17 +1030,6 @@ static void cp_init_hw (struct cp_privat
 
         cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-        cpw32_f(HiTxRingAddr, 0);
-        cpw32_f(HiTxRingAddr + 4, 0);
-
-        ring_dma = cp->ring_dma;
-        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
         cpw16(MultiIntr, 0);
 
         cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1197,6 +1224,7 @@ static void cp_tx_timeout(struct net_dev
         cp_clean_rings(cp);
         rc = cp_init_rings(cp);
         cp_start_hw(cp);
+        cp_enable_irq(cp);
 
         netif_wake_queue(dev);

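One detail in the hunks above is worth a note: the (ring_dma >> 16) >> 16
construct. dma_addr_t may be only 32 bits wide, and shifting a 32-bit
value by 32 is undefined behaviour in C, so the high half is extracted
with two 16-bit shifts instead (which simply yield 0 on 32-bit
configurations). Expressed as a hypothetical helper, which the driver
does not actually define:

static inline u32 dma_high32(dma_addr_t addr)
{
        /* avoids an undefined shift-by-32 when dma_addr_t is 32 bits */
        return (addr >> 16) >> 16;
}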