target/linux/leon/patches/024-greth_resolve_smp_and_other_issues.patch

From 4439d933884ee3c7e320b8d33bd2e268dd5b6fa5 Mon Sep 17 00:00:00 2001
From: Daniel Hellstrom <daniel@gaisler.com>
Date: Wed, 1 Dec 2010 11:40:19 +0100
Subject: [PATCH] GRETH: resolve SMP issues and other problems

Fixes the following:
1. POLL should not enable IRQ when work is not completed
2. No locking between TX descriptor cleaning and XMIT descriptor handling
3. No locking between RX POLL and XMIT modifying the control register
4. Since TX cleaning (called from POLL) runs in parallel with XMIT,
   otherwise unnecessary locking is required.
5. The IRQ handler looks only at the RX frame status; this is wrong when
   the IRQ is temporarily disabled (in POLL) and when the IRQ is shared.
6. The IRQ handler clears the IRQ status, which is unnecessary
7. The TX queue was stopped as a precaution when fewer than MAX_SKB_FRAGS+1
   descriptors were available after an SKB had been scheduled by XMIT.
   Instead, the TX queue is now stopped only when not enough descriptors are
   available upon entering XMIT (see the sketch after this list).
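
The reworked XMIT entry check roughly follows this pattern (a simplified
sketch based on the diff below; the gigabit variant greth_start_xmit_gbit()
does the same using greth_clean_tx_gbit() and an nr_frags + 1 threshold):

    /* Reclaim finished TX descriptors before looking at tx_free */
    greth_clean_tx(greth->netdev);

    if (unlikely(greth->tx_free <= 0)) {
        spin_lock_irqsave(&greth->devlock, flags); /* protect control register */
        ctrl = GRETH_REGLOAD(greth->regs->control);
        /* Arm the TX IRQ only if poll() has not disabled IRQs (RXI still set) */
        if (ctrl & GRETH_RXI)
            GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
        netif_stop_queue(dev);
        spin_unlock_irqrestore(&greth->devlock, flags);
        return NETDEV_TX_BUSY;
    }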

It was hard to split this patch into smaller pieces since all of the changes
are tied together somehow.

Note that the RX flag used in the interrupt handler does not signal that an
interrupt was asserted, but that a frame was received; the same goes for TX.
Also, no IRQ is asserted if the RX flag was already set before the IRQ enable
bit is set, only when a new frame is received. So extra care must be taken to
avoid enabling the IRQ when all descriptors have already been used, otherwise
we deadlock. See the new POLL implementation, which enables the IRQ and then
looks at the RX flag to determine whether one or more IRQs may have been
missed; a sketch of that check follows below. The TX/RX flags are cleared
before handling previously enabled descriptors, which ensures that the RX/TX
flags are valid when determining whether the IRQ should be turned on again.

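The completion step of the new poll() therefore follows this pattern (a
simplified sketch based on the diff below; restart_txrx_poll marks the top
of the poll loop):

    spin_lock_irqsave(&greth->devlock, flags);
    ctrl = GRETH_REGLOAD(greth->regs->control);
    if (netif_queue_stopped(greth->netdev)) {
        /* TX queue stopped: also arm the TX IRQ and watch the TX flags */
        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI | GRETH_RXI);
        mask = GRETH_INT_RX | GRETH_INT_RE | GRETH_INT_TX | GRETH_INT_TE;
    } else {
        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
        mask = GRETH_INT_RX | GRETH_INT_RE;
    }
    if (GRETH_REGLOAD(greth->regs->status) & mask) {
        /* Work arrived while IRQs were off: undo and poll again */
        GRETH_REGSAVE(greth->regs->control, ctrl);
        spin_unlock_irqrestore(&greth->devlock, flags);
        goto restart_txrx_poll;
    }
    __napi_complete(napi);
    spin_unlock_irqrestore(&greth->devlock, flags);
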
Moving TX cleaning from POLL to XMIT in the standard case removes some locking
trouble. Doing TX cleaning from POLL only when not enough TX descriptors are
available is safe, because the TX queue is stopped at the same time and thus
XMIT will not be called. The TX queue is woken up again once enough
descriptors are available; the wake-up check is sketched below.

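A simplified sketch of the wake-up check at the end of TX cleaning, taken from
greth_clean_tx_gbit() in the diff below:

    if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS + 1)))
        netif_wake_queue(dev);
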
TX frames are always enabled with the IRQ bit set in their descriptors;
however, the TX IRQ Enable flag in the control register is not set until XMIT
must wait for free descriptors.

Locking the RX and XMIT parts of the driver against each other is needed
because the RX and TX enable bits share the same control register; the pattern
is sketched below.

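Both paths perform a read-modify-write of that shared register, so every
greth_enable_tx()/greth_enable_rx() call is now wrapped in the device
spinlock (a simplified sketch based on the diff below):

    spin_lock_irqsave(&greth->devlock, flags); /* serialize with the RX path */
    greth_enable_tx(greth);                    /* sets the TX enable bit in the shared control register */
    spin_unlock_irqrestore(&greth->devlock, flags);

    spin_lock_irqsave(&greth->devlock, flags); /* serialize with XMIT */
    greth_enable_rx(greth);                    /* sets the RX enable bit in the same register */
    spin_unlock_irqrestore(&greth->devlock, flags);
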
Signed-off-by: Daniel Hellstrom <daniel@gaisler.com>
---
 drivers/net/greth.c |  158 ++++++++++++++++++++++++++++++---------------------
 1 files changed, 93 insertions(+), 65 deletions(-)

--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -1,7 +1,7 @@
 /*
  * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
  *
- * 2005-2009 (c) Aeroflex Gaisler AB
+ * 2005-2010 (c) Aeroflex Gaisler AB
  *
  * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
  * available in the GRLIB VHDL IP core library.
@@ -401,12 +401,20 @@ greth_start_xmit(struct sk_buff *skb, st
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
 	int err = NETDEV_TX_OK;
-	u32 status, dma_addr;
+	u32 status, dma_addr, ctrl;
+	unsigned long flags;
 
-	bdp = greth->tx_bd_base + greth->tx_next;
+	/* Clean TX Ring */
+	greth_clean_tx(greth->netdev);
 
 	if (unlikely(greth->tx_free <= 0)) {
+		spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if ( ctrl & GRETH_RXI )
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -419,13 +427,14 @@ greth_start_xmit(struct sk_buff *skb, st
 		goto out;
 	}
 
+	bdp = greth->tx_bd_base + greth->tx_next;
 	dma_addr = greth_read_bd(&bdp->addr);
 
 	memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 
 	dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 
-	status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+	status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
 
 	/* Wrap around descriptor ring */
 	if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
@@ -435,22 +444,11 @@ greth_start_xmit(struct sk_buff *skb, st
 	greth->tx_next = NEXT_TX(greth->tx_next);
 	greth->tx_free--;
 
-	/* No more descriptors */
-	if (unlikely(greth->tx_free == 0)) {
-
-		/* Free transmitted descriptors */
-		greth_clean_tx(dev);
-
-		/* If nothing was cleaned, stop queue & wait for irq */
-		if (unlikely(greth->tx_free == 0)) {
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
-	}
-
 	/* Write descriptor control word and enable transmission */
 	greth_write_bd(&bdp->stat, status);
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 out:
 	dev_kfree_skb(skb);
@@ -463,13 +461,24 @@ greth_start_xmit_gbit(struct sk_buff *sk
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr;
+	u32 status = 0, dma_addr, ctrl;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+	unsigned long flags;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 
+	/* Clean TX Ring */
+	greth_clean_tx_gbit(dev);
+
 	if (greth->tx_free < nr_frags + 1) {
+		spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		/* Enable TX IRQ only if not already in poll() routine */
+		if ( ctrl & GRETH_RXI ) {
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+		}
 		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -522,14 +531,8 @@ greth_start_xmit_gbit(struct sk_buff *sk
 		/* More fragments left */
 		if (i < nr_frags - 1)
 			status |= GRETH_TXBD_MORE;
-
-		/* ... last fragment, check if out of descriptors */
-		else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
-
-			/* Enable interrupts and stop queue */
-			status |= GRETH_BD_IE;
-			netif_stop_queue(dev);
-		}
+		else
+			status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 
 		greth_write_bd(&bdp->stat, status);
 
@@ -557,7 +560,9 @@ greth_start_xmit_gbit(struct sk_buff *sk
 
 	wmb();
 
+	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 	greth_enable_tx(greth);
+	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
 
@@ -579,12 +584,11 @@ out:
 	return err;
 }
 
-
 static irqreturn_t greth_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct greth_private *greth;
-	u32 status;
+	u32 status, ctrl;
 	irqreturn_t retval = IRQ_NONE;
 
 	greth = netdev_priv(dev);
@@ -594,13 +598,14 @@ static irqreturn_t greth_interrupt(int i
 	/* Get the interrupt events that caused us to be here. */
 	status = GRETH_REGLOAD(greth->regs->status);
 
-	/* Handle rx and tx interrupts through poll */
-	if (status & (GRETH_INT_RE | GRETH_INT_RX | GRETH_INT_TE | GRETH_INT_TX)) {
+	/* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be set regardless
+	 * of whether IRQ is enabled or not. Especially important when shared IRQ.
+	 */
+	ctrl = GRETH_REGLOAD(greth->regs->control);
 
-		/* Clear interrupt status */
-		GRETH_REGSAVE(greth->regs->status,
-			      status & (GRETH_INT_RE | GRETH_INT_RX |
-					GRETH_INT_TE | GRETH_INT_TX));
+	/* Handle rx and tx interrupts through poll */
+	if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
+	    ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
 
 		retval = IRQ_HANDLED;
 
@@ -625,6 +630,8 @@ static void greth_clean_tx(struct net_de
 
 	while (1) {
 		bdp = greth->tx_bd_base + greth->tx_last;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
 		stat = greth_read_bd(&bdp->stat);
 
 		if (unlikely(stat & GRETH_BD_EN))
@@ -685,7 +692,10 @@ static void greth_clean_tx_gbit(struct n
 
 		/* We only clean fully completed SKBs */
 		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
-		stat = bdp_last_frag->stat;
+
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
+		mb();
+		stat = greth_read_bd(&bdp_last_frag->stat);
 
 		if (stat & GRETH_BD_EN)
 			break;
@@ -717,23 +727,12 @@ static void greth_clean_tx_gbit(struct n
 		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
-	if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
+
+	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS + 1))) {
 		netif_wake_queue(dev);
 	}
 }
 
-static int greth_pending_packets(struct greth_private *greth)
-{
-	struct greth_bd *bdp;
-	u32 status;
-	bdp = greth->rx_bd_base + greth->rx_cur;
-	status = greth_read_bd(&bdp->stat);
-	if (status & GRETH_BD_EN)
-		return 0;
-	else
-		return 1;
-}
-
 static int greth_rx(struct net_device *dev, int limit)
 {
 	struct greth_private *greth;
@@ -742,20 +741,24 @@ static int greth_rx(struct net_device *d
 	int pkt_len;
 	int bad, count;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
 	for (count = 0; count < limit; ++count) {
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
-		dma_addr = greth_read_bd(&bdp->addr);
-		bad = 0;
 
 		if (unlikely(status & GRETH_BD_EN)) {
 			break;
 		}
 
+		dma_addr = greth_read_bd(&bdp->addr);
+		bad = 0;
+
 		/* Check status for errors. */
 		if (unlikely(status & GRETH_RXBD_STATUS)) {
 			if (status & GRETH_RXBD_ERR_FT) {
@@ -817,7 +820,9 @@ static int greth_rx(struct net_device *d
 
 		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 
+		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
@@ -851,6 +856,7 @@ static int greth_rx_gbit(struct net_devi
 	int pkt_len;
 	int bad, count = 0;
 	u32 status, dma_addr;
+	unsigned long flags;
 
 	greth = netdev_priv(dev);
 
@@ -858,6 +864,8 @@ static int greth_rx_gbit(struct net_devi
 
 		bdp = greth->rx_bd_base + greth->rx_cur;
 		skb = greth->rx_skbuff[greth->rx_cur];
+		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
+		mb();
 		status = greth_read_bd(&bdp->stat);
 		bad = 0;
 
@@ -940,7 +948,9 @@ static int greth_rx_gbit(struct net_devi
 
 		wmb();
 		greth_write_bd(&bdp->stat, status);
+		spin_lock_irqsave(&greth->devlock, flags);
 		greth_enable_rx(greth);
+		spin_unlock_irqrestore(&greth->devlock, flags);
 		greth->rx_cur = NEXT_RX(greth->rx_cur);
 	}
 
@@ -952,15 +962,19 @@ static int greth_poll(struct napi_struct
 {
 	struct greth_private *greth;
 	int work_done = 0;
+	unsigned long flags;
+	u32 mask, ctrl;
 	greth = container_of(napi, struct greth_private, napi);
 
-	if (greth->gbit_mac) {
-		greth_clean_tx_gbit(greth->netdev);
-	} else {
-		greth_clean_tx(greth->netdev);
+restart_txrx_poll:
+	if ( netif_queue_stopped(greth->netdev) ) {
+		if (greth->gbit_mac) {
+			greth_clean_tx_gbit(greth->netdev);
+		} else {
+			greth_clean_tx(greth->netdev);
+		}
 	}
 
-restart_poll:
 	if (greth->gbit_mac) {
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
@@ -969,15 +983,29 @@ restart_poll:
 
 	if (work_done < budget) {
 
-		napi_complete(napi);
+		spin_lock_irqsave(&greth->devlock, flags);
 
-		if (greth_pending_packets(greth)) {
-			napi_reschedule(napi);
-			goto restart_poll;
+		ctrl = GRETH_REGLOAD(greth->regs->control);
+		if (netif_queue_stopped(greth->netdev)) {
+			GRETH_REGSAVE(greth->regs->control,
+					ctrl | GRETH_TXI | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE |
+					GRETH_INT_TX | GRETH_INT_TE;
+		} else {
+			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
+			mask = GRETH_INT_RX | GRETH_INT_RE;
+		}
+
+		if (GRETH_REGLOAD(greth->regs->status) & mask) {
+			GRETH_REGSAVE(greth->regs->control, ctrl);
+			spin_unlock_irqrestore(&greth->devlock, flags);
+			goto restart_txrx_poll;
+		} else {
+			__napi_complete(napi);
+			spin_unlock_irqrestore(&greth->devlock, flags);
 		}
 	}
 
-	greth_enable_irqs(greth);
 	return work_done;
 }
 
@@ -1172,11 +1200,11 @@ static const struct ethtool_ops greth_et
 };
 
 static struct net_device_ops greth_netdev_ops = {
-	.ndo_open = greth_open,
-	.ndo_stop = greth_close,
-	.ndo_start_xmit = greth_start_xmit,
-	.ndo_set_mac_address = greth_set_mac_add,
-	.ndo_validate_addr = eth_validate_addr,
+	.ndo_open		= greth_open,
+	.ndo_stop		= greth_close,
+	.ndo_start_xmit		= greth_start_xmit,
+	.ndo_set_mac_address	= greth_set_mac_add,
+	.ndo_validate_addr	= eth_validate_addr,
 };
 
 static inline int wait_for_mdio(struct greth_private *greth)
