From 544631281bed5cc37b8f2d3a99f44c9d4b97f9a8 Mon Sep 17 00:00:00 2001
From: Daniel Hellstrom <daniel@gaisler.com>
Date: Wed, 1 Dec 2010 10:07:12 +0100
Subject: [PATCH] GRETH: GBit transmit descriptor handling optimization

It is safe to enable all fragment descriptors before enabling the first
descriptor; this way the descriptors do not have to be processed a second
time. An extra memory barrier is added before the transmitter is started.

Signed-off-by: Daniel Hellstrom <daniel@gaisler.com>
---
 drivers/net/greth.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

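The ordering the patch moves to, sketched as a minimal stand-alone C model
(struct bd, BD_EN, BD_LEN, NEXT() and wmb_model() are illustrative stand-ins
for the driver's descriptor ring, GRETH_BD_EN, GRETH_BD_LEN, NEXT_TX() and
the kernel's wmb(); this is not the greth API):

#include <stdint.h>

#define BD_EN    0x0800u              /* illustrative "enable" bit   */
#define BD_LEN   0x07ffu              /* illustrative length mask    */
#define RING_SZ  128
#define NEXT(i)  (((i) + 1) % RING_SZ)

struct bd { uint32_t stat; uint32_t addr; };

/* stand-in for the kernel's wmb(): keep the stores below ordered */
#define wmb_model() __asm__ __volatile__("" ::: "memory")

/*
 * Queue one packet occupying descriptors head, head+1, ..., head+nfrags.
 * Every fragment descriptor is written with BD_EN already set; the NIC
 * cannot start on them while the head descriptor is still disabled, so
 * no second pass over the chain is needed.
 */
void xmit_model(struct bd *ring, int head,
		const uint32_t *frag_len, int nfrags)
{
	int i, cur = NEXT(head);

	for (i = 0; i < nfrags; i++) {
		ring[cur].stat = BD_EN | (frag_len[i] & BD_LEN);
		cur = NEXT(cur);
	}

	wmb_model();              /* fragment writes become visible first  */
	ring[head].stat |= BD_EN; /* arming the head hands over the chain  */
	wmb_model();              /* ...and only then kick the TX register */
}

The barrier between arming the head descriptor and starting the transmitter
corresponds to the extra wmb() this patch adds.
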
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -512,7 +512,7 @@ greth_start_xmit_gbit(struct sk_buff *sk
 		greth->tx_skbuff[curr_tx] = NULL;
 		bdp = greth->tx_bd_base + curr_tx;
 
-		status = GRETH_TXBD_CSALL;
+		status = GRETH_TXBD_CSALL | GRETH_BD_EN;
 		status |= frag->size & GRETH_BD_LEN;
 
 		/* Wrap around descriptor ring */
@@ -549,26 +549,27 @@ greth_start_xmit_gbit(struct sk_buff *sk
 
 	wmb();
 
-	/* Enable the descriptors that we configured ... */
-	for (i = 0; i < nr_frags + 1; i++) {
-		bdp = greth->tx_bd_base + greth->tx_next;
-		greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-		greth->tx_next = NEXT_TX(greth->tx_next);
-		greth->tx_free--;
-	}
+	/* Enable the descriptor chain by enabling the first descriptor */
+	bdp = greth->tx_bd_base + greth->tx_next;
+	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+	greth->tx_next = curr_tx;
+	greth->tx_free -= nr_frags + 1;
+
+	wmb();
 
 	greth_enable_tx(greth);
 
 	return NETDEV_TX_OK;
 
 frag_map_error:
-	/* Unmap SKB mappings that succeeded */
+	/* Unmap SKB mappings that succeeded and disable descriptor */
 	for (i = 0; greth->tx_next + i != curr_tx; i++) {
 		bdp = greth->tx_bd_base + greth->tx_next + i;
 		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
				 DMA_TO_DEVICE);
+		greth_write_bd(&bdp->stat, 0);
 	}
 map_error:
 	if (net_ratelimit())

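One consequence of setting GRETH_BD_EN at setup time shows in the error path
above: descriptors written before a failed DMA mapping already carry the
enable bit, so the unwind must clear their status words as well as unmap
them, or a later transmit could hand stale, enabled descriptors to the NIC.
A sketch of that unwind, reusing the illustrative model from above (the
dma_unmap_single() call is only marked by a comment, not modelled):

void unwind_model(struct bd *ring, int head, int failed)
{
	int i;

	/* walk the partially built chain, oldest descriptor first */
	for (i = head; i != failed; i = NEXT(i)) {
		/* dma_unmap_single(...) happens here in the driver */
		ring[i].stat = 0;  /* drop BD_EN so the chain can never start */
	}
}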