/*
 * originally drivers/net/tulip/interrupt.c
 * Copyright 2000,2001 The Linux Kernel Team
 * Written/copyright 1994-2001 by Donald Becker.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include "net.h"

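/*
 * Refill empty slots in the Rx ring: allocate a fresh skbuff for each
 * slot whose buffer has been handed up the stack, map it for DMA, and
 * hand the descriptor back to the chip (DescOwned). Returns the number
 * of descriptors refilled; stops early if an allocation fails.
 */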
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;
			/* Map via the PCI device, so the mapping matches the
			 * dma_unmap_single() calls elsewhere in this file. */
			mapping = dma_map_single(&tp->pdev->dev, skb->data,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;	/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	return refilled;
}

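/*
 * Out-of-memory timer callback: tulip_poll() arms this timer (and stops
 * polling) when it cannot refill the Rx ring. When the timer fires we
 * simply reschedule NAPI polling so the refill can be retried.
 */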
void oom_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	napi_schedule(&tp->napi);
}

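/*
 * NAPI poll callback: ack and drain completed Rx descriptors, passing up
 * to @budget packets to the stack, then refill the ring and re-enable Rx
 * interrupts once all pending work is done.
 */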
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
		       entry, le32_to_cpu(tp->rx_ring[entry].status));

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
				       dev->name, entry, status);

			if (++work_done >= budget)
				goto not_done;

			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked status for RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

#if 0
			csr6 = ioread32(tp->base_addr + CSR6);
			if (csr6 & 0x1)
				pkt_len += 2;
#endif
			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */
			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							dev_warn(&dev->dev,
								 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								 status);
						tp->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet. */
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						tp->stats.rx_length_errors++;

					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb = tp->rx_buffers[entry].skb;
				char *temp = skb_put(skb, pkt_len);

#if 0
				if (csr6 & 1)
					skb_pull(skb, 2);
#endif
#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(unsigned long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				/* Unmap the buffer before handing it to the
				 * stack; this pairs with the dma_map_single()
				 * in tulip_refill_rx(). */
				dma_unmap_single(&tp->pdev->dev,
						 tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy... the irq handler no longer acks Rx;
		   hopefully this helps */

		/* Really bad things can happen here... If a new packet arrives
		 * and an irq arrives (tx, or just due to an occasionally unset
		 * mask), it will be acked by the irq handler, but the new poll
		 * is not scheduled. It is a major hole in the design.
		 * No idea how to fix this if "playing with fire" fails
		 * tomorrow (night 011029). If it does not fail, we have won
		 * finally: the amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

	tulip_refill_rx(dev);

	/* If the RX ring is still not full, we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from the polling list and enable RX intr. */
	napi_complete(napi);
	iowrite32(VALID_INTR, tp->base_addr + CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in the irq handler
	 * 2. it can race with disabling/enabling irqs in other poll threads
	 * 3. if an irq was raised after the beginning of the loop, it will be
	 *    immediately triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

oom:	/* Executed with RX ints disabled */

	/* Start the timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies + 1);

	/* Think: checking timer_pending() here would be an explicit signature
	 * of a bug. The timer can be pending now, yet have fired and completed
	 * before we did napi_complete(). See? We would lose the event. */

	/* remove ourselves from the polling list */
	napi_complete(napi);

	return work_done;
}

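/*
 * For orientation, a sketch of how these callbacks are presumably wired
 * together by the rest of this port. The registration code is not in this
 * file, and the init-path details below are assumptions based on the
 * upstream tulip driver, not a definitive implementation:
 *
 *	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
 *	setup_timer(&tp->oom_timer, oom_timer, (unsigned long)dev);
 *	request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
 */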
/* The interrupt handler: schedules NAPI polling for the Rx thread work and
   cleans up after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	int rxd = 0;
	unsigned int work_count = 25;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

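	/* Service loop: handle Tx completions, abnormal-interrupt sources and
	   housekeeping until CSR5 quiesces or a work limit below is hit. */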
	do {

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to the poll list. */
			iowrite32(VALID_INTR & ~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP;
		   the poll function does the Rx and RxNoBuf acking */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
			       dev->name, csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if (status & 0x0080) tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				dma_unmap_single(&tp->pdev->dev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len, DMA_TO_DEVICE);
				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				iowrite32(tp->mc_filter[0], ioaddr + CSR27);
				iowrite32(tp->mc_filter[1], ioaddr + CSR28);
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
			 * if this call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error. The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {
			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(VALID_INTR, ioaddr + CSR7);
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			/* Mask all interrupting sources, set the timer to
			   re-enable. */
			iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
			iowrite32(0x0012, ioaddr + CSR11);
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);

	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += (missed & 0x10000) ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
		       dev->name, ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}