/*
 * originally drivers/net/tulip_core.c
 * Copyright 2000,2001 The Linux Kernel Team
 * Written/copyright 1994-2001 by Donald Becker.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#define DRV_NAME	"tulip"
#define DRV_VERSION	"1.1.15-NAPI"	/* Keep at least for test */
#define DRV_RELDATE	"Feb 27, 2007"

#include "net.h"

static char version[] __devinitdata =
	"ADM8668net driver version " DRV_VERSION " (" DRV_RELDATE ")\n";

#define MAX_UNITS	2

/*
 * Set the bus performance register.
 *	Typical: Set 16 longword cache alignment, no burst limit.
 *
 *	Cache alignment bits 15:14	Burst length 13:8
 *	0000 no alignment		0x00000000 unlimited	0800 8 longwords
 *	4000 8 longwords		0100 1 longword		1000 16 longwords
 *	8000 16 longwords		0200 2 longwords	2000 32 longwords
 *	C000 32 longwords		0400 4 longwords
 *
 * Warning: many older 486 systems are broken and require setting 0x00A04800
 *	8 longword cache alignment, 8 longword burst.
 * ToDo: Non-Intel setting could be better.
 */

//static int csr0 = 0x00200000 | 0x4000;
static int csr0 = 0;

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(4*HZ)

MODULE_AUTHOR("Scott Nicholas <neutronscott@scottn.us>");
MODULE_DESCRIPTION("ADM8668 new ethernet driver.");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
int tulip_debug = 1;
#endif

static void tulip_tx_timeout(struct net_device *dev);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
//static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif

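/*
 * Bring the interface up: reset the chip, program the descriptor ring
 * base addresses and the station address, then enable Tx/Rx and unmask
 * interrupts.
 */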
static void tulip_up(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	napi_enable(&tp->napi);

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	iowrite32(0x00000001, ioaddr + CSR0);

	/* Deassert reset.
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list. */
	iowrite32(tp->csr0, ioaddr + CSR0);

	if (tulip_debug > 1)
		printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
		       dev->name, dev->irq);

	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	/* set mac address */
	iowrite32(get_unaligned_le32(dev->dev_addr), ioaddr + 0xA4);
	iowrite32(get_unaligned_le16(dev->dev_addr + 4), ioaddr + 0xA8);
	iowrite32(0, ioaddr + CSR27);
	iowrite32(0, ioaddr + CSR28);

	tp->csr6 = 0;

	/* Enable automatic Tx underrun recovery. */
	iowrite32(ioread32(ioaddr + CSR18) | 1, ioaddr + CSR18);
	tp->csr6 = 0x00040000;

	/* Start the chip's Tx to process setup frame. */
	tulip_stop_rxtx(tp);
	barrier();
	udelay(5);
	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite32(VALID_INTR, ioaddr + CSR5);
	iowrite32(VALID_INTR, ioaddr + CSR7);
	tulip_start_rxtx(tp);
	iowrite32(0, ioaddr + CSR2);	/* Rx poll demand */

	if (tulip_debug > 2) {
		printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
		       dev->name, ioread32(ioaddr + CSR0),
		       ioread32(ioaddr + CSR5),
		       ioread32(ioaddr + CSR6));
	}

	init_timer(&tp->oom_timer);
	tp->oom_timer.data = (unsigned long)dev;
	tp->oom_timer.function = oom_timer;
}

static int
tulip_open(struct net_device *dev)
{
	int retval;

	tulip_init_ring(dev);

	retval = request_irq(dev->irq, tulip_interrupt, 0, dev->name, dev);
	if (retval)
		goto free_ring;

	tulip_up(dev);

	netif_start_queue(dev);

	return 0;

free_ring:
	tulip_free_ring(dev);
	return retval;
}

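/*
 * Tx watchdog: report the chip status and hand recovery to
 * tulip_tx_timeout_complete(), then restart the queue.
 */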
static void tulip_tx_timeout(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	dev_warn(&dev->dev,
		 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
		 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));

	tulip_tx_timeout_complete(tp, ioaddr);

	spin_unlock_irqrestore(&tp->lock, flags);
	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void tulip_init_ring(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	tp->nir = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0x00000000;
		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma +
			sizeof(struct tulip_rx_desc) * (i + 1));
		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

	for (i = 0; i < RX_RING_SIZE; i++) {
		dma_addr_t mapping;

		/* Note the receive buffer must be longword aligned.
		   dev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
		tp->rx_buffers[i].skb = skb;
		if (skb == NULL)
			break;
		/* Map through the platform device, matching the unmap in
		   tulip_free_ring(). */
		mapping = dma_map_single(&tp->pdev->dev, skb->data,
					 PKT_BUF_SZ, DMA_FROM_DEVICE);
		tp->rx_buffers[i].mapping = mapping;
		skb->dev = dev;	/* Mark as being used by this device. */
		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
	}
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
		tp->tx_ring[i].status = 0x00000000;
		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma +
			sizeof(struct tulip_tx_desc) * (i + 1));
	}
	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}

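/*
 * Queue one frame: claim the next Tx descriptor, DMA-map the skb, pick
 * an interrupt policy for the descriptor, hand ownership to the chip
 * and issue a Tx poll demand via CSR1.
 */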
static netdev_tx_t
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;
	dma_addr_t mapping;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {	/* Typical path */
		flag = 0x60000000;	/* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000;	/* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000;	/* No Tx-done intr. */
	} else {	/* Leave room for set_rx_mode() to fill entries. */
		flag = 0xe0000000;	/* Tx-done intr. */
		netif_stop_queue(dev);
	}
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
	/* if we were using Transmit Automatic Polling, we would need a
	 * wmb() here. */
	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
	wmb();

	tp->cur_tx++;

	/* Trigger an immediate transmit demand. */
	iowrite32(0, tp->base_addr + CSR1);

	spin_unlock_irqrestore(&tp->lock, flags);

	return NETDEV_TX_OK;
}

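/*
 * Reap Tx descriptors between dirty_tx and cur_tx: unmap and free each
 * skb.  Descriptors still owned by the chip are counted as Tx errors,
 * since this runs only after the Tx engine has been stopped.
 */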
static void tulip_clean_tx_ring(struct tulip_private *tp)
{
	unsigned int dirty_tx;

	for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
	     dirty_tx++) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(tp->tx_ring[entry].status);

		if (status < 0) {	/* DescOwned is the sign bit */
			tp->stats.tx_errors++;	/* It wasn't Txed */
			tp->tx_ring[entry].status = 0;
		}

		dma_unmap_single(&tp->pdev->dev, tp->tx_buffers[entry].mapping,
				 tp->tx_buffers[entry].skb->len,
				 DMA_TO_DEVICE);

		/* Free the original skb. */
		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
		tp->tx_buffers[entry].skb = NULL;
		tp->tx_buffers[entry].mapping = 0;
	}
}

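/*
 * Quiesce the device: stop NAPI and the OOM timer, mask interrupts,
 * halt the Tx/Rx engines and reclaim outstanding buffers.
 */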
static void tulip_down(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	napi_disable(&tp->napi);
	del_timer_sync(&tp->oom_timer);
	spin_lock_irqsave(&tp->lock, flags);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x00000000, ioaddr + CSR7);

	/* Stop the Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* prepare receive buffers */
	tulip_refill_rx(dev);

	/* release any unconsumed transmit buffers */
	tulip_clean_tx_ring(tp);

	if (ioread32(ioaddr + CSR6) != 0xffffffff)
		tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

	spin_unlock_irqrestore(&tp->lock, flags);
}

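/*
 * Release every Rx/Tx buffer: unmap the DMA mappings, free the skbs and
 * poison the Rx descriptors so the chip cannot use them.
 */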
static void tulip_free_ring(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->rx_buffers[i].skb;
		dma_addr_t mapping = tp->rx_buffers[i].mapping;

		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;

		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
		tp->rx_ring[i].length = 0;
		/* An invalid address. */
		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
		if (skb) {
			dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->tx_buffers[i].skb;

		if (skb != NULL) {
			dma_unmap_single(&tp->pdev->dev,
					 tp->tx_buffers[i].mapping, skb->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(skb);
		}
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
	}
}

static int tulip_close(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	netif_stop_queue(dev);

	tulip_down(dev);

	if (tulip_debug > 1)
		dev_printk(KERN_DEBUG, &dev->dev,
			   "Shutting down ethercard, status was %08x\n",
			   ioread32(ioaddr + CSR5));

	free_irq(dev->irq, dev);

	tulip_free_ring(dev);

	return 0;
}

static struct net_device_stats *tulip_get_stats(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	if (netif_running(dev)) {
		unsigned long flags;

		spin_lock_irqsave(&tp->lock, flags);

		tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &tp->stats;
}

static void tulip_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, "mmio", sizeof(info->bus_info));
}

static const struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo
};

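/*
 * Program the Rx filter.  Promiscuous and all-multi modes set the
 * matching CSR6 bits; otherwise each multicast address is hashed into a
 * 64-bit table: the low 6 bits of the little-endian CRC of the address
 * select one bit across the two 32-bit filter words in CSR27/CSR28.
 */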
static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
	} else if ((netdev_mc_count(dev) > 1000) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else {
		/* Some work-alikes have only a 64-entry hash filter table. */
		/* Should verify correctness on big-endian/__powerpc__ */
		struct netdev_hw_addr *ha;

		if (netdev_mc_count(dev) > 64) {
			/* Arbitrary non-effective limit. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};	/* Multicast hash filter */
			int filterbit;

			netdev_for_each_mc_addr(ha, dev) {
				filterbit = ether_crc_le(ETH_ALEN, ha->addr);
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2)
					dev_info(&dev->dev,
						 "Added filter for %pM %08x bit %d\n",
						 ha->addr,
						 ether_crc(ETH_ALEN, ha->addr),
						 filterbit);
			}
			/* Only touch the hardware when the filter changed. */
			if (mc_filter[0] != tp->mc_filter[0] ||
			    mc_filter[1] != tp->mc_filter[1]) {
				iowrite32(mc_filter[0], ioaddr + CSR27);
				iowrite32(mc_filter[1], ioaddr + CSR28);
				tp->mc_filter[0] = mc_filter[0];
				tp->mc_filter[1] = mc_filter[1];
			}
		}
	}

	if (dev->irq == ADM8668_LAN_IRQ)
		csr6 |= (1 << 9);	/* force 100Mbps full duplex */
//	csr6 |= 1;			/* pad 2 bytes. vlan? */

	iowrite32(csr6, ioaddr + CSR6);
}

static const struct net_device_ops tulip_netdev_ops = {
	.ndo_open		= tulip_open,
	.ndo_start_xmit		= tulip_start_xmit,
	.ndo_tx_timeout		= tulip_tx_timeout,
	.ndo_stop		= tulip_close,
	.ndo_get_stats		= tulip_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_tulip,
#endif
};

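/*
 * Platform probe: map the MMIO region, allocate the net_device and the
 * coherent descriptor rings, read the MAC address from the BSP area of
 * NOR flash and register the interface.
 */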
static int __devinit adm8668net_probe(struct platform_device *pdev)
{
	struct tulip_private *tp;
	struct net_device *dev;
	struct resource *res;
	void __iomem *ioaddr;
	int irq;

	if (pdev->id < 0 || pdev->id >= MAX_UNITS)
		return -EINVAL;

	if (!(res = platform_get_resource(pdev, IORESOURCE_IRQ, 0)))
		return -ENODEV;
	irq = res->start;
	if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
		return -ENODEV;
	if (!(ioaddr = ioremap(res->start, resource_size(res))))
		return -ENODEV;
	if (!(dev = alloc_etherdev(sizeof(*tp)))) {
		iounmap(ioaddr);
		return -ENOMEM;
	}

	/* setup net dev */
	dev->base_addr = (unsigned long)res->start;
	dev->irq = irq;
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* tulip private struct */
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->base_addr = ioaddr;
	tp->csr0 = csr0;
	tp->pdev = pdev;
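
	/* Both descriptor rings live in one coherent allocation: the Rx
	   ring comes first, the Tx ring immediately after it. */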
	tp->rx_ring = dma_alloc_coherent(&pdev->dev,
			sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			&tp->rx_ring_dma, GFP_KERNEL);
	if (!tp->rx_ring) {
		free_netdev(dev);
		iounmap(ioaddr);
		return -ENOMEM;
	}
	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
	tp->tx_ring_dma = tp->rx_ring_dma +
			sizeof(struct tulip_rx_desc) * RX_RING_SIZE;

	spin_lock_init(&tp->lock);

	/* Stop the chip's Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* Clear the missed-packet counter. */
	ioread32(ioaddr + CSR8);

	/* Addresses are stored in BSP area of NOR flash */
	if (irq == ADM8668_WAN_IRQ)
		memcpy(dev->dev_addr, (char *)ADM8668_WAN_MACADDR, 6);
	else
		memcpy(dev->dev_addr, (char *)ADM8668_LAN_MACADDR, 6);

	/* The Tulip-specific entries in the device structure. */
	dev->netdev_ops = &tulip_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
	SET_ETHTOOL_OPS(dev, &ops);

	if (register_netdev(dev))
		goto err_out_free_ring;

	dev_info(&dev->dev,
		 "ADM8668net at MMIO %#lx %pM, IRQ %d\n",
		 (unsigned long)dev->base_addr, dev->dev_addr, irq);

	platform_set_drvdata(pdev, dev);
	return 0;

err_out_free_ring:
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			  tp->rx_ring, tp->rx_ring_dma);
	free_netdev(dev);
	iounmap(ioaddr);
	return -ENODEV;
}

static int __devexit adm8668net_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct tulip_private *tp;

	if (!dev)
		return -ENODEV;

	tp = netdev_priv(dev);
	unregister_netdev(dev);
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			  tp->rx_ring, tp->rx_ring_dma);
	iounmap(tp->base_addr);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void poll_tulip(struct net_device *dev)
{
	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(dev->irq);
	tulip_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static struct platform_driver adm8668net_platform_driver = {
	.probe		= adm8668net_probe,
	.remove		= __devexit_p(adm8668net_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "adm8668_eth",
	},
};

static int __init adm8668net_init(void)
{
	pr_info("%s", version);
	return platform_driver_register(&adm8668net_platform_driver);
}

static void __exit adm8668net_exit(void)
{
	platform_driver_unregister(&adm8668net_platform_driver);
}

module_init(adm8668net_init);
module_exit(adm8668net_exit);