/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 *	Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <linux/slab.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register default value.
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	256
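/*
 * Reserving this many extra bytes in rxq_refill() is intended to push
 * skb->data up to a cache line boundary for DMA (dev_alloc_skb() data
 * starts NET_SKB_PAD bytes into the underlying allocation); this
 * assumes the allocation itself is at least SMP_CACHE_BYTES aligned.
 */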
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)


/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus *smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
	int extended_rx_coal_limit;
	int tx_bw_control;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct lro_counters {
	u32 lro_aggregated;
	u32 lro_flushed;
	u32 lro_no_desc;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_arr[8];
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct lro_counters lro_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;
	struct sk_buff_head rx_recycle;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
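/*
 * The rxq[]/txq[] arrays are embedded in struct mv643xx_eth_private,
 * so container_of() on the indexed array element recovers a pointer
 * to the owning port's private data.
 */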
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}


/* rx napi ******************************************************************/
static int
mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
		       u64 *hdr_flags, void *priv)
{
	unsigned long cmd_sts = (unsigned long)priv;

	/*
	 * Make sure that this packet is Ethernet II, is not VLAN
	 * tagged, is IPv4, has a valid IP header, and is TCP.
	 */
	if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
		       RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK |
		       RX_PKT_IS_VLAN_TAGGED)) !=
	    (RX_IP_HDR_OK | RX_PKT_IS_IPV4 |
	     RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4))
		return -1;

	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ip_hdrlen(skb));
	*iphdr = ip_hdr(skb);
	*tcph = tcp_hdr(skb);
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	return 0;
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int lro_flush_needed;
	int rx;

	lro_flush_needed = 0;
	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		if (skb->dev->features & NETIF_F_LRO &&
		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
			lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts);
			lro_flush_needed = 1;
		} else
			netif_receive_skb(skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		    (RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				dev_printk(KERN_ERR, &mp->dev->dev,
					   "received packet spanning "
					   "multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (lro_flush_needed)
		lro_flush_all(&rxq->lro_mgr);

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb->end - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
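/*
 * The TX DMA engine can't safely handle buffer fragments that are
 * both 8 bytes or less in size and not 8-byte aligned; skbs with
 * such fragments are linearized in mv643xx_eth_xmit() instead.
 */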
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(mp->dev->dev.parent,
					     this_frag->page,
					     this_frag->page_offset,
					     this_frag->size, DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
		if (unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		dev->trans_start = jiffies;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
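/*
 * For example (values are illustrative only): asking for 100 Mbit/s
 * on a hypothetical 133 MHz t_clk gives a token rate of
 * (100000000 / 1000) * 64 / (133000000 / 1000) ~= 48, well within
 * the 10-bit (1023) limit enforced below.
 */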
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}


/* mii management interface *************************************************/
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}

static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!smi_is_done(msp)) {
		wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				   msecs_to_jiffies(100));
		if (!smi_is_done(msp))
			return -ETIMEDOUT;
	}

	return 0;
}

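/*
 * SMI_REG layout, as used below: bits 25:21 carry the PHY register
 * number, bits 20:16 the PHY address, and bits 15:0 the data word;
 * the opcode and status flags are defined with SMI_REG above.
 */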
static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
		return -ENODEV;
	}

	return ret & 0xffff;
}

static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}


/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp)
{
	u32 lro_aggregated = 0;
	u32 lro_flushed = 0;
	u32 lro_no_desc = 0;
	int i;

	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;

		lro_aggregated += rxq->lro_mgr.stats.aggregated;
		lro_flushed += rxq->lro_mgr.stats.flushed;
		lro_no_desc += rxq->lro_mgr.stats.no_desc;
	}

	mp->lro_counters.lro_aggregated = lro_aggregated;
	mp->lro_counters.lro_flushed = lro_flushed;
	mp->lro_counters.lro_no_desc = lro_no_desc;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

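/*
 * The hardware MIB counters are clear-on-read; walking the whole
 * 0x80-byte counter block is therefore enough to zero them, and
 * mib_counters_update() below accumulates each read into software
 * counters.
 */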
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock_bh(&mp->mib_counters_lock);

	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
}


/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
 */
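/*
 * For example (values are illustrative only): with a hypothetical
 * 133 MHz t_clk, a requested delay of 250 usec maps to
 * 250 * 133000000 / 64000000 ~= 520 units of 64 t_clk cycles each.
 */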
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	temp *= 64000000;
	do_div(temp, mp->shared->t_clk);

	return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	temp = (u64)usec * mp->shared->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

#define LROSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct lro_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, lro_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
	LROSTAT(lro_aggregated),
	LROSTAT(lro_flushed),
	LROSTAT(lro_no_desc),
};

static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
			     struct ethtool_cmd *cmd)
{
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
				 struct ethtool_cmd *cmd)
{
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return mv643xx_eth_get_settings_phy(mp, cmd);
	else
		return mv643xx_eth_get_settings_phyless(mp, cmd);
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	return genphy_restart_aneg(mp->phy);
}

static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	return !!netif_carrier_ok(dev);
}

static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	ec->rx_coalesce_usecs = get_rx_coal(mp);
	ec->tx_coalesce_usecs = get_tx_coal(mp);

	return 0;
}

static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	set_rx_coal(mp, ec->rx_coalesce_usecs);
	set_tx_coal(mp, ec->tx_coalesce_usecs);

	return 0;
}

static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	er->rx_max_pending = 4096;
	er->tx_max_pending = 4096;
	er->rx_mini_max_pending = 0;
	er->rx_jumbo_max_pending = 0;

	er->rx_pending = mp->rx_ring_size;
	er->tx_pending = mp->tx_ring_size;
	er->rx_mini_pending = 0;
	er->rx_jumbo_pending = 0;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (er->rx_mini_pending || er->rx_jumbo_pending)
		return -EINVAL;

	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
	mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;

	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			dev_printk(KERN_ERR, &dev->dev,
				   "fatal error on re-opening device after "
				   "ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static u32
mv643xx_eth_get_rx_csum(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return !!(rdlp(mp, PORT_CONFIG) & 0x02000000);
}

static int
mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

	return 0;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	mv643xx_eth_grab_lro_stats(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.get_coalesce		= mv643xx_eth_get_coalesce,
	.set_coalesce		= mv643xx_eth_set_coalesce,
	.get_ringparam		= mv643xx_eth_get_ringparam,
	.set_ringparam		= mv643xx_eth_set_ringparam,
	.get_rx_csum		= mv643xx_eth_get_rx_csum,
	.set_rx_csum		= mv643xx_eth_set_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_flags		= ethtool_op_get_flags,
	.set_flags		= ethtool_op_set_flags,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

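/*
 * Build a bitmap of the low address nibbles we need to accept: the
 * hardware can filter unicast addresses that differ from the port's
 * own address only in the last four bits.  Returning zero makes the
 * caller fall back to unicast promiscuous mode.
 */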
static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	netdev_for_each_uc_addr(ha, dev) {
		if (memcmp(dev->dev_addr, ha->addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (ha->addr[5] & 0x0f);
	}

	return nibbles;
}

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		nibbles = 0xffff;
	}

	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	wrlp(mp, PORT_CONFIG, port_config);
}

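/*
 * Hash an Ethernet address down to a single byte with the CRC-8
 * polynomial x^8 + x^2 + x + 1 (0x107); the result indexes the
 * 256-entry "other" multicast table below.
 */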
1750 | static int addr_crc(unsigned char *addr) |
1751 | { |
1752 | int crc = 0; |
1753 | int i; |
1754 | |
1755 | for (i = 0; i < 6; i++) { |
1756 | int j; |
1757 | |
1758 | crc = (crc ^ addr[i]) << 8; |
1759 | for (j = 7; j >= 0; j--) { |
1760 | if (crc & (0x100 << j)) |
1761 | crc ^= 0x107 << j; |
1762 | } |
1763 | } |
1764 | |
1765 | return crc; |
1766 | } |
1767 | |
1768 | static void mv643xx_eth_program_multicast_filter(struct net_device *dev) |
1769 | { |
1770 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
1771 | u32 *mc_spec; |
1772 | u32 *mc_other; |
1773 | struct dev_addr_list *addr; |
1774 | int i; |
1775 | |
1776 | if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { |
1777 | int port_num; |
1778 | u32 accept; |
1779 | |
1780 | oom: |
1781 | port_num = mp->port_num; |
1782 | accept = 0x01010101; |
1783 | for (i = 0; i < 0x100; i += 4) { |
1784 | wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); |
1785 | wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); |
1786 | } |
1787 | return; |
1788 | } |
1789 | |
1790 | mc_spec = kmalloc(0x200, GFP_ATOMIC); |
1791 | if (mc_spec == NULL) |
1792 | goto oom; |
1793 | mc_other = mc_spec + (0x100 >> 2); |
1794 | |
1795 | memset(mc_spec, 0, 0x100); |
1796 | memset(mc_other, 0, 0x100); |
1797 | |
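/*
 * Addresses in the 01:00:5e:00:00:xx block index the "special"
 * table directly by their last byte; all other multicasts are
 * hashed by addr_crc() into the "other" table.
 */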
1798 | netdev_for_each_mc_addr(addr, dev) { |
1799 | u8 *a = addr->da_addr; |
1800 | u32 *table; |
1801 | int entry; |
1802 | |
1803 | if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { |
1804 | table = mc_spec; |
1805 | entry = a[5]; |
1806 | } else { |
1807 | table = mc_other; |
1808 | entry = addr_crc(a); |
1809 | } |
1810 | |
1811 | table[entry >> 2] |= 1 << (8 * (entry & 3)); |
1812 | } |
1813 | |
1814 | for (i = 0; i < 0x100; i += 4) { |
1815 | wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); |
1816 | wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); |
1817 | } |
1818 | |
1819 | kfree(mc_spec); |
1820 | } |
1821 | |
1822 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) |
1823 | { |
1824 | mv643xx_eth_program_unicast_filter(dev); |
1825 | mv643xx_eth_program_multicast_filter(dev); |
1826 | } |
1827 | |
1828 | static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) |
1829 | { |
1830 | struct sockaddr *sa = addr; |
1831 | |
1832 | if (!is_valid_ether_addr(sa->sa_data)) |
1833 | return -EINVAL; |
1834 | |
1835 | memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); |
1836 | |
1837 | netif_addr_lock_bh(dev); |
1838 | mv643xx_eth_program_unicast_filter(dev); |
1839 | netif_addr_unlock_bh(dev); |
1840 | |
1841 | return 0; |
1842 | } |
1843 | |
1844 | |
1845 | /* rx/tx queue initialisation ***********************************************/ |
1846 | static int rxq_init(struct mv643xx_eth_private *mp, int index) |
1847 | { |
1848 | struct rx_queue *rxq = mp->rxq + index; |
1849 | struct rx_desc *rx_desc; |
1850 | int size; |
1851 | int i; |
1852 | |
1853 | rxq->index = index; |
1854 | |
1855 | rxq->rx_ring_size = mp->rx_ring_size; |
1856 | |
1857 | rxq->rx_desc_count = 0; |
1858 | rxq->rx_curr_desc = 0; |
1859 | rxq->rx_used_desc = 0; |
1860 | |
1861 | size = rxq->rx_ring_size * sizeof(struct rx_desc); |
1862 | |
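/*
 * Queue 0 may use the on-chip SRAM descriptor area if one was
 * provided and is large enough; otherwise fall back to coherent
 * DMA memory.
 */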
1863 | if (index == 0 && size <= mp->rx_desc_sram_size) { |
1864 | rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, |
1865 | mp->rx_desc_sram_size); |
1866 | rxq->rx_desc_dma = mp->rx_desc_sram_addr; |
1867 | } else { |
1868 | rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, |
1869 | size, &rxq->rx_desc_dma, |
1870 | GFP_KERNEL); |
1871 | } |
1872 | |
1873 | if (rxq->rx_desc_area == NULL) { |
1874 | dev_printk(KERN_ERR, &mp->dev->dev, |
1875 | "can't allocate rx ring (%d bytes)\n", size); |
1876 | goto out; |
1877 | } |
1878 | memset(rxq->rx_desc_area, 0, size); |
1879 | |
1880 | rxq->rx_desc_area_size = size; |
1881 | rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), |
1882 | GFP_KERNEL); |
1883 | if (rxq->rx_skb == NULL) { |
1884 | dev_printk(KERN_ERR, &mp->dev->dev, |
1885 | "can't allocate rx skb ring\n"); |
1886 | goto out_free; |
1887 | } |
1888 | |
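/*
 * Chain the descriptors into a circular ring: each descriptor's
 * next_desc_ptr holds the bus address of its successor, and the
 * last entry wraps back to the first.
 */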
1889 | rx_desc = (struct rx_desc *)rxq->rx_desc_area; |
1890 | for (i = 0; i < rxq->rx_ring_size; i++) { |
1891 | int nexti; |
1892 | |
1893 | nexti = i + 1; |
1894 | if (nexti == rxq->rx_ring_size) |
1895 | nexti = 0; |
1896 | |
1897 | rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + |
1898 | nexti * sizeof(struct rx_desc); |
1899 | } |
1900 | |
1901 | rxq->lro_mgr.dev = mp->dev; |
1902 | memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats)); |
1903 | rxq->lro_mgr.features = LRO_F_NAPI; |
1904 | rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; |
1905 | rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; |
1906 | rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr); |
1907 | rxq->lro_mgr.max_aggr = 32; |
1908 | rxq->lro_mgr.frag_align_pad = 0; |
1909 | rxq->lro_mgr.lro_arr = rxq->lro_arr; |
1910 | rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header; |
1911 | |
1912 | memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr)); |
1913 | |
1914 | return 0; |
1915 | |
1916 | |
1917 | out_free: |
1918 | if (index == 0 && size <= mp->rx_desc_sram_size) |
1919 | iounmap(rxq->rx_desc_area); |
1920 | else |
1921 | dma_free_coherent(mp->dev->dev.parent, size, |
1922 | rxq->rx_desc_area, |
1923 | rxq->rx_desc_dma); |
1924 | |
1925 | out: |
1926 | return -ENOMEM; |
1927 | } |
1928 | |
1929 | static void rxq_deinit(struct rx_queue *rxq) |
1930 | { |
1931 | struct mv643xx_eth_private *mp = rxq_to_mp(rxq); |
1932 | int i; |
1933 | |
1934 | rxq_disable(rxq); |
1935 | |
1936 | for (i = 0; i < rxq->rx_ring_size; i++) { |
1937 | if (rxq->rx_skb[i]) { |
1938 | dev_kfree_skb(rxq->rx_skb[i]); |
1939 | rxq->rx_desc_count--; |
1940 | } |
1941 | } |
1942 | |
1943 | if (rxq->rx_desc_count) { |
1944 | dev_printk(KERN_ERR, &mp->dev->dev, |
1945 | "error freeing rx ring -- %d skbs stuck\n", |
1946 | rxq->rx_desc_count); |
1947 | } |
1948 | |
1949 | if (rxq->index == 0 && |
1950 | rxq->rx_desc_area_size <= mp->rx_desc_sram_size) |
1951 | iounmap(rxq->rx_desc_area); |
1952 | else |
1953 | dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, |
1954 | rxq->rx_desc_area, rxq->rx_desc_dma); |
1955 | |
1956 | kfree(rxq->rx_skb); |
1957 | } |
1958 | |
1959 | static int txq_init(struct mv643xx_eth_private *mp, int index) |
1960 | { |
1961 | struct tx_queue *txq = mp->txq + index; |
1962 | struct tx_desc *tx_desc; |
1963 | int size; |
1964 | int i; |
1965 | |
1966 | txq->index = index; |
1967 | |
1968 | txq->tx_ring_size = mp->tx_ring_size; |
1969 | |
1970 | txq->tx_desc_count = 0; |
1971 | txq->tx_curr_desc = 0; |
1972 | txq->tx_used_desc = 0; |
1973 | |
1974 | size = txq->tx_ring_size * sizeof(struct tx_desc); |
1975 | |
1976 | if (index == 0 && size <= mp->tx_desc_sram_size) { |
1977 | txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, |
1978 | mp->tx_desc_sram_size); |
1979 | txq->tx_desc_dma = mp->tx_desc_sram_addr; |
1980 | } else { |
1981 | txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, |
1982 | size, &txq->tx_desc_dma, |
1983 | GFP_KERNEL); |
1984 | } |
1985 | |
1986 | if (txq->tx_desc_area == NULL) { |
1987 | dev_printk(KERN_ERR, &mp->dev->dev, |
1988 | "can't allocate tx ring (%d bytes)\n", size); |
1989 | return -ENOMEM; |
1990 | } |
1991 | memset(txq->tx_desc_area, 0, size); |
1992 | |
1993 | txq->tx_desc_area_size = size; |
1994 | |
1995 | tx_desc = (struct tx_desc *)txq->tx_desc_area; |
1996 | for (i = 0; i < txq->tx_ring_size; i++) { |
1997 | struct tx_desc *txd = tx_desc + i; |
1998 | int nexti; |
1999 | |
2000 | nexti = i + 1; |
2001 | if (nexti == txq->tx_ring_size) |
2002 | nexti = 0; |
2003 | |
2004 | txd->cmd_sts = 0; |
2005 | txd->next_desc_ptr = txq->tx_desc_dma + |
2006 | nexti * sizeof(struct tx_desc); |
2007 | } |
2008 | |
2009 | skb_queue_head_init(&txq->tx_skb); |
2010 | |
2011 | return 0; |
2012 | } |
2013 | |
2014 | static void txq_deinit(struct tx_queue *txq) |
2015 | { |
2016 | struct mv643xx_eth_private *mp = txq_to_mp(txq); |
2017 | |
2018 | txq_disable(txq); |
2019 | txq_reclaim(txq, txq->tx_ring_size, 1); |
2020 | |
2021 | BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); |
2022 | |
2023 | if (txq->index == 0 && |
2024 | txq->tx_desc_area_size <= mp->tx_desc_sram_size) |
2025 | iounmap(txq->tx_desc_area); |
2026 | else |
2027 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, |
2028 | txq->tx_desc_area, txq->tx_desc_dma); |
2029 | } |
2030 | |
2031 | |
2032 | /* netdev ops and related ***************************************************/ |
2033 | static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) |
2034 | { |
2035 | u32 int_cause; |
2036 | u32 int_cause_ext; |
2037 | |
2038 | int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; |
2039 | if (int_cause == 0) |
2040 | return 0; |
2041 | |
2042 | int_cause_ext = 0; |
2043 | if (int_cause & INT_EXT) { |
2044 | int_cause &= ~INT_EXT; |
2045 | int_cause_ext = rdlp(mp, INT_CAUSE_EXT); |
2046 | } |
2047 | |
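/*
 * The TX-end and RX cause bits are laid out so that shifting
 * them down (by 19 and 2 bits respectively) lines bit N of the
 * per-queue work mask up with queue N; queues still enabled in
 * TXQ_COMMAND are masked out of work_tx_end.
 */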
2048 | if (int_cause) { |
2049 | wrlp(mp, INT_CAUSE, ~int_cause); |
2050 | mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & |
2051 | ~(rdlp(mp, TXQ_COMMAND) & 0xff); |
2052 | mp->work_rx |= (int_cause & INT_RX) >> 2; |
2053 | } |
2054 | |
2055 | int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; |
2056 | if (int_cause_ext) { |
2057 | wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); |
2058 | if (int_cause_ext & INT_EXT_LINK_PHY) |
2059 | mp->work_link = 1; |
2060 | mp->work_tx |= int_cause_ext & INT_EXT_TX; |
2061 | } |
2062 | |
2063 | return 1; |
2064 | } |
2065 | |
2066 | static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) |
2067 | { |
2068 | struct net_device *dev = (struct net_device *)dev_id; |
2069 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
2070 | |
2071 | if (unlikely(!mv643xx_eth_collect_events(mp))) |
2072 | return IRQ_NONE; |
2073 | |
2074 | wrlp(mp, INT_MASK, 0); |
2075 | napi_schedule(&mp->napi); |
2076 | |
2077 | return IRQ_HANDLED; |
2078 | } |
2079 | |
2080 | static void handle_link_event(struct mv643xx_eth_private *mp) |
2081 | { |
2082 | struct net_device *dev = mp->dev; |
2083 | u32 port_status; |
2084 | int speed; |
2085 | int duplex; |
2086 | int fc; |
2087 | |
2088 | port_status = rdlp(mp, PORT_STATUS); |
2089 | if (!(port_status & LINK_UP)) { |
2090 | if (netif_carrier_ok(dev)) { |
2091 | int i; |
2092 | |
2093 | printk(KERN_INFO "%s: link down\n", dev->name); |
2094 | |
2095 | netif_carrier_off(dev); |
2096 | |
2097 | for (i = 0; i < mp->txq_count; i++) { |
2098 | struct tx_queue *txq = mp->txq + i; |
2099 | |
2100 | txq_reclaim(txq, txq->tx_ring_size, 1); |
2101 | txq_reset_hw_ptr(txq); |
2102 | } |
2103 | } |
2104 | return; |
2105 | } |
2106 | |
2107 | switch (port_status & PORT_SPEED_MASK) { |
2108 | case PORT_SPEED_10: |
2109 | speed = 10; |
2110 | break; |
2111 | case PORT_SPEED_100: |
2112 | speed = 100; |
2113 | break; |
2114 | case PORT_SPEED_1000: |
2115 | speed = 1000; |
2116 | break; |
2117 | default: |
2118 | speed = -1; |
2119 | break; |
2120 | } |
2121 | duplex = (port_status & FULL_DUPLEX) ? 1 : 0; |
2122 | fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; |
2123 | |
2124 | printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " |
2125 | "flow control %sabled\n", dev->name, |
2126 | speed, duplex ? "full" : "half", |
2127 | fc ? "en" : "dis"); |
2128 | |
2129 | if (!netif_carrier_ok(dev)) |
2130 | netif_carrier_on(dev); |
2131 | } |
2132 | |
2133 | static int mv643xx_eth_poll(struct napi_struct *napi, int budget) |
2134 | { |
2135 | struct mv643xx_eth_private *mp; |
2136 | int work_done; |
2137 | |
2138 | mp = container_of(napi, struct mv643xx_eth_private, napi); |
2139 | |
2140 | if (unlikely(mp->oom)) { |
2141 | mp->oom = 0; |
2142 | del_timer(&mp->rx_oom); |
2143 | } |
2144 | |
2145 | work_done = 0; |
2146 | while (work_done < budget) { |
2147 | u8 queue_mask; |
2148 | int queue; |
2149 | int work_tbd; |
2150 | |
2151 | if (mp->work_link) { |
2152 | mp->work_link = 0; |
2153 | handle_link_event(mp); |
2154 | work_done++; |
2155 | continue; |
2156 | } |
2157 | |
2158 | queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; |
2159 | if (likely(!mp->oom)) |
2160 | queue_mask |= mp->work_rx_refill; |
2161 | |
2162 | if (!queue_mask) { |
2163 | if (mv643xx_eth_collect_events(mp)) |
2164 | continue; |
2165 | break; |
2166 | } |
2167 | |
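/*
 * fls() returns the 1-based index of the highest set bit, so
 * the highest-numbered queue with pending work is serviced
 * first; within that queue, the ladder below prioritises TX-end
 * over TX reclaim over RX over RX refill.
 */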
2168 | queue = fls(queue_mask) - 1; |
2169 | queue_mask = 1 << queue; |
2170 | |
2171 | work_tbd = budget - work_done; |
2172 | if (work_tbd > 16) |
2173 | work_tbd = 16; |
2174 | |
2175 | if (mp->work_tx_end & queue_mask) { |
2176 | txq_kick(mp->txq + queue); |
2177 | } else if (mp->work_tx & queue_mask) { |
2178 | work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); |
2179 | txq_maybe_wake(mp->txq + queue); |
2180 | } else if (mp->work_rx & queue_mask) { |
2181 | work_done += rxq_process(mp->rxq + queue, work_tbd); |
2182 | } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { |
2183 | work_done += rxq_refill(mp->rxq + queue, work_tbd); |
2184 | } else { |
2185 | BUG(); |
2186 | } |
2187 | } |
2188 | |
2189 | if (work_done < budget) { |
2190 | if (mp->oom) |
2191 | mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); |
2192 | napi_complete(napi); |
2193 | wrlp(mp, INT_MASK, mp->int_mask); |
2194 | } |
2195 | |
2196 | return work_done; |
2197 | } |
2198 | |
2199 | static inline void oom_timer_wrapper(unsigned long data) |
2200 | { |
2201 | struct mv643xx_eth_private *mp = (void *)data; |
2202 | |
2203 | napi_schedule(&mp->napi); |
2204 | } |
2205 | |
2206 | static void phy_reset(struct mv643xx_eth_private *mp) |
2207 | { |
2208 | int data; |
2209 | |
2210 | data = phy_read(mp->phy, MII_BMCR); |
2211 | if (data < 0) |
2212 | return; |
2213 | |
2214 | data |= BMCR_RESET; |
2215 | if (phy_write(mp->phy, MII_BMCR, data) < 0) |
2216 | return; |
2217 | |
2218 | do { |
2219 | data = phy_read(mp->phy, MII_BMCR); |
2220 | } while (data >= 0 && data & BMCR_RESET); |
2221 | } |
2222 | |
2223 | static void port_start(struct mv643xx_eth_private *mp) |
2224 | { |
2225 | u32 pscr; |
2226 | int i; |
2227 | |
2228 | /* |
2229 | * Perform PHY reset, if there is a PHY. |
2230 | */ |
2231 | if (mp->phy != NULL) { |
2232 | struct ethtool_cmd cmd; |
2233 | |
2234 | mv643xx_eth_get_settings(mp->dev, &cmd); |
2235 | phy_reset(mp); |
2236 | mv643xx_eth_set_settings(mp->dev, &cmd); |
2237 | } |
2238 | |
2239 | /* |
2240 | * Configure basic link parameters. |
2241 | */ |
2242 | pscr = rdlp(mp, PORT_SERIAL_CONTROL); |
2243 | |
2244 | pscr |= SERIAL_PORT_ENABLE; |
2245 | wrlp(mp, PORT_SERIAL_CONTROL, pscr); |
2246 | |
2247 | pscr |= DO_NOT_FORCE_LINK_FAIL; |
2248 | if (mp->phy == NULL) |
2249 | pscr |= FORCE_LINK_PASS; |
2250 | wrlp(mp, PORT_SERIAL_CONTROL, pscr); |
2251 | |
2252 | /* |
2253 | * Configure TX path and queues. |
2254 | */ |
2255 | tx_set_rate(mp, 1000000000, 16777216); |
2256 | for (i = 0; i < mp->txq_count; i++) { |
2257 | struct tx_queue *txq = mp->txq + i; |
2258 | |
2259 | txq_reset_hw_ptr(txq); |
2260 | txq_set_rate(txq, 1000000000, 16777216); |
2261 | txq_set_fixed_prio_mode(txq); |
2262 | } |
2263 | |
2264 | /* |
2265 | * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast |
2266 | * frames to RX queue #0, and include the pseudo-header when |
2267 | * calculating receive checksums. |
2268 | */ |
2269 | wrlp(mp, PORT_CONFIG, 0x02000000); |
2270 | |
2271 | /* |
2272 | * Treat BPDUs as normal multicasts, and disable partition mode. |
2273 | */ |
2274 | wrlp(mp, PORT_CONFIG_EXT, 0x00000000); |
2275 | |
2276 | /* |
2277 | * Add configured unicast addresses to address filter table. |
2278 | */ |
2279 | mv643xx_eth_program_unicast_filter(mp->dev); |
2280 | |
2281 | /* |
2282 | * Enable the receive queues. |
2283 | */ |
2284 | for (i = 0; i < mp->rxq_count; i++) { |
2285 | struct rx_queue *rxq = mp->rxq + i; |
2286 | u32 addr; |
2287 | |
2288 | addr = (u32)rxq->rx_desc_dma; |
2289 | addr += rxq->rx_curr_desc * sizeof(struct rx_desc); |
2290 | wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); |
2291 | |
2292 | rxq_enable(rxq); |
2293 | } |
2294 | } |
2295 | |
2296 | static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) |
2297 | { |
2298 | int skb_size; |
2299 | |
2300 | /* |
2301 | * Reserve 2+14 bytes for an ethernet header (the hardware |
2302 | * automatically prepends 2 bytes of dummy data to each |
2303 | * received packet), 16 bytes for up to four VLAN tags, and |
2304 | * 4 bytes for the trailing FCS -- 36 bytes total. |
2305 | */ |
2306 | skb_size = mp->dev->mtu + 36; |
2307 | |
2308 | /* |
2309 | * Make sure that the skb size is a multiple of 8 bytes, as |
2310 | * the lower three bits of the receive descriptor's buffer |
2311 | * size field are ignored by the hardware. |
2312 | */ |
2313 | mp->skb_size = (skb_size + 7) & ~7; |
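/*
 * E.g. for the default MTU of 1500: 1500 + 36 = 1536, already a
 * multiple of 8; an MTU of 1522 would give 1558, rounded up to
 * 1560.
 */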
2314 | |
2315 | /* |
2316 | * If NET_SKB_PAD is smaller than a cache line, |
2317 | * netdev_alloc_skb() will cause skb->data to be misaligned |
2318 | * to a cache line boundary. If this is the case, include |
2319 | * some extra space to allow re-aligning the data area. |
2320 | */ |
2321 | mp->skb_size += SKB_DMA_REALIGN; |
2322 | } |
2323 | |
2324 | static int mv643xx_eth_open(struct net_device *dev) |
2325 | { |
2326 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
2327 | int err; |
2328 | int i; |
2329 | |
2330 | wrlp(mp, INT_CAUSE, 0); |
2331 | wrlp(mp, INT_CAUSE_EXT, 0); |
2332 | rdlp(mp, INT_CAUSE_EXT); |
2333 | |
2334 | err = request_irq(dev->irq, mv643xx_eth_irq, |
2335 | IRQF_SHARED, dev->name, dev); |
2336 | if (err) { |
2337 | dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); |
2338 | return -EAGAIN; |
2339 | } |
2340 | |
2341 | mv643xx_eth_recalc_skb_size(mp); |
2342 | |
2343 | napi_enable(&mp->napi); |
2344 | |
2345 | skb_queue_head_init(&mp->rx_recycle); |
2346 | |
2347 | mp->int_mask = INT_EXT; |
2348 | |
2349 | for (i = 0; i < mp->rxq_count; i++) { |
2350 | err = rxq_init(mp, i); |
2351 | if (err) { |
2352 | while (--i >= 0) |
2353 | rxq_deinit(mp->rxq + i); |
2354 | goto out; |
2355 | } |
2356 | |
2357 | rxq_refill(mp->rxq + i, INT_MAX); |
2358 | mp->int_mask |= INT_RX_0 << i; |
2359 | } |
2360 | |
2361 | if (mp->oom) { |
2362 | mp->rx_oom.expires = jiffies + (HZ / 10); |
2363 | add_timer(&mp->rx_oom); |
2364 | } |
2365 | |
2366 | for (i = 0; i < mp->txq_count; i++) { |
2367 | err = txq_init(mp, i); |
2368 | if (err) { |
2369 | while (--i >= 0) |
2370 | txq_deinit(mp->txq + i); |
2371 | goto out_free; |
2372 | } |
2373 | mp->int_mask |= INT_TX_END_0 << i; |
2374 | } |
2375 | |
2376 | port_start(mp); |
2377 | |
2378 | wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); |
2379 | wrlp(mp, INT_MASK, mp->int_mask); |
2380 | |
2381 | return 0; |
2382 | |
2383 | |
2384 | out_free: |
2385 | for (i = 0; i < mp->rxq_count; i++) |
2386 | rxq_deinit(mp->rxq + i); |
2387 | out: |
2388 | free_irq(dev->irq, dev); |
2389 | |
2390 | return err; |
2391 | } |
2392 | |
2393 | static void port_reset(struct mv643xx_eth_private *mp) |
2394 | { |
2395 | unsigned int data; |
2396 | int i; |
2397 | |
2398 | for (i = 0; i < mp->rxq_count; i++) |
2399 | rxq_disable(mp->rxq + i); |
2400 | for (i = 0; i < mp->txq_count; i++) |
2401 | txq_disable(mp->txq + i); |
2402 | |
2403 | while (1) { |
2404 | u32 ps = rdlp(mp, PORT_STATUS); |
2405 | |
2406 | if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) |
2407 | break; |
2408 | udelay(10); |
2409 | } |
2410 | |
2411 | /* Reset the Enable bit in the Configuration Register */ |
2412 | data = rdlp(mp, PORT_SERIAL_CONTROL); |
2413 | data &= ~(SERIAL_PORT_ENABLE | |
2414 | DO_NOT_FORCE_LINK_FAIL | |
2415 | FORCE_LINK_PASS); |
2416 | wrlp(mp, PORT_SERIAL_CONTROL, data); |
2417 | } |
2418 | |
2419 | static int mv643xx_eth_stop(struct net_device *dev) |
2420 | { |
2421 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
2422 | int i; |
2423 | |
2424 | wrlp(mp, INT_MASK_EXT, 0x00000000); |
2425 | wrlp(mp, INT_MASK, 0x00000000); |
2426 | rdlp(mp, INT_MASK); |
2427 | |
2428 | napi_disable(&mp->napi); |
2429 | |
2430 | del_timer_sync(&mp->rx_oom); |
2431 | |
2432 | netif_carrier_off(dev); |
2433 | |
2434 | free_irq(dev->irq, dev); |
2435 | |
2436 | port_reset(mp); |
2437 | mv643xx_eth_get_stats(dev); |
2438 | mib_counters_update(mp); |
2439 | del_timer_sync(&mp->mib_counters_timer); |
2440 | |
2441 | skb_queue_purge(&mp->rx_recycle); |
2442 | |
2443 | for (i = 0; i < mp->rxq_count; i++) |
2444 | rxq_deinit(mp->rxq + i); |
2445 | for (i = 0; i < mp->txq_count; i++) |
2446 | txq_deinit(mp->txq + i); |
2447 | |
2448 | return 0; |
2449 | } |
2450 | |
2451 | static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
2452 | { |
2453 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
2454 | |
2455 | if (mp->phy != NULL) |
2456 | return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd); |
2457 | |
2458 | return -EOPNOTSUPP; |
2459 | } |
2460 | |
2461 | static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) |
2462 | { |
2463 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
2464 | |
2465 | if (new_mtu < 64 || new_mtu > 9500) |
2466 | return -EINVAL; |
2467 | |
2468 | dev->mtu = new_mtu; |
2469 | mv643xx_eth_recalc_skb_size(mp); |
2470 | tx_set_rate(mp, 1000000000, 16777216); |
2471 | |
2472 | if (!netif_running(dev)) |
2473 | return 0; |
2474 | |
2475 | /* |
2476 | * Stop and then re-open the interface. This will allocate RX |
2477 | * skbs of the new MTU. |
2478 | * Note that the re-open may fail if memory is exhausted, |
2479 | * in which case the interface is left down. |
2480 | */ |
2481 | mv643xx_eth_stop(dev); |
2482 | if (mv643xx_eth_open(dev)) { |
2483 | dev_printk(KERN_ERR, &dev->dev, |
2484 | "fatal error on re-opening device after " |
2485 | "MTU change\n"); |
2486 | } |
2487 | |
2488 | return 0; |
2489 | } |
2490 | |
2491 | static void tx_timeout_task(struct work_struct *ugly) |
2492 | { |
2493 | struct mv643xx_eth_private *mp; |
2494 | |
2495 | mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); |
2496 | if (netif_running(mp->dev)) { |
2497 | netif_tx_stop_all_queues(mp->dev); |
2498 | port_reset(mp); |
2499 | port_start(mp); |
2500 | netif_tx_wake_all_queues(mp->dev); |
2501 | } |
2502 | } |
2503 | |
2504 | static void mv643xx_eth_tx_timeout(struct net_device *dev) |
2505 | { |
2506 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
2507 | |
2508 | dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); |
2509 | |
2510 | schedule_work(&mp->tx_timeout_task); |
2511 | } |
2512 | |
2513 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2514 | static void mv643xx_eth_netpoll(struct net_device *dev) |
2515 | { |
2516 | struct mv643xx_eth_private *mp = netdev_priv(dev); |
2517 | |
2518 | wrlp(mp, INT_MASK, 0x00000000); |
2519 | rdlp(mp, INT_MASK); |
2520 | |
2521 | mv643xx_eth_irq(dev->irq, dev); |
2522 | |
2523 | wrlp(mp, INT_MASK, mp->int_mask); |
2524 | } |
2525 | #endif |
2526 | |
2527 | |
2528 | /* platform glue ************************************************************/ |
2529 | static void |
2530 | mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, |
2531 | struct mbus_dram_target_info *dram) |
2532 | { |
2533 | void __iomem *base = msp->base; |
2534 | u32 win_enable; |
2535 | u32 win_protect; |
2536 | int i; |
2537 | |
2538 | for (i = 0; i < 6; i++) { |
2539 | writel(0, base + WINDOW_BASE(i)); |
2540 | writel(0, base + WINDOW_SIZE(i)); |
2541 | if (i < 4) |
2542 | writel(0, base + WINDOW_REMAP_HIGH(i)); |
2543 | } |
2544 | |
2545 | win_enable = 0x3f; |
2546 | win_protect = 0; |
2547 | |
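/*
 * win_enable starts with all six bits set (windows disabled) and
 * one bit is cleared per DRAM chip select before being written
 * to WINDOW_BAR_ENABLE; win_protect accumulates full read/write
 * access (2 bits per window) for the ports' WINDOW_PROTECT
 * registers.
 */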
2548 | for (i = 0; i < dram->num_cs; i++) { |
2549 | struct mbus_dram_window *cs = dram->cs + i; |
2550 | |
2551 | writel((cs->base & 0xffff0000) | |
2552 | (cs->mbus_attr << 8) | |
2553 | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); |
2554 | writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); |
2555 | |
2556 | win_enable &= ~(1 << i); |
2557 | win_protect |= 3 << (2 * i); |
2558 | } |
2559 | |
2560 | writel(win_enable, base + WINDOW_BAR_ENABLE); |
2561 | msp->win_protect = win_protect; |
2562 | } |
2563 | |
2564 | static void infer_hw_params(struct mv643xx_eth_shared_private *msp) |
2565 | { |
2566 | /* |
2567 | * Check whether we have a 14-bit coal limit field in bits |
2568 | * [21:8], or a 16-bit coal limit in bits [25,21:7] of the |
2569 | * SDMA config register. |
2570 | */ |
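/*
 * Bit 25 only latches on controllers that have the extended
 * field, so writing 0x02000000 and reading it back tells the
 * two layouts apart.
 */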
2571 | writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); |
2572 | if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) |
2573 | msp->extended_rx_coal_limit = 1; |
2574 | else |
2575 | msp->extended_rx_coal_limit = 0; |
2576 | |
2577 | /* |
2578 | * Check whether the MAC supports TX rate control, and if |
2579 | * yes, whether its associated registers are in the old or |
2580 | * the new place. |
2581 | */ |
2582 | writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); |
2583 | if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { |
2584 | msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; |
2585 | } else { |
2586 | writel(7, msp->base + 0x0400 + TX_BW_RATE); |
2587 | if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) |
2588 | msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; |
2589 | else |
2590 | msp->tx_bw_control = TX_BW_CONTROL_ABSENT; |
2591 | } |
2592 | } |
2593 | |
2594 | static int mv643xx_eth_shared_probe(struct platform_device *pdev) |
2595 | { |
2596 | static int mv643xx_eth_version_printed; |
2597 | struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; |
2598 | struct mv643xx_eth_shared_private *msp; |
2599 | struct resource *res; |
2600 | int ret; |
2601 | |
2602 | if (!mv643xx_eth_version_printed++) |
2603 | printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " |
2604 | "driver version %s\n", mv643xx_eth_driver_version); |
2605 | |
2606 | ret = -EINVAL; |
2607 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2608 | if (res == NULL) |
2609 | goto out; |
2610 | |
2611 | ret = -ENOMEM; |
2612 | msp = kmalloc(sizeof(*msp), GFP_KERNEL); |
2613 | if (msp == NULL) |
2614 | goto out; |
2615 | memset(msp, 0, sizeof(*msp)); |
2616 | |
2617 | msp->base = ioremap(res->start, resource_size(res)); |
2618 | if (msp->base == NULL) |
2619 | goto out_free; |
2620 | |
2621 | /* |
2622 | * Set up and register SMI bus. |
2623 | */ |
2624 | if (pd == NULL || pd->shared_smi == NULL) { |
2625 | msp->smi_bus = mdiobus_alloc(); |
2626 | if (msp->smi_bus == NULL) |
2627 | goto out_unmap; |
2628 | |
2629 | msp->smi_bus->priv = msp; |
2630 | msp->smi_bus->name = "mv643xx_eth smi"; |
2631 | msp->smi_bus->read = smi_bus_read; |
2632 | msp->smi_bus->write = smi_bus_write; |
2633 | snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); |
2634 | msp->smi_bus->parent = &pdev->dev; |
2635 | msp->smi_bus->phy_mask = 0xffffffff; |
2636 | if (mdiobus_register(msp->smi_bus) < 0) |
2637 | goto out_free_mii_bus; |
2638 | msp->smi = msp; |
2639 | } else { |
2640 | msp->smi = platform_get_drvdata(pd->shared_smi); |
2641 | } |
2642 | |
2643 | msp->err_interrupt = NO_IRQ; |
2644 | init_waitqueue_head(&msp->smi_busy_wait); |
2645 | |
2646 | /* |
2647 | * Check whether the error interrupt is hooked up. |
2648 | */ |
2649 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
2650 | if (res != NULL) { |
2651 | int err; |
2652 | |
2653 | err = request_irq(res->start, mv643xx_eth_err_irq, |
2654 | IRQF_SHARED, "mv643xx_eth", msp); |
2655 | if (!err) { |
2656 | writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); |
2657 | msp->err_interrupt = res->start; |
2658 | } |
2659 | } |
2660 | |
2661 | /* |
2662 | * (Re-)program MBUS remapping windows if we are asked to. |
2663 | */ |
2664 | if (pd != NULL && pd->dram != NULL) |
2665 | mv643xx_eth_conf_mbus_windows(msp, pd->dram); |
2666 | |
2667 | /* |
2668 | * Detect hardware parameters. |
2669 | */ |
2670 | msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; |
2671 | infer_hw_params(msp); |
2672 | |
2673 | platform_set_drvdata(pdev, msp); |
2674 | |
2675 | return 0; |
2676 | |
2677 | out_free_mii_bus: |
2678 | mdiobus_free(msp->smi_bus); |
2679 | out_unmap: |
2680 | iounmap(msp->base); |
2681 | out_free: |
2682 | kfree(msp); |
2683 | out: |
2684 | return ret; |
2685 | } |
2686 | |
2687 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) |
2688 | { |
2689 | struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); |
2690 | struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; |
2691 | |
2692 | if (pd == NULL || pd->shared_smi == NULL) { |
2693 | mdiobus_unregister(msp->smi_bus); |
2694 | mdiobus_free(msp->smi_bus); |
2695 | } |
2696 | if (msp->err_interrupt != NO_IRQ) |
2697 | free_irq(msp->err_interrupt, msp); |
2698 | iounmap(msp->base); |
2699 | kfree(msp); |
2700 | |
2701 | return 0; |
2702 | } |
2703 | |
2704 | static struct platform_driver mv643xx_eth_shared_driver = { |
2705 | .probe = mv643xx_eth_shared_probe, |
2706 | .remove = mv643xx_eth_shared_remove, |
2707 | .driver = { |
2708 | .name = MV643XX_ETH_SHARED_NAME, |
2709 | .owner = THIS_MODULE, |
2710 | }, |
2711 | }; |
2712 | |
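/*
 * The shared PHY_ADDR register packs one 5-bit PHY address per
 * port: port N's address occupies bits [5N+4:5N].
 */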
2713 | static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) |
2714 | { |
2715 | int addr_shift = 5 * mp->port_num; |
2716 | u32 data; |
2717 | |
2718 | data = rdl(mp, PHY_ADDR); |
2719 | data &= ~(0x1f << addr_shift); |
2720 | data |= (phy_addr & 0x1f) << addr_shift; |
2721 | wrl(mp, PHY_ADDR, data); |
2722 | } |
2723 | |
2724 | static int phy_addr_get(struct mv643xx_eth_private *mp) |
2725 | { |
2726 | unsigned int data; |
2727 | |
2728 | data = rdl(mp, PHY_ADDR); |
2729 | |
2730 | return (data >> (5 * mp->port_num)) & 0x1f; |
2731 | } |
2732 | |
2733 | static void set_params(struct mv643xx_eth_private *mp, |
2734 | struct mv643xx_eth_platform_data *pd) |
2735 | { |
2736 | struct net_device *dev = mp->dev; |
2737 | |
2738 | if (is_valid_ether_addr(pd->mac_addr)) |
2739 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
2740 | else |
2741 | uc_addr_get(mp, dev->dev_addr); |
2742 | |
2743 | mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; |
2744 | if (pd->rx_queue_size) |
2745 | mp->rx_ring_size = pd->rx_queue_size; |
2746 | mp->rx_desc_sram_addr = pd->rx_sram_addr; |
2747 | mp->rx_desc_sram_size = pd->rx_sram_size; |
2748 | |
2749 | mp->rxq_count = pd->rx_queue_count ? : 1; |
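/*
 * "?:" with an omitted middle operand (a GNU C extension): use
 * the platform-supplied queue count when non-zero, else default
 * to a single queue.  The same idiom is used for txq_count below.
 */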
2750 | |
2751 | mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; |
2752 | if (pd->tx_queue_size) |
2753 | mp->tx_ring_size = pd->tx_queue_size; |
2754 | mp->tx_desc_sram_addr = pd->tx_sram_addr; |
2755 | mp->tx_desc_sram_size = pd->tx_sram_size; |
2756 | |
2757 | mp->txq_count = pd->tx_queue_count ? : 1; |
2758 | } |
2759 | |
2760 | static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, |
2761 | int phy_addr) |
2762 | { |
2763 | struct mii_bus *bus = mp->shared->smi->smi_bus; |
2764 | struct phy_device *phydev; |
2765 | int start; |
2766 | int num; |
2767 | int i; |
2768 | |
2769 | if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { |
2770 | start = phy_addr_get(mp) & 0x1f; |
2771 | num = 32; |
2772 | } else { |
2773 | start = phy_addr & 0x1f; |
2774 | num = 1; |
2775 | } |
2776 | |
2777 | phydev = NULL; |
2778 | for (i = 0; i < num; i++) { |
2779 | int addr = (start + i) & 0x1f; |
2780 | |
2781 | if (bus->phy_map[addr] == NULL) |
2782 | mdiobus_scan(bus, addr); |
2783 | |
2784 | if (phydev == NULL) { |
2785 | phydev = bus->phy_map[addr]; |
2786 | if (phydev != NULL) |
2787 | phy_addr_set(mp, addr); |
2788 | } |
2789 | } |
2790 | |
2791 | return phydev; |
2792 | } |
2793 | |
2794 | static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) |
2795 | { |
2796 | struct phy_device *phy = mp->phy; |
2797 | |
2798 | phy_reset(mp); |
2799 | |
2800 | phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); |
2801 | |
2802 | if (speed == 0) { |
2803 | phy->autoneg = AUTONEG_ENABLE; |
2804 | phy->speed = 0; |
2805 | phy->duplex = 0; |
2806 | phy->advertising = phy->supported | ADVERTISED_Autoneg; |
2807 | } else { |
2808 | phy->autoneg = AUTONEG_DISABLE; |
2809 | phy->advertising = 0; |
2810 | phy->speed = speed; |
2811 | phy->duplex = duplex; |
2812 | } |
2813 | phy_start_aneg(phy); |
2814 | } |
2815 | |
2816 | static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) |
2817 | { |
2818 | u32 pscr; |
2819 | |
2820 | pscr = rdlp(mp, PORT_SERIAL_CONTROL); |
2821 | if (pscr & SERIAL_PORT_ENABLE) { |
2822 | pscr &= ~SERIAL_PORT_ENABLE; |
2823 | wrlp(mp, PORT_SERIAL_CONTROL, pscr); |
2824 | } |
2825 | |
2826 | pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; |
2827 | if (mp->phy == NULL) { |
2828 | pscr |= DISABLE_AUTO_NEG_SPEED_GMII; |
2829 | if (speed == SPEED_1000) |
2830 | pscr |= SET_GMII_SPEED_TO_1000; |
2831 | else if (speed == SPEED_100) |
2832 | pscr |= SET_MII_SPEED_TO_100; |
2833 | |
2834 | pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; |
2835 | |
2836 | pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; |
2837 | if (duplex == DUPLEX_FULL) |
2838 | pscr |= SET_FULL_DUPLEX_MODE; |
2839 | } |
2840 | |
2841 | wrlp(mp, PORT_SERIAL_CONTROL, pscr); |
2842 | } |
2843 | |
2844 | static const struct net_device_ops mv643xx_eth_netdev_ops = { |
2845 | .ndo_open = mv643xx_eth_open, |
2846 | .ndo_stop = mv643xx_eth_stop, |
2847 | .ndo_start_xmit = mv643xx_eth_xmit, |
2848 | .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, |
2849 | .ndo_set_mac_address = mv643xx_eth_set_mac_address, |
2850 | .ndo_validate_addr = eth_validate_addr, |
2851 | .ndo_do_ioctl = mv643xx_eth_ioctl, |
2852 | .ndo_change_mtu = mv643xx_eth_change_mtu, |
2853 | .ndo_tx_timeout = mv643xx_eth_tx_timeout, |
2854 | .ndo_get_stats = mv643xx_eth_get_stats, |
2855 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2856 | .ndo_poll_controller = mv643xx_eth_netpoll, |
2857 | #endif |
2858 | }; |
2859 | |
2860 | static int mv643xx_eth_probe(struct platform_device *pdev) |
2861 | { |
2862 | struct mv643xx_eth_platform_data *pd; |
2863 | struct mv643xx_eth_private *mp; |
2864 | struct net_device *dev; |
2865 | struct resource *res; |
2866 | int err; |
2867 | |
2868 | pd = pdev->dev.platform_data; |
2869 | if (pd == NULL) { |
2870 | dev_printk(KERN_ERR, &pdev->dev, |
2871 | "no mv643xx_eth_platform_data\n"); |
2872 | return -ENODEV; |
2873 | } |
2874 | |
2875 | if (pd->shared == NULL) { |
2876 | dev_printk(KERN_ERR, &pdev->dev, |
2877 | "no mv643xx_eth_platform_data->shared\n"); |
2878 | return -ENODEV; |
2879 | } |
2880 | |
2881 | dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); |
2882 | if (!dev) |
2883 | return -ENOMEM; |
2884 | |
2885 | mp = netdev_priv(dev); |
2886 | platform_set_drvdata(pdev, mp); |
2887 | |
2888 | mp->shared = platform_get_drvdata(pd->shared); |
2889 | mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); |
2890 | mp->port_num = pd->port_number; |
2891 | |
2892 | mp->dev = dev; |
2893 | |
2894 | set_params(mp, pd); |
2895 | dev->real_num_tx_queues = mp->txq_count; |
2896 | |
2897 | if (pd->phy_addr != MV643XX_ETH_PHY_NONE) |
2898 | mp->phy = phy_scan(mp, pd->phy_addr); |
2899 | |
2900 | if (mp->phy != NULL) |
2901 | phy_init(mp, pd->speed, pd->duplex); |
2902 | |
2903 | SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); |
2904 | |
2905 | init_pscr(mp, pd->speed, pd->duplex); |
2906 | |
2907 | |
2908 | mib_counters_clear(mp); |
2909 | |
2910 | init_timer(&mp->mib_counters_timer); |
2911 | mp->mib_counters_timer.data = (unsigned long)mp; |
2912 | mp->mib_counters_timer.function = mib_counters_timer_wrapper; |
2913 | mp->mib_counters_timer.expires = jiffies + 30 * HZ; |
2914 | add_timer(&mp->mib_counters_timer); |
2915 | |
2916 | spin_lock_init(&mp->mib_counters_lock); |
2917 | |
2918 | INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); |
2919 | |
2920 | netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); |
2921 | |
2922 | init_timer(&mp->rx_oom); |
2923 | mp->rx_oom.data = (unsigned long)mp; |
2924 | mp->rx_oom.function = oom_timer_wrapper; |
2925 | |
2926 | |
2927 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
2928 | BUG_ON(!res); |
2929 | dev->irq = res->start; |
2930 | |
2931 | dev->netdev_ops = &mv643xx_eth_netdev_ops; |
2932 | |
2933 | dev->watchdog_timeo = 2 * HZ; |
2934 | dev->base_addr = 0; |
2935 | |
2936 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; |
2937 | dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; |
2938 | |
2939 | SET_NETDEV_DEV(dev, &pdev->dev); |
2940 | |
2941 | if (mp->shared->win_protect) |
2942 | wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); |
2943 | |
2944 | netif_carrier_off(dev); |
2945 | |
2946 | wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); |
2947 | |
2948 | set_rx_coal(mp, 250); |
2949 | set_tx_coal(mp, 0); |
2950 | |
2951 | err = register_netdev(dev); |
2952 | if (err) |
2953 | goto out; |
2954 | |
2955 | dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", |
2956 | mp->port_num, dev->dev_addr); |
2957 | |
2958 | if (mp->tx_desc_sram_size > 0) |
2959 | dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); |
2960 | |
2961 | return 0; |
2962 | |
2963 | out: |
2964 | free_netdev(dev); |
2965 | |
2966 | return err; |
2967 | } |
2968 | |
2969 | static int mv643xx_eth_remove(struct platform_device *pdev) |
2970 | { |
2971 | struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); |
2972 | |
2973 | unregister_netdev(mp->dev); |
2974 | if (mp->phy != NULL) |
2975 | phy_detach(mp->phy); |
2976 | flush_scheduled_work(); |
2977 | free_netdev(mp->dev); |
2978 | |
2979 | platform_set_drvdata(pdev, NULL); |
2980 | |
2981 | return 0; |
2982 | } |
2983 | |
2984 | static void mv643xx_eth_shutdown(struct platform_device *pdev) |
2985 | { |
2986 | struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); |
2987 | |
2988 | /* Mask all interrupts on ethernet port */ |
2989 | wrlp(mp, INT_MASK, 0); |
2990 | rdlp(mp, INT_MASK); |
2991 | |
2992 | if (netif_running(mp->dev)) |
2993 | port_reset(mp); |
2994 | } |
2995 | |
2996 | static struct platform_driver mv643xx_eth_driver = { |
2997 | .probe = mv643xx_eth_probe, |
2998 | .remove = mv643xx_eth_remove, |
2999 | .shutdown = mv643xx_eth_shutdown, |
3000 | .driver = { |
3001 | .name = MV643XX_ETH_NAME, |
3002 | .owner = THIS_MODULE, |
3003 | }, |
3004 | }; |
3005 | |
3006 | static int __init mv643xx_eth_init_module(void) |
3007 | { |
3008 | int rc; |
3009 | |
3010 | rc = platform_driver_register(&mv643xx_eth_shared_driver); |
3011 | if (!rc) { |
3012 | rc = platform_driver_register(&mv643xx_eth_driver); |
3013 | if (rc) |
3014 | platform_driver_unregister(&mv643xx_eth_shared_driver); |
3015 | } |
3016 | |
3017 | return rc; |
3018 | } |
3019 | module_init(mv643xx_eth_init_module); |
3020 | |
3021 | static void __exit mv643xx_eth_cleanup_module(void) |
3022 | { |
3023 | platform_driver_unregister(&mv643xx_eth_driver); |
3024 | platform_driver_unregister(&mv643xx_eth_shared_driver); |
3025 | } |
3026 | module_exit(mv643xx_eth_cleanup_module); |
3027 | |
3028 | MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " |
3029 | "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); |
3030 | MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); |
3031 | MODULE_LICENSE("GPL"); |
3032 | MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); |
3033 | MODULE_ALIAS("platform:" MV643XX_ETH_NAME); |
3034 |