/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through platform_device. Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported).
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
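/*
 * A condensed sketch of how the descriptor ring is walked (equivalent to
 * the open-coded advance used in gfar_start_xmit() and the clean-up loops
 * below; shown here purely as an illustration):
 *
 *	bdp = (bdp->status & TXBD_WRAP) ? priv->tx_bd_base : bdp + 1;
 */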

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_clean_tx_ring(struct net_device *dev);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return (priv->vlan_enable || priv->rx_csum_enable);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gianfar_platform_data *einfo;
	struct resource *r;
	int err = 0, irq;

	einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

	if (NULL == einfo) {
		printk(KERN_ERR "gfar %d: Missing additional data!\n",
		       pdev->id);

		return -ENODEV;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->dev = dev;

	/* Set the info in the priv to the current info */
	priv->einfo = einfo;

	/* fill out IRQ fields */
	if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		irq = platform_get_irq_byname(pdev, "tx");
		if (irq < 0)
			goto regs_fail;
		priv->interruptTransmit = irq;

		irq = platform_get_irq_byname(pdev, "rx");
		if (irq < 0)
			goto regs_fail;
		priv->interruptReceive = irq;

		irq = platform_get_irq_byname(pdev, "error");
		if (irq < 0)
			goto regs_fail;
		priv->interruptError = irq;
	} else {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			goto regs_fail;
		priv->interruptTransmit = irq;
	}

	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = ioremap(r->start, sizeof (struct gfar));

	if (NULL == priv->regs) {
		err = -ENOMEM;
		goto regs_fail;
	}

	spin_lock_init(&priv->txlock);
	spin_lock_init(&priv->rxlock);
	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(pdev, dev);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	/* To do this, we write Graceful Receive Stop and Graceful */
	/* Transmit Stop, and then wait until the corresponding bits */
	/* in IEVENT indicate the stops have completed. */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
		cpu_relax();

	/* Reset MAC layer */
	gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&priv->regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

	/* Initialize ECNTRL */
	gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Copy the station address into the dev structure, */
	memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) (priv->regs);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Fill in the dev structure */
	dev->open = gfar_enet_open;
	dev->hard_start_xmit = gfar_start_xmit;
	dev->tx_timeout = gfar_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = gfar_netpoll;
#endif
	dev->stop = gfar_close;
	dev->change_mtu = gfar_change_mtu;
	dev->mtu = 1500;
	dev->set_multicast_list = gfar_set_multi;

	dev->ethtool_ops = &gfar_ethtool_ops;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		priv->rx_csum_enable = 1;
		dev->features |= NETIF_F_IP_CSUM;
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->vlan_rx_register = gfar_vlan_rx_register;

		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

		priv->vlan_enable = 1;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &priv->regs->igaddr0;
		priv->hash_regs[1] = &priv->regs->igaddr1;
		priv->hash_regs[2] = &priv->regs->igaddr2;
		priv->hash_regs[3] = &priv->regs->igaddr3;
		priv->hash_regs[4] = &priv->regs->igaddr4;
		priv->hash_regs[5] = &priv->regs->igaddr5;
		priv->hash_regs[6] = &priv->regs->igaddr6;
		priv->hash_regs[7] = &priv->regs->igaddr7;
		priv->hash_regs[8] = &priv->regs->gaddr0;
		priv->hash_regs[9] = &priv->regs->gaddr1;
		priv->hash_regs[10] = &priv->regs->gaddr2;
		priv->hash_regs[11] = &priv->regs->gaddr3;
		priv->hash_regs[12] = &priv->regs->gaddr4;
		priv->hash_regs[13] = &priv->regs->gaddr5;
		priv->hash_regs[14] = &priv->regs->gaddr6;
		priv->hash_regs[15] = &priv->regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &priv->regs->gaddr0;
		priv->hash_regs[1] = &priv->regs->gaddr1;
		priv->hash_regs[2] = &priv->regs->gaddr2;
		priv->hash_regs[3] = &priv->regs->gaddr3;
		priv->hash_regs[4] = &priv->regs->gaddr4;
		priv->hash_regs[5] = &priv->regs->gaddr5;
		priv->hash_regs[6] = &priv->regs->gaddr6;
		priv->hash_regs[7] = &priv->regs->gaddr7;
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
	priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

	priv->txcoalescing = DEFAULT_TX_COALESCE;
	priv->txcount = DEFAULT_TXCOUNT;
	priv->txtime = DEFAULT_TXTIME;
	priv->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rxcount = DEFAULT_RXCOUNT;
	priv->rxtime = DEFAULT_RXTIME;

	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
		       dev->name);
		goto register_fail;
	}

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
	       dev->name, priv->rx_ring_size, priv->tx_ring_size);

	return 0;

register_fail:
	iounmap(priv->regs);
regs_fail:
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);

	platform_set_drvdata(pdev, NULL);

	iounmap(priv->regs);
	free_netdev(dev);

	return 0;
}

#ifdef CONFIG_PM
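/* Suspend hook: detach the interface, gracefully halt the DMA and disable
 * Tx (and Rx too, unless magic-packet wake-on-LAN is to stay armed). */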
static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(dev);

	if (netif_running(dev)) {
		spin_lock_irqsave(&priv->txlock, flags);
		spin_lock(&priv->rxlock);

		gfar_halt_nodisable(dev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&priv->regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&priv->regs->maccfg1, tempval);

		spin_unlock(&priv->rxlock);
		spin_unlock_irqrestore(&priv->txlock, flags);

		napi_disable(&priv->napi);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&priv->regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&priv->regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&priv->regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

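/* Resume hook: leave Magic Packet mode, restart the PHY if it was stopped,
 * bring the controller back up and re-attach the interface. */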
static int gfar_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(dev)) {
		netif_device_attach(dev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */

	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	tempval = gfar_read(&priv->regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&priv->regs->maccfg2, tempval);

	gfar_start(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	netif_device_attach(dev);

	napi_enable(&priv->napi);

	return 0;
}
#else
#define gfar_suspend NULL
#define gfar_resume NULL
#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	u32 ecntrl = gfar_read(&priv->regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->einfo->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	struct phy_device *phydev;
	char phy_id[BUS_ID_SIZE];
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

	interface = gfar_get_interface(dev);

	phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Remove any features not supported by the controller */
	phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_mii __iomem *regs =
			(void __iomem *)&priv->regs->gfar_mii_regs;
	int tbipa = gfar_read(&priv->regs->tbipa);

	/* Single clk mode, mii mode off(for serdes communication) */
	gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);

	gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

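/* Program the controller's quiescent defaults: clear and mask all events,
 * zero the hash (and, if present, RMON MIB) registers, and set the maximum
 * receive buffer length and minimum frame length. */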
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* Clear IEVENT */
	gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

	/* Initialize IMASK */
	gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

	/* Init hash registers to zero */
	gfar_write(&priv->regs->igaddr0, 0);
	gfar_write(&priv->regs->igaddr1, 0);
	gfar_write(&priv->regs->igaddr2, 0);
	gfar_write(&priv->regs->igaddr3, 0);
	gfar_write(&priv->regs->igaddr4, 0);
	gfar_write(&priv->regs->igaddr5, 0);
	gfar_write(&priv->regs->igaddr6, 0);
	gfar_write(&priv->regs->igaddr7, 0);

	gfar_write(&priv->regs->gaddr0, 0);
	gfar_write(&priv->regs->gaddr1, 0);
	gfar_write(&priv->regs->gaddr2, 0);
	gfar_write(&priv->regs->gaddr3, 0);
	gfar_write(&priv->regs->gaddr4, 0);
	gfar_write(&priv->regs->gaddr5, 0);
	gfar_write(&priv->regs->gaddr6, 0);
	gfar_write(&priv->regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
		gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
}


/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&priv->regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&priv->regs->dmactrl, tempval);

		while (!(gfar_read(&priv->regs->ievent) &
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

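/* Full shutdown path: stop the PHY, halt the controller under the Tx/Rx
 * locks, release the IRQs and free the descriptor rings and socket buffers. */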
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	unsigned long flags;

	phy_stop(priv->phydev);

	/* Lock it down */
	spin_lock_irqsave(&priv->txlock, flags);
	spin_lock(&priv->rxlock);

	gfar_halt(dev);

	spin_unlock(&priv->rxlock);
	spin_unlock_irqrestore(&priv->txlock, flags);

	/* Free the IRQs */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		free_irq(priv->interruptError, dev);
		free_irq(priv->interruptTransmit, dev);
		free_irq(priv->interruptReceive, dev);
	} else {
		free_irq(priv->interruptTransmit, dev);
	}

	free_skb_resources(priv);

	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	txbdp = priv->tx_bd_base;

	for (i = 0; i < priv->tx_ring_size; i++) {

		if (priv->tx_skbuff[i]) {
			dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
					txbdp->length,
					DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
		}

		txbdp++;
	}

	kfree(priv->tx_skbuff);

	rxbdp = priv->rx_bd_base;

	/* rx_skbuff is not guaranteed to be allocated, so only
	 * free it and its contents if it is allocated */
	if(priv->rx_skbuff != NULL) {
		for (i = 0; i < priv->rx_ring_size; i++) {
			if (priv->rx_skbuff[i]) {
				dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
						priv->rx_buffer_size,
						DMA_FROM_DEVICE);

				dev_kfree_skb_any(priv->rx_skbuff[i]);
				priv->rx_skbuff[i] = NULL;
			}

			rxbdp->status = 0;
			rxbdp->length = 0;
			rxbdp->bufPtr = 0;

			rxbdp++;
		}

		kfree(priv->rx_skbuff);
	}
}

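/* Counterpart to gfar_halt(): re-enable Rx/Tx in MACCFG1, take the DMA out
 * of graceful stop, clear the Tx/Rx halt bits and unmask interrupts. */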
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&priv->regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&priv->regs->dmactrl, tempval);

	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	dma_addr_t addr = 0;
	unsigned long vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->regs;
	int err = 0;
	u32 rctrl = 0;
	u32 attrs = 0;

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Allocate memory for the buffer descriptors */
	vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
			sizeof (struct txbd8) * priv->tx_ring_size +
			sizeof (struct rxbd8) * priv->rx_ring_size,
			&addr, GFP_KERNEL);

	if (vaddr == 0) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
			       dev->name);
		return -ENOMEM;
	}

	priv->tx_bd_base = (struct txbd8 *) vaddr;

	/* enet DMA only understands physical addresses */
	gfar_write(&regs->tbase0, addr);

	/* Start the rx descriptor ring where the tx ring leaves off */
	addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
	vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
	priv->rx_bd_base = (struct rxbd8 *) vaddr;
	gfar_write(&regs->rbase0, addr);

	/* Setup the skbuff rings */
	priv->tx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->tx_ring_size, GFP_KERNEL);

	if (NULL == priv->tx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto tx_skb_fail;
	}

	for (i = 0; i < priv->tx_ring_size; i++)
		priv->tx_skbuff[i] = NULL;

	priv->rx_skbuff =
	    (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
					priv->rx_ring_size, GFP_KERNEL);

	if (NULL == priv->rx_skbuff) {
		if (netif_msg_ifup(priv))
			printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
			       dev->name);
		err = -ENOMEM;
		goto rx_skb_fail;
	}

	for (i = 0; i < priv->rx_ring_size; i++)
		priv->rx_skbuff[i] = NULL;

	/* Initialize some variables in our dev structure */
	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
	priv->cur_rx = priv->rx_bd_base;
	priv->skb_curtx = priv->skb_dirtytx = 0;
	priv->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = priv->tx_bd_base;
	for (i = 0; i < priv->tx_ring_size; i++) {
		txbdp->status = 0;
		txbdp->length = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = priv->rx_bd_base;
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct sk_buff *skb;

		skb = gfar_new_skb(dev);

		if (!skb) {
			printk(KERN_ERR "%s: Can't allocate RX buffers\n",
			       dev->name);

			goto err_rxalloc_fail;
		}

		priv->rx_skbuff[i] = skb;

		gfar_new_rxbdp(dev, rxbdp, skb);

		rxbdp++;
	}

	/* Set the last descriptor in the ring to wrap */
	rxbdp--;
	rxbdp->status |= RXBD_WRAP;

	/* If the device has multiple interrupts, register for
	 * them. Otherwise, only register for the one */
	if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if (request_irq(priv->interruptError, gfar_error,
				0, "enet_error", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}

		if (request_irq(priv->interruptTransmit, gfar_transmit,
				0, "enet_tx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptTransmit);

			err = -1;

			goto tx_irq_fail;
		}

		if (request_irq(priv->interruptReceive, gfar_receive,
				0, "enet_rx", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
					dev->name, priv->interruptReceive);

			err = -1;
			goto rx_irq_fail;
		}
	} else {
		if (request_irq(priv->interruptTransmit, gfar_interrupt,
				0, "gfar_interrupt", dev) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, priv->interruptError);

			err = -1;
			goto err_irq_fail;
		}
	}

	phy_start(priv->phydev);

	/* Configure the coalescing support */
	if (priv->txcoalescing)
		gfar_write(&regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	else
		gfar_write(&regs->txic, 0);

	if (priv->rxcoalescing)
		gfar_write(&regs->rxic,
			   mk_ic_value(priv->rxcount, priv->rxtime));
	else
		gfar_write(&regs->rxic, 0);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(dev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->vlan_enable)
		rctrl |= RCTRL_VLAN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Init rctrl based on our settings */
	gfar_write(&priv->regs->rctrl, rctrl);

	if (dev->features & NETIF_F_IP_CSUM)
		gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&priv->regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&priv->regs->attr, attrs);

	gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

	/* Start the controller */
	gfar_start(dev);

	return 0;

rx_irq_fail:
	free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
	free_irq(priv->interruptError, dev);
err_irq_fail:
err_rxalloc_fail:
rx_skb_fail:
	free_skb_resources(priv);
tx_skb_fail:
	dma_free_coherent(&dev->dev,
			sizeof(struct txbd8)*priv->tx_ring_size
			+ sizeof(struct rxbd8)*priv->rx_ring_size,
			priv->tx_bd_base,
			gfar_read(&regs->tbase0));

	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	napi_enable(&priv->napi);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if(err) {
		napi_disable(&priv->napi);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);

	return err;
}

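/* Tx offload information (checksum, VLAN tag) travels in a frame control
 * block (FCB) prepended to the frame data; gfar_add_fcb() reserves the
 * GMAC_FCB_LEN bytes for it with skb_push() and zeroes them. */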
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
	struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide.
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp;
	u16 status;
	unsigned long flags;

	/* Update transmit stats */
	dev->stats.tx_bytes += skb->len;

	/* Lock priv now */
	spin_lock_irqsave(&priv->txlock, flags);

	/* Point at the first free tx descriptor */
	txbdp = priv->cur_tx;

	/* Clear all but the WRAP status flags */
	status = txbdp->status & TXBD_WRAP;

	/* Set up checksumming */
	if (likely((dev->features & NETIF_F_IP_CSUM)
			&& (CHECKSUM_PARTIAL == skb->ip_summed))) {
		fcb = gfar_add_fcb(skb, txbdp);
		status |= TXBD_TOE;
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlan_enable &&
	    unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb, txbdp);
			status |= TXBD_TOE;
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Set buffer length and pointer */
	txbdp->length = skb->len;
	txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
			skb->len, DMA_TO_DEVICE);

	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

	/* Flag the BD as interrupt-causing */
	status |= TXBD_INTERRUPT;

	/* Flag the BD as ready to go, last in frame, and */
	/* in need of CRC */
	status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

	dev->trans_start = jiffies;

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */

	eieio();
	txbdp->status = status;

	/* If this was the last BD in the ring, the next one */
	/* is at the beginning of the ring */
	if (txbdp->status & TXBD_WRAP)
		txbdp = priv->tx_bd_base;
	else
		txbdp++;

	/* If the next BD still needs to be cleaned up, then the bds
	   are full. We need to tell the kernel to stop sending us stuff. */
	if (txbdp == priv->dirty_tx) {
		netif_stop_queue(dev);

		dev->stats.tx_fifo_errors++;
	}

	/* Update the current txbd to the next one */
	priv->cur_tx = txbdp;

	/* Tell the DMA to go go go */
	gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

	/* Unlock priv */
	spin_unlock_irqrestore(&priv->txlock, flags);

	return 0;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->napi);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}


/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	u32 tempval;

	spin_lock_irqsave(&priv->rxlock, flags);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&priv->regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval |= RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&priv->regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&priv->regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&priv->regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&priv->regs->rctrl, tempval);
	}

	spin_unlock_irqrestore(&priv->rxlock, flags);
}

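/* Work out the Rx buffer size a new MTU requires (allowing for the VLAN
 * header, frame control block and any padding), restarting the controller
 * if the interface is up and the size actually changed. */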
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1288{
1289 int tempsize, tempval;
1290 struct gfar_private *priv = netdev_priv(dev);
1291 int oldsize = priv->rx_buffer_size;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001292 int frame_size = new_mtu + ETH_HLEN;
1293
1294 if (priv->vlan_enable)
Dai Harukifaa89572008-03-24 10:53:26 -05001295 frame_size += VLAN_HLEN;
Kumar Gala0bbaf062005-06-20 10:54:21 -05001296
1297 if (gfar_uses_fcb(priv))
1298 frame_size += GMAC_FCB_LEN;
1299
1300 frame_size += priv->padding;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301
1302 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001303 if (netif_msg_drv(priv))
1304 printk(KERN_ERR "%s: Invalid MTU setting\n",
1305 dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 return -EINVAL;
1307 }
1308
1309 tempsize =
1310 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
1311 INCREMENTAL_BUFFER_SIZE;
1312
1313 /* Only stop and start the controller if it isn't already
Andy Fleming7f7f5312005-11-11 12:38:59 -06001314 * stopped, and we changed something */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1316 stop_gfar(dev);
1317
1318 priv->rx_buffer_size = tempsize;
1319
1320 dev->mtu = new_mtu;
1321
1322 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
1323 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
1324
1325 /* If the mtu is larger than the max size for standard
1326 * ethernet frames (ie, a jumbo frame), then set maccfg2
1327 * to allow huge frames, and to check the length */
1328 tempval = gfar_read(&priv->regs->maccfg2);
1329
1330 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1331 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1332 else
1333 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1334
1335 gfar_write(&priv->regs->maccfg2, tempval);
1336
1337 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1338 startup_gfar(dev);
1339
1340 return 0;
1341}
1342
Sebastian Siewiorab939902008-08-19 21:12:45 +02001343/* gfar_reset_task gets scheduled when a packet has not been
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 * transmitted after a set amount of time.
1345 * For now, assume that clearing out all the structures, and
Sebastian Siewiorab939902008-08-19 21:12:45 +02001346 * starting over will fix the problem.
1347 */
1348static void gfar_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349{
Sebastian Siewiorab939902008-08-19 21:12:45 +02001350 struct gfar_private *priv = container_of(work, struct gfar_private,
1351 reset_task);
1352 struct net_device *dev = priv->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
1354 if (dev->flags & IFF_UP) {
1355 stop_gfar(dev);
1356 startup_gfar(dev);
1357 }
1358
David S. Miller263ba322008-07-15 03:47:41 -07001359 netif_tx_schedule_all(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360}
1361
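/* Called by the networking core when a transmit times out: count the
 * error and schedule gfar_reset_task() to restart the controller. */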
Sebastian Siewiorab939902008-08-19 21:12:45 +02001362static void gfar_timeout(struct net_device *dev)
1363{
1364 struct gfar_private *priv = netdev_priv(dev);
1365
1366 dev->stats.tx_errors++;
1367 schedule_work(&priv->reset_task);
1368}
1369
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370/* Reclaim tx buffer descriptors the controller has finished with (called from the transmit interrupt and the NAPI poll routine) */
Andy Flemingf162b9d2008-05-02 13:00:30 -05001371static int gfar_clean_tx_ring(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 struct txbd8 *bdp;
Dai Harukid080cd62008-04-09 19:37:51 -05001374 struct gfar_private *priv = netdev_priv(dev);
1375 int howmany = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 bdp = priv->dirty_tx;
1378 while ((bdp->status & TXBD_READY) == 0) {
 1379 /* If dirty_tx and cur_tx are the same, then either the
 1380  * ring is empty or full now (it could only be full in the beginning,
 1381  * obviously). If it is empty, we are done. */
1382 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
1383 break;
1384
Dai Harukid080cd62008-04-09 19:37:51 -05001385 howmany++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
 1387 /* Deferred means some collisions occurred during transmit,
 1388  * but we eventually sent the packet. */
1389 if (bdp->status & TXBD_DEF)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001390 dev->stats.collisions++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391
1392 /* Free the sk buffer associated with this TxBD */
1393 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
Dai Harukid080cd62008-04-09 19:37:51 -05001394
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
1396 priv->skb_dirtytx =
1397 (priv->skb_dirtytx +
1398 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
1399
Dai Harukid080cd62008-04-09 19:37:51 -05001400 /* Clean BD length for empty detection */
1401 bdp->length = 0;
1402
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 /* update bdp to point at next bd in the ring (wrapping if necessary) */
1404 if (bdp->status & TXBD_WRAP)
1405 bdp = priv->tx_bd_base;
1406 else
1407 bdp++;
1408
1409 /* Move dirty_tx to be the next bd */
1410 priv->dirty_tx = bdp;
1411
1412 /* We freed a buffer, so now we can restart transmission */
1413 if (netif_queue_stopped(dev))
1414 netif_wake_queue(dev);
1415 } /* while ((bdp->status & TXBD_READY) == 0) */
1416
Dai Harukid080cd62008-04-09 19:37:51 -05001417 dev->stats.tx_packets += howmany;
1418
1419 return howmany;
1420}
1421
1422/* Interrupt Handler for Transmit complete */
1423static irqreturn_t gfar_transmit(int irq, void *dev_id)
1424{
1425 struct net_device *dev = (struct net_device *) dev_id;
1426 struct gfar_private *priv = netdev_priv(dev);
1427
1428 /* Clear IEVENT */
1429 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1430
1431 /* Lock priv */
1432 spin_lock(&priv->txlock);
1433
1434 gfar_clean_tx_ring(dev);
1435
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 /* If we are coalescing the interrupts, reset the timer */
1437 /* Otherwise, clear it */
Andy Fleming2f448912008-03-24 10:53:28 -05001438 if (likely(priv->txcoalescing)) {
1439 gfar_write(&priv->regs->txic, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 gfar_write(&priv->regs->txic,
1441 mk_ic_value(priv->txcount, priv->txtime));
Andy Fleming2f448912008-03-24 10:53:28 -05001442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
Andy Flemingfef61082006-04-20 16:44:29 -05001444 spin_unlock(&priv->txlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
1446 return IRQ_HANDLED;
1447}
1448
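/* Point an rx buffer descriptor at a freshly allocated skb: map the
 * buffer for DMA, then mark the descriptor empty (and wrapped, if it is
 * the last one in the ring) with a single 32-bit store that also clears
 * the length field. */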
Andy Fleming815b97c2008-04-22 17:18:29 -05001449static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
1450 struct sk_buff *skb)
1451{
1452 struct gfar_private *priv = netdev_priv(dev);
 1453 u32 *status_len = (u32 *)bdp;
1454 u16 flags;
1455
1456 bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1457 priv->rx_buffer_size, DMA_FROM_DEVICE);
1458
1459 flags = RXBD_EMPTY | RXBD_INTERRUPT;
1460
1461 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
1462 flags |= RXBD_WRAP;
1463
1464 eieio();
1465
1466 *status_len = (u32)flags << 16;
1467}
1468
1469
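/* Allocate an rx skb big enough for the current rx buffer size plus
 * alignment slack, and reserve enough bytes to align skb->data to
 * RXBUF_ALIGNMENT. */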
 1470struct sk_buff *gfar_new_skb(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471{
Andy Fleming7f7f5312005-11-11 12:38:59 -06001472 unsigned int alignamount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 struct gfar_private *priv = netdev_priv(dev);
1474 struct sk_buff *skb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
 1476 /* Allocate the skb; on failure we simply return NULL to the caller */
Andy Fleming815b97c2008-04-22 17:18:29 -05001477 skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
Andy Fleming815b97c2008-04-22 17:18:29 -05001479 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 return NULL;
1481
Andy Fleming7f7f5312005-11-11 12:38:59 -06001482 alignamount = RXBUF_ALIGNMENT -
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001483 (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
Andy Fleming7f7f5312005-11-11 12:38:59 -06001484
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 /* We need the data buffer to be aligned properly. We will reserve
1486 * as many bytes as needed to align the data properly
1487 */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001488 skb_reserve(skb, alignamount);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 return skb;
1491}
1492
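/* Translate the error bits of an rx buffer descriptor status word into
 * the appropriate netdev and extra-stats counters. */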
Li Yang298e1a92007-10-16 14:18:13 +08001493static inline void count_errors(unsigned short status, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494{
Li Yang298e1a92007-10-16 14:18:13 +08001495 struct gfar_private *priv = netdev_priv(dev);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001496 struct net_device_stats *stats = &dev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 struct gfar_extra_stats *estats = &priv->extra_stats;
1498
1499 /* If the packet was truncated, none of the other errors
1500 * matter */
1501 if (status & RXBD_TRUNCATED) {
1502 stats->rx_length_errors++;
1503
1504 estats->rx_trunc++;
1505
1506 return;
1507 }
1508 /* Count the errors, if there were any */
1509 if (status & (RXBD_LARGE | RXBD_SHORT)) {
1510 stats->rx_length_errors++;
1511
1512 if (status & RXBD_LARGE)
1513 estats->rx_large++;
1514 else
1515 estats->rx_short++;
1516 }
1517 if (status & RXBD_NONOCTET) {
1518 stats->rx_frame_errors++;
1519 estats->rx_nonoctet++;
1520 }
1521 if (status & RXBD_CRCERR) {
1522 estats->rx_crcerr++;
1523 stats->rx_crc_errors++;
1524 }
1525 if (status & RXBD_OVERRUN) {
1526 estats->rx_overrun++;
1527 stats->rx_crc_errors++;
1528 }
1529}
1530
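/* Receive interrupt handler: acknowledge the interrupt, mask further
 * rx/tx interrupts, and hand the actual processing to the NAPI poll
 * routine. */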
David Howells7d12e782006-10-05 14:55:46 +01001531irqreturn_t gfar_receive(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532{
1533 struct net_device *dev = (struct net_device *) dev_id;
1534 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 u32 tempval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 /* support NAPI */
Dai Harukid080cd62008-04-09 19:37:51 -05001538 /* Clear IEVENT, so interrupts aren't called again
1539 * because of the packets that have already arrived */
1540 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1541
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001542 if (netif_rx_schedule_prep(dev, &priv->napi)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 tempval = gfar_read(&priv->regs->imask);
Dai Harukid080cd62008-04-09 19:37:51 -05001544 tempval &= IMASK_RTX_DISABLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 gfar_write(&priv->regs->imask, tempval);
1546
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001547 __netif_rx_schedule(dev, &priv->napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 } else {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001549 if (netif_msg_rx_err(priv))
1550 printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
1551 dev->name, gfar_read(&priv->regs->ievent),
1552 gfar_read(&priv->regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
1555 return IRQ_HANDLED;
1556}
1557
Kumar Gala0bbaf062005-06-20 10:54:21 -05001558static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1559{
 1560 /* If valid headers were found, and valid sums
 1561  * were verified, then we tell the kernel that no further
 1562  * checksumming is necessary. Otherwise, the stack must still verify it */
Andy Fleming7f7f5312005-11-11 12:38:59 -06001563 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
Kumar Gala0bbaf062005-06-20 10:54:21 -05001564 skb->ip_summed = CHECKSUM_UNNECESSARY;
1565 else
1566 skb->ip_summed = CHECKSUM_NONE;
1567}
1568
1569
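/* Pull the frame control block off the front of the skb and return a
 * pointer to it. */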
1570static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
1571{
1572 struct rxfcb *fcb = (struct rxfcb *)skb->data;
1573
1574 /* Remove the FCB from the skb */
1575 skb_pull(skb, GMAC_FCB_LEN);
1576
1577 return fcb;
1578}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579
1580/* gfar_process_frame() -- handle one incoming packet if skb
1581 * isn't NULL. */
1582static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1583 int length)
1584{
1585 struct gfar_private *priv = netdev_priv(dev);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001586 struct rxfcb *fcb = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001588 if (NULL == skb) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001589 if (netif_msg_rx_err(priv))
 1590 printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001591 dev->stats.rx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 priv->extra_stats.rx_skbmissing++;
1593 } else {
Kumar Gala0bbaf062005-06-20 10:54:21 -05001594 int ret;
1595
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 /* Prep the skb for the packet */
1597 skb_put(skb, length);
1598
Kumar Gala0bbaf062005-06-20 10:54:21 -05001599 /* Grab the FCB if there is one */
1600 if (gfar_uses_fcb(priv))
1601 fcb = gfar_get_fcb(skb);
1602
1603 /* Remove the padded bytes, if there are any */
1604 if (priv->padding)
1605 skb_pull(skb, priv->padding);
1606
1607 if (priv->rx_csum_enable)
1608 gfar_rx_checksum(skb, fcb);
1609
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 /* Tell the skb what kind of packet this is */
1611 skb->protocol = eth_type_trans(skb, dev);
1612
1613 /* Send the packet up the stack */
Francois Romieu0aa15382008-07-11 00:33:52 +02001614 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) {
1615 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp,
1616 fcb->vlctl);
1617 } else
1618 ret = netif_receive_skb(skb);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001619
1620 if (NET_RX_DROP == ret)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 priv->extra_stats.kernel_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 }
1623
1624 return 0;
1625}
1626
1627/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
Kumar Gala0bbaf062005-06-20 10:54:21 -05001628 * until the budget/quota has been reached. Returns the number
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 * of frames handled
1630 */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001631int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632{
1633 struct rxbd8 *bdp;
1634 struct sk_buff *skb;
1635 u16 pkt_len;
1636 int howmany = 0;
1637 struct gfar_private *priv = netdev_priv(dev);
1638
1639 /* Get the first full descriptor */
1640 bdp = priv->cur_rx;
1641
1642 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
Andy Fleming815b97c2008-04-22 17:18:29 -05001643 struct sk_buff *newskb;
Scott Wood3b6330c2007-05-16 15:06:59 -05001644 rmb();
Andy Fleming815b97c2008-04-22 17:18:29 -05001645
1646 /* Add another skb for the future */
1647 newskb = gfar_new_skb(dev);
1648
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649 skb = priv->rx_skbuff[priv->skb_currx];
1650
Andy Fleming815b97c2008-04-22 17:18:29 -05001651 /* We drop the frame if we failed to allocate a new buffer */
1652 if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
1653 bdp->status & RXBD_ERR)) {
1654 count_errors(bdp->status, dev);
1655
1656 if (unlikely(!newskb))
1657 newskb = skb;
1658
1659 if (skb) {
1660 dma_unmap_single(&priv->dev->dev,
1661 bdp->bufPtr,
1662 priv->rx_buffer_size,
1663 DMA_FROM_DEVICE);
1664
1665 dev_kfree_skb_any(skb);
1666 }
1667 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 /* Increment the number of packets */
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001669 dev->stats.rx_packets++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 howmany++;
1671
1672 /* Remove the FCS from the packet length */
1673 pkt_len = bdp->length - 4;
1674
1675 gfar_process_frame(dev, skb, pkt_len);
1676
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001677 dev->stats.rx_bytes += pkt_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 }
1679
1680 dev->last_rx = jiffies;
1681
Andy Fleming815b97c2008-04-22 17:18:29 -05001682 priv->rx_skbuff[priv->skb_currx] = newskb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683
Andy Fleming815b97c2008-04-22 17:18:29 -05001684 /* Setup the new bdp */
1685 gfar_new_rxbdp(dev, bdp, newskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
1687 /* Update to the next pointer */
1688 if (bdp->status & RXBD_WRAP)
1689 bdp = priv->rx_bd_base;
1690 else
1691 bdp++;
1692
1693 /* update to point at the next skb */
1694 priv->skb_currx =
Andy Fleming815b97c2008-04-22 17:18:29 -05001695 (priv->skb_currx + 1) &
1696 RX_RING_MOD_MASK(priv->rx_ring_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 }
1698
1699 /* Update the current rxbd pointer to be the next one */
1700 priv->cur_rx = bdp;
1701
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 return howmany;
1703}
1704
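/* NAPI poll routine: reclaim the tx ring if the tx lock is free, process
 * up to 'budget' received frames, and re-enable interrupts once the rx
 * ring has been drained. */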
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001705static int gfar_poll(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001707 struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
1708 struct net_device *dev = priv->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 int howmany;
Dai Harukid080cd62008-04-09 19:37:51 -05001710 unsigned long flags;
1711
1712 /* If we fail to get the lock, don't bother with the TX BDs */
1713 if (spin_trylock_irqsave(&priv->txlock, flags)) {
1714 gfar_clean_tx_ring(dev);
1715 spin_unlock_irqrestore(&priv->txlock, flags);
1716 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001718 howmany = gfar_clean_rx_ring(dev, budget);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001720 if (howmany < budget) {
1721 netif_rx_complete(dev, napi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
1723 /* Clear the halt bit in RSTAT */
1724 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1725
1726 gfar_write(&priv->regs->imask, IMASK_DEFAULT);
1727
1728 /* If we are coalescing interrupts, update the timer */
1729 /* Otherwise, clear it */
Andy Fleming2f448912008-03-24 10:53:28 -05001730 if (likely(priv->rxcoalescing)) {
1731 gfar_write(&priv->regs->rxic, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 gfar_write(&priv->regs->rxic,
1733 mk_ic_value(priv->rxcount, priv->rxtime));
Andy Fleming2f448912008-03-24 10:53:28 -05001734 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 }
1736
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001737 return howmany;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739
Vitaly Woolf2d71c22006-11-07 13:27:02 +03001740#ifdef CONFIG_NET_POLL_CONTROLLER
1741/*
1742 * Polling 'interrupt' - used by things like netconsole to send skbs
1743 * without having to re-enable interrupts. It's not called while
1744 * the interrupt routine is executing.
1745 */
1746static void gfar_netpoll(struct net_device *dev)
1747{
1748 struct gfar_private *priv = netdev_priv(dev);
1749
1750 /* If the device has multiple interrupts, run tx/rx */
1751 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1752 disable_irq(priv->interruptTransmit);
1753 disable_irq(priv->interruptReceive);
1754 disable_irq(priv->interruptError);
1755 gfar_interrupt(priv->interruptTransmit, dev);
1756 enable_irq(priv->interruptError);
1757 enable_irq(priv->interruptReceive);
1758 enable_irq(priv->interruptTransmit);
1759 } else {
1760 disable_irq(priv->interruptTransmit);
1761 gfar_interrupt(priv->interruptTransmit, dev);
1762 enable_irq(priv->interruptTransmit);
1763 }
1764}
1765#endif
1766
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767/* The interrupt handler for devices with one interrupt */
David Howells7d12e782006-10-05 14:55:46 +01001768static irqreturn_t gfar_interrupt(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769{
1770 struct net_device *dev = dev_id;
1771 struct gfar_private *priv = netdev_priv(dev);
1772
1773 /* Save ievent for future reference */
1774 u32 events = gfar_read(&priv->regs->ievent);
1775
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 /* Check for reception */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04001777 if (events & IEVENT_RX_MASK)
David Howells7d12e782006-10-05 14:55:46 +01001778 gfar_receive(irq, dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
1780 /* Check for transmit completion */
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04001781 if (events & IEVENT_TX_MASK)
David Howells7d12e782006-10-05 14:55:46 +01001782 gfar_transmit(irq, dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04001784 /* Check for errors */
1785 if (events & IEVENT_ERR_MASK)
1786 gfar_error(irq, dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787
1788 return IRQ_HANDLED;
1789}
1790
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791/* Called every time the controller might need to be made
1792 * aware of new link state. The PHY code conveys this
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001793 * information through variables in the phydev structure, and this
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 * function converts those variables into the appropriate
1795 * register values, and can bring down the device if needed.
1796 */
1797static void adjust_link(struct net_device *dev)
1798{
1799 struct gfar_private *priv = netdev_priv(dev);
Kumar Galacc8c6e32006-02-01 15:18:03 -06001800 struct gfar __iomem *regs = priv->regs;
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001801 unsigned long flags;
1802 struct phy_device *phydev = priv->phydev;
1803 int new_state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
Andy Flemingfef61082006-04-20 16:44:29 -05001805 spin_lock_irqsave(&priv->txlock, flags);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001806 if (phydev->link) {
1807 u32 tempval = gfar_read(&regs->maccfg2);
Andy Fleming7f7f5312005-11-11 12:38:59 -06001808 u32 ecntrl = gfar_read(&regs->ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001809
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 /* Now we make sure that we can be in full duplex mode.
1811 * If not, we operate in half-duplex mode. */
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001812 if (phydev->duplex != priv->oldduplex) {
1813 new_state = 1;
1814 if (!(phydev->duplex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 tempval &= ~(MACCFG2_FULL_DUPLEX);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001816 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 tempval |= MACCFG2_FULL_DUPLEX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001819 priv->oldduplex = phydev->duplex;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 }
1821
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001822 if (phydev->speed != priv->oldspeed) {
1823 new_state = 1;
1824 switch (phydev->speed) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 case 1000:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 tempval =
1827 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 break;
1829 case 100:
1830 case 10:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 tempval =
1832 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
Andy Fleming7f7f5312005-11-11 12:38:59 -06001833
1834 /* Reduced mode distinguishes
1835 * between 10 and 100 */
1836 if (phydev->speed == SPEED_100)
1837 ecntrl |= ECNTRL_R100;
1838 else
1839 ecntrl &= ~(ECNTRL_R100);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 break;
1841 default:
Kumar Gala0bbaf062005-06-20 10:54:21 -05001842 if (netif_msg_link(priv))
1843 printk(KERN_WARNING
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001844 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
1845 dev->name, phydev->speed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 break;
1847 }
1848
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001849 priv->oldspeed = phydev->speed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 }
1851
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001852 gfar_write(&regs->maccfg2, tempval);
Andy Fleming7f7f5312005-11-11 12:38:59 -06001853 gfar_write(&regs->ecntrl, ecntrl);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001854
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 if (!priv->oldlink) {
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001856 new_state = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 priv->oldlink = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 }
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001859 } else if (priv->oldlink) {
1860 new_state = 1;
1861 priv->oldlink = 0;
1862 priv->oldspeed = 0;
1863 priv->oldduplex = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001866 if (new_state && netif_msg_link(priv))
1867 phy_print_status(phydev);
1868
Andy Flemingfef61082006-04-20 16:44:29 -05001869 spin_unlock_irqrestore(&priv->txlock, flags);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04001870}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871
1872/* Update the hash table based on the current list of multicast
1873 * addresses we subscribe to. Also, change the promiscuity of
1874 * the device based on the flags (this function is called
1875 * whenever dev->flags is changed */
1876static void gfar_set_multi(struct net_device *dev)
1877{
1878 struct dev_mc_list *mc_ptr;
1879 struct gfar_private *priv = netdev_priv(dev);
Kumar Galacc8c6e32006-02-01 15:18:03 -06001880 struct gfar __iomem *regs = priv->regs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 u32 tempval;
1882
 1883 if (dev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 /* Set RCTRL to PROM */
1885 tempval = gfar_read(&regs->rctrl);
1886 tempval |= RCTRL_PROM;
1887 gfar_write(&regs->rctrl, tempval);
1888 } else {
1889 /* Set RCTRL to not PROM */
1890 tempval = gfar_read(&regs->rctrl);
1891 tempval &= ~(RCTRL_PROM);
1892 gfar_write(&regs->rctrl, tempval);
1893 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 if (dev->flags & IFF_ALLMULTI) {
1896 /* Set the hash to rx all multicast frames */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001897 gfar_write(&regs->igaddr0, 0xffffffff);
1898 gfar_write(&regs->igaddr1, 0xffffffff);
1899 gfar_write(&regs->igaddr2, 0xffffffff);
1900 gfar_write(&regs->igaddr3, 0xffffffff);
1901 gfar_write(&regs->igaddr4, 0xffffffff);
1902 gfar_write(&regs->igaddr5, 0xffffffff);
1903 gfar_write(&regs->igaddr6, 0xffffffff);
1904 gfar_write(&regs->igaddr7, 0xffffffff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 gfar_write(&regs->gaddr0, 0xffffffff);
1906 gfar_write(&regs->gaddr1, 0xffffffff);
1907 gfar_write(&regs->gaddr2, 0xffffffff);
1908 gfar_write(&regs->gaddr3, 0xffffffff);
1909 gfar_write(&regs->gaddr4, 0xffffffff);
1910 gfar_write(&regs->gaddr5, 0xffffffff);
1911 gfar_write(&regs->gaddr6, 0xffffffff);
1912 gfar_write(&regs->gaddr7, 0xffffffff);
1913 } else {
Andy Fleming7f7f5312005-11-11 12:38:59 -06001914 int em_num;
1915 int idx;
1916
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 /* zero out the hash */
Kumar Gala0bbaf062005-06-20 10:54:21 -05001918 gfar_write(&regs->igaddr0, 0x0);
1919 gfar_write(&regs->igaddr1, 0x0);
1920 gfar_write(&regs->igaddr2, 0x0);
1921 gfar_write(&regs->igaddr3, 0x0);
1922 gfar_write(&regs->igaddr4, 0x0);
1923 gfar_write(&regs->igaddr5, 0x0);
1924 gfar_write(&regs->igaddr6, 0x0);
1925 gfar_write(&regs->igaddr7, 0x0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 gfar_write(&regs->gaddr0, 0x0);
1927 gfar_write(&regs->gaddr1, 0x0);
1928 gfar_write(&regs->gaddr2, 0x0);
1929 gfar_write(&regs->gaddr3, 0x0);
1930 gfar_write(&regs->gaddr4, 0x0);
1931 gfar_write(&regs->gaddr5, 0x0);
1932 gfar_write(&regs->gaddr6, 0x0);
1933 gfar_write(&regs->gaddr7, 0x0);
1934
Andy Fleming7f7f5312005-11-11 12:38:59 -06001935 /* If we have extended hash tables, we need to
1936 * clear the exact match registers to prepare for
1937 * setting them */
1938 if (priv->extended_hash) {
1939 em_num = GFAR_EM_NUM + 1;
1940 gfar_clear_exact_match(dev);
1941 idx = 1;
1942 } else {
1943 idx = 0;
1944 em_num = 0;
1945 }
1946
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 if (dev->mc_count == 0)
1948 return;
1949
1950 /* Parse the list, and set the appropriate bits */
 1951 for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
Andy Fleming7f7f5312005-11-11 12:38:59 -06001952 if (idx < em_num) {
1953 gfar_set_mac_for_addr(dev, idx,
1954 mc_ptr->dmi_addr);
1955 idx++;
1956 } else
1957 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 }
1959 }
1960
1961 return;
1962}
1963
Andy Fleming7f7f5312005-11-11 12:38:59 -06001964
1965/* Clears each of the exact match registers to zero, so they
1966 * don't interfere with normal reception */
1967static void gfar_clear_exact_match(struct net_device *dev)
1968{
1969 int idx;
1970 u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};
1971
 1972 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
1973 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
1974}
1975
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976/* Set the appropriate hash bit for the given addr */
1977/* The algorithm works like so:
1978 * 1) Take the Destination Address (ie the multicast address), and
1979 * do a CRC on it (little endian), and reverse the bits of the
1980 * result.
1981 * 2) Use the 8 most significant bits as a hash into a 256-entry
1982 * table. The table is controlled through 8 32-bit registers:
1983 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 1984 * entry 255. This means that the 3 most significant bits in the
 1985 * hash index indicate which gaddr register to use, and the 5 other bits
1986 * indicate which bit (assuming an IBM numbering scheme, which
1987 * for PowerPC (tm) is usually the case) in the register holds
1988 * the entry. */
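/* For example, with a 256-entry hash (hash_width == 8), a CRC whose top
 * byte is 0xb4 (1011 0100b) yields whichreg = 101b = 5 and whichbit =
 * 10100b = 20, so bit 20 (counting from the MSB, per the IBM numbering
 * above) of hash register 5 is set. */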
1989static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1990{
1991 u32 tempval;
1992 struct gfar_private *priv = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 u32 result = ether_crc(MAC_ADDR_LEN, addr);
Kumar Gala0bbaf062005-06-20 10:54:21 -05001994 int width = priv->hash_width;
1995 u8 whichbit = (result >> (32 - width)) & 0x1f;
1996 u8 whichreg = result >> (32 - width + 5);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 u32 value = (1 << (31-whichbit));
1998
Kumar Gala0bbaf062005-06-20 10:54:21 -05001999 tempval = gfar_read(priv->hash_regs[whichreg]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 tempval |= value;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002001 gfar_write(priv->hash_regs[whichreg], tempval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
2003 return;
2004}
2005
Andy Fleming7f7f5312005-11-11 12:38:59 -06002006
2007/* There are multiple MAC Address register pairs on some controllers
2008 * This function sets the numth pair to a given address
2009 */
2010static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2011{
2012 struct gfar_private *priv = netdev_priv(dev);
2013 int idx;
2014 char tmpbuf[MAC_ADDR_LEN];
2015 u32 tempval;
Kumar Galacc8c6e32006-02-01 15:18:03 -06002016 u32 __iomem *macptr = &priv->regs->macstnaddr1;
Andy Fleming7f7f5312005-11-11 12:38:59 -06002017
2018 macptr += num*2;
2019
 2020 /* Now copy it into the mac registers backwards, since the
 2021  * hardware expects the station address in reversed byte order */
2022 for (idx = 0; idx < MAC_ADDR_LEN; idx++)
2023 tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
2024
2025 gfar_write(macptr, *((u32 *) (tmpbuf)));
2026
2027 tempval = *((u32 *) (tmpbuf + 4));
2028
2029 gfar_write(macptr+1, tempval);
2030}
2031
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032/* GFAR error interrupt handler */
David Howells7d12e782006-10-05 14:55:46 +01002033static irqreturn_t gfar_error(int irq, void *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034{
2035 struct net_device *dev = dev_id;
2036 struct gfar_private *priv = netdev_priv(dev);
2037
2038 /* Save ievent for future reference */
2039 u32 events = gfar_read(&priv->regs->ievent);
2040
2041 /* Clear IEVENT */
Scott Woodd87eb122008-07-11 18:04:45 -05002042 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
2043
2044 /* Magic Packet is not an error. */
2045 if ((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2046 (events & IEVENT_MAG))
2047 events &= ~IEVENT_MAG;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048
 2049 /* Report the error events when error messaging is enabled */
Kumar Gala0bbaf062005-06-20 10:54:21 -05002050 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2051 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002052 dev->name, events, gfar_read(&priv->regs->imask));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053
2054 /* Update the error counters */
2055 if (events & IEVENT_TXE) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002056 dev->stats.tx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
2058 if (events & IEVENT_LC)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002059 dev->stats.tx_window_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 if (events & IEVENT_CRL)
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002061 dev->stats.tx_aborted_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 if (events & IEVENT_XFUN) {
Kumar Gala0bbaf062005-06-20 10:54:21 -05002063 if (netif_msg_tx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002064 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2065 "packet dropped.\n", dev->name);
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002066 dev->stats.tx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 priv->extra_stats.tx_underrun++;
2068
2069 /* Reactivate the Tx Queues */
2070 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
2071 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002072 if (netif_msg_tx_err(priv))
2073 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 }
2075 if (events & IEVENT_BSY) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002076 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 priv->extra_stats.rx_bsy++;
2078
David Howells7d12e782006-10-05 14:55:46 +01002079 gfar_receive(irq, dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
Kumar Gala0bbaf062005-06-20 10:54:21 -05002081 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002082 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2083 dev->name, gfar_read(&priv->regs->rstat));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 }
2085 if (events & IEVENT_BABR) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07002086 dev->stats.rx_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 priv->extra_stats.rx_babr++;
2088
Kumar Gala0bbaf062005-06-20 10:54:21 -05002089 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002090 printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 }
2092 if (events & IEVENT_EBERR) {
2093 priv->extra_stats.eberr++;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002094 if (netif_msg_rx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002095 printk(KERN_DEBUG "%s: bus error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 }
Kumar Gala0bbaf062005-06-20 10:54:21 -05002097 if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002098 printk(KERN_DEBUG "%s: control frame\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099
2100 if (events & IEVENT_BABT) {
2101 priv->extra_stats.tx_babt++;
Kumar Gala0bbaf062005-06-20 10:54:21 -05002102 if (netif_msg_tx_err(priv))
Sergei Shtylyov538cc7e2007-02-15 17:56:01 +04002103 printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 }
2105 return IRQ_HANDLED;
2106}
2107
Kay Sievers72abb462008-04-18 13:50:44 -07002108/* work with hotplug and coldplug */
2109MODULE_ALIAS("platform:fsl-gianfar");
2110
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111/* Structure for a device driver */
Russell King3ae5eae2005-11-09 22:32:44 +00002112static struct platform_driver gfar_driver = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 .probe = gfar_probe,
2114 .remove = gfar_remove,
Scott Woodd87eb122008-07-11 18:04:45 -05002115 .suspend = gfar_suspend,
2116 .resume = gfar_resume,
Russell King3ae5eae2005-11-09 22:32:44 +00002117 .driver = {
2118 .name = "fsl-gianfar",
Kay Sievers72abb462008-04-18 13:50:44 -07002119 .owner = THIS_MODULE,
Russell King3ae5eae2005-11-09 22:32:44 +00002120 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121};
2122
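/* Register the MDIO bus driver first, then the gianfar platform driver;
 * back out the MDIO registration if the platform driver fails to
 * register. */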
2123static int __init gfar_init(void)
2124{
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002125 int err = gfar_mdio_init();
2126
2127 if (err)
2128 return err;
2129
Russell King3ae5eae2005-11-09 22:32:44 +00002130 err = platform_driver_register(&gfar_driver);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002131
2132 if (err)
2133 gfar_mdio_exit();
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002134
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002135 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136}
2137
2138static void __exit gfar_exit(void)
2139{
Russell King3ae5eae2005-11-09 22:32:44 +00002140 platform_driver_unregister(&gfar_driver);
Andy Flemingbb40dcb2005-09-23 22:54:21 -04002141 gfar_mdio_exit();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142}
2143
2144module_init(gfar_init);
2145module_exit(gfar_exit);
2146