/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2009 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <asm/cacheflush.h>

#include "sh_eth.h"

/* CPU-dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT    1
static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;

        if (mdp->duplex)        /* Full */
                ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
        else                    /* Half */
                ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;

        switch (mdp->speed) {
        case 10: /* 10BASE */
                ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
                break;
        case 100:/* 100BASE */
                ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
                break;
        default:
                break;
        }
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
                          EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
        .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
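
/*
 * Note: fields left zero in an sh_eth_cpu_data instance (e.g. fdr_value
 * and rmcr_value above) are filled in with chip-independent defaults by
 * sh_eth_set_default_cpu_data() below, so each SoC block only spells
 * out what differs from the common case.
 */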

#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU  1
static void sh_eth_chip_reset(struct net_device *ndev)
{
        /* reset device */
        ctrl_outl(ARSTR_ARSTR, ARSTR);
        mdelay(1);
}

static void sh_eth_reset(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;
        int cnt = 100;

        ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
        ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
        while (cnt > 0) {
                if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
                        break;
                mdelay(1);
                cnt--;
        }
        if (cnt <= 0)   /* was "cnt < 0", which could never be true */
                printk(KERN_ERR "Device reset failed\n");

        /* Table Init */
        ctrl_outl(0x0, ioaddr + TDLAR);
        ctrl_outl(0x0, ioaddr + TDFAR);
        ctrl_outl(0x0, ioaddr + TDFXR);
        ctrl_outl(0x0, ioaddr + TDFFR);
        ctrl_outl(0x0, ioaddr + RDLAR);
        ctrl_outl(0x0, ioaddr + RDFAR);
        ctrl_outl(0x0, ioaddr + RDFXR);
        ctrl_outl(0x0, ioaddr + RDFFR);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;

        if (mdp->duplex)        /* Full */
                ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
        else                    /* Half */
                ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;

        switch (mdp->speed) {
        case 10: /* 10BASE */
                ctrl_outl(GECMR_10, ioaddr + GECMR);
                break;
        case 100:/* 100BASE */
                ctrl_outl(GECMR_100, ioaddr + GECMR);
                break;
        case 1000: /* 1000BASE */
                ctrl_outl(GECMR_1000, ioaddr + GECMR);
                break;
        default:
                break;
        }
}

/* SH7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
        .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
                          EESR_TFE,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT    1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT    1
#define SH_ETH_HAS_TSU  1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
#endif

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
        if (!cd->ecsr_value)
                cd->ecsr_value = DEFAULT_ECSR_INIT;

        if (!cd->ecsipr_value)
                cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

        if (!cd->fcftr_value)
                cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | DEFAULT_FIFO_F_D_RFD;

        if (!cd->fdr_value)
                cd->fdr_value = DEFAULT_FDR_INIT;

        if (!cd->rmcr_value)
                cd->rmcr_value = DEFAULT_RMCR_VALUE;

        if (!cd->tx_check)
                cd->tx_check = DEFAULT_TX_CHECK;

        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

        if (!cd->tx_error_check)
                cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;

        ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
        mdelay(3);
        ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
}
#endif

#if defined(CONFIG_CPU_SH4)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        int reserve;

        reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
        if (reserve)
                skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif


/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return cpu_to_le32(x);
        case EDMAC_BIG_ENDIAN:
                return cpu_to_be32(x);
        }
        return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return le32_to_cpu(x);
        case EDMAC_BIG_ENDIAN:
                return be32_to_cpu(x);
        }
        return x;
}
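
/*
 * Usage sketch: descriptor words are always written through
 * cpu_to_edmac() and read back through edmac_to_cpu(), so the same
 * driver works whether the EDMAC is wired big- or little-endian
 * relative to the CPU, e.g.:
 *
 *      rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 *      desc_status = edmac_to_cpu(mdp, rxdesc->status);
 */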

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;

        ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
                  ioaddr + MAHR);
        ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
                  ioaddr + MALR);
}

/*
 * Get the MAC address from the SuperH MAC address registers
 *
 * The SuperH Ethernet controller has no ROM for its MAC address.
 * This driver picks up the MAC address programmed by the bootloader
 * (U-Boot or sh-ipl+g); to use this device, a MAC address must be
 * set in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
        u32 ioaddr = ndev->base_addr;

        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
                memcpy(ndev->dev_addr, mac, 6);
        } else {
                ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
                ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
                ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
                ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
                ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
                ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
        }
}
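
/*
 * Board-file sketch (hypothetical values): the MAC address, PHY address
 * and EDMAC endianness are expected to arrive via platform data, as
 * consumed in sh_eth_drv_probe() below.  Field names follow the
 * sh_eth_plat_data usage in this file:
 *
 *      static struct sh_eth_plat_data my_eth_pdata = {
 *              .phy            = 0x1f,
 *              .edmac_endian   = EDMAC_LITTLE_ENDIAN,
 *              .mac_addr       = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *      };
 *
 * A zeroed mac_addr makes read_mac_address() fall back to whatever the
 * bootloader left in MAHR/MALR.
 */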

struct bb_info {
        struct mdiobb_ctrl ctrl;
        u32 addr;
        u32 mmd_msk;/* MMD */
        u32 mdo_msk;
        u32 mdi_msk;
        u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
        ctrl_outl(ctrl_inl(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
        ctrl_outl((ctrl_inl(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
        return (ctrl_inl(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
        if (bit)
                bb_set(bitbang->addr, bitbang->mmd_msk);
        else
                bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdo_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
        return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdc_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = sh_mdc_ctrl,
        .set_mdio_dir = sh_mmd_ctrl,
        .set_mdio_data = sh_set_mdio,
        .get_mdio_data = sh_get_mdio,
};
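
/*
 * The four masks in struct bb_info each select one bit of the PIR
 * (PHY interface) register; sh_mdio_init() below assigns mdc = bit 0,
 * mmd (direction) = bit 1, mdo (output) = bit 2 and mdi (input) = bit 3,
 * and the mdio-bitbang helper turns these accessors into a full MII bus.
 */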

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < RX_RING_SIZE; i++) {
                        if (mdp->rx_skbuff[i])
                                dev_kfree_skb(mdp->rx_skbuff[i]);
                }
        }
        kfree(mdp->rx_skbuff);

        /* Free Tx skb ringbuffer */
        if (mdp->tx_skbuff) {
                for (i = 0; i < TX_RING_SIZE; i++) {
                        if (mdp->tx_skbuff[i])
                                dev_kfree_skb(mdp->tx_skbuff[i]);
                }
        }
        kfree(mdp->tx_skbuff);
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct sh_eth_rxdesc *rxdesc = NULL;
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
        int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

        mdp->cur_rx = mdp->cur_tx = 0;
        mdp->dirty_rx = mdp->dirty_tx = 0;

        memset(mdp->rx_ring, 0, rx_ringsize);

        /* build Rx ring buffer */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = dev_alloc_skb(mdp->rx_buf_sz);
                mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
                               DMA_FROM_DEVICE);
                skb->dev = ndev; /* Mark as being used by this device. */
                sh_eth_set_receive_align(skb);

                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
                rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

                /* The buffer length must be a multiple of 16 bytes. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                /* Rx descriptor address set */
                if (i == 0) {
                        ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
                        ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
#endif
                }
        }

        mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

        /* Mark the last entry as wrapping the ring. */
        rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

        memset(mdp->tx_ring, 0, tx_ringsize);

        /* build Tx ring buffer */
        for (i = 0; i < TX_RING_SIZE; i++) {
                mdp->tx_skbuff[i] = NULL;
                txdesc = &mdp->tx_ring[i];
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                txdesc->buffer_length = 0;
                if (i == 0) {
                        /* Tx descriptor address set */
                        ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
                        ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
#endif
                }
        }

        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int rx_ringsize, tx_ringsize, ret = 0;

        /*
         * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
         * card needs room to do 8 byte alignment, +2 so we can reserve
         * the first 2 bytes, and +16 gets room for the status word from the
         * card.
         */
        mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
                          (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
        if (mdp->cd->rpadir)
                mdp->rx_buf_sz += NET_IP_ALIGN;

        /* Allocate RX and TX skb rings */
        mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
                                 GFP_KERNEL);
        if (!mdp->rx_skbuff) {
                dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
                ret = -ENOMEM;
                return ret;
        }

        mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
                                 GFP_KERNEL);
        if (!mdp->tx_skbuff) {
                dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
                ret = -ENOMEM;
                goto skb_ring_free;
        }

        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
        mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
                                          GFP_KERNEL);

        if (!mdp->rx_ring) {
                dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
                        rx_ringsize);
                ret = -ENOMEM;
                goto desc_ring_free;
        }

        mdp->dirty_rx = 0;

        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
        mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->tx_ring) {
                dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
                        tx_ringsize);
                ret = -ENOMEM;
                goto desc_ring_free;
        }
        return ret;

desc_ring_free:
        /* free DMA buffer */
        dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
        /* Free Rx and Tx skb ring buffer */
        sh_eth_ring_free(ndev);

        return ret;
}
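
/*
 * Worked example of the rx_buf_sz arithmetic above for the default MTU
 * of 1500 (> 1492, so the formula applies):
 *      (1500 + 26 + 7) & ~7 = 1528, then + 2 + 16 = 1546 bytes
 * per receive buffer (plus NET_IP_ALIGN when rpadir is set); the value
 * is rounded up again to a 16-byte multiple when written into each
 * descriptor via ALIGN(mdp->rx_buf_sz, 16).
 */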

static int sh_eth_dev_init(struct net_device *ndev)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;
        u_int32_t rx_int_var, tx_int_var;
        u32 val;

        /* Soft Reset */
        sh_eth_reset(ndev);

        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
                ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);

        /* all sh_eth int mask */
        ctrl_outl(0, ioaddr + EESIPR);

#if defined(__LITTLE_ENDIAN__)
        if (mdp->cd->hw_swap)
                ctrl_outl(EDMR_EL, ioaddr + EDMR);
        else
#endif
                ctrl_outl(0, ioaddr + EDMR);

        /* FIFO size set */
        ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
        ctrl_outl(0, ioaddr + TFTR);

        /* Frame recv control */
        ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);

        rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
        tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
        ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);

        if (mdp->cd->bculr)
                ctrl_outl(0x800, ioaddr + BCULR);       /* Burst cycle set */

        ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);

        if (!mdp->cd->no_trimd)
                ctrl_outl(0, ioaddr + TRIMD);

        /* Recv frame limit set register */
        ctrl_outl(RFLR_VALUE, ioaddr + RFLR);

        ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
        ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);

        /* PAUSE Prohibition */
        val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
                ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

        ctrl_outl(val, ioaddr + ECMR);

        if (mdp->cd->set_rate)
                mdp->cd->set_rate(ndev);

        /* E-MAC Status Register clear */
        ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);

        /* E-MAC Interrupt Enable register */
        ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);

        /* Set MAC address */
        update_mac_address(ndev);

        /* mask reset */
        if (mdp->cd->apr)
                ctrl_outl(APR_AP, ioaddr + APR);
        if (mdp->cd->mpr)
                ctrl_outl(MPR_MP, ioaddr + MPR);
        if (mdp->cd->tpauser)
                ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);

        /* Setting the Rx mode will start the Rx process. */
        ctrl_outl(EDRRR_R, ioaddr + EDRRR);

        netif_start_queue(ndev);

        return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        int freeNum = 0;
        int entry = 0;

        for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
                entry = mdp->dirty_tx % TX_RING_SIZE;
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
                        mdp->tx_skbuff[entry] = NULL;
                        freeNum++;
                }
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                if (entry >= TX_RING_SIZE - 1)
                        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

                mdp->stats.tx_packets++;
                mdp->stats.tx_bytes += txdesc->buffer_length;
        }
        return freeNum;
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;

        int entry = mdp->cur_rx % RX_RING_SIZE;
        int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
        struct sk_buff *skb;
        u16 pkt_len = 0;
        u32 desc_status;

        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;

                if (--boguscnt < 0)
                        break;

                if (!(desc_status & RDFEND))
                        mdp->stats.rx_length_errors++;

                if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
                                   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
                        mdp->stats.rx_errors++;
                        if (desc_status & RD_RFS1)
                                mdp->stats.rx_crc_errors++;
                        if (desc_status & RD_RFS2)
                                mdp->stats.rx_frame_errors++;
                        if (desc_status & RD_RFS3)
                                mdp->stats.rx_length_errors++;
                        if (desc_status & RD_RFS4)
                                mdp->stats.rx_length_errors++;
                        if (desc_status & RD_RFS6)
                                mdp->stats.rx_missed_errors++;
                        if (desc_status & RD_RFS10)
                                mdp->stats.rx_over_errors++;
                } else {
                        if (!mdp->cd->hw_swap)
                                sh_eth_soft_swap(
                                        phys_to_virt(ALIGN(rxdesc->addr, 4)),
                                        pkt_len + 2);
                        skb = mdp->rx_skbuff[entry];
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_rx(skb);
                        mdp->stats.rx_packets++;
                        mdp->stats.rx_bytes += pkt_len;
                }
                rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
                entry = (++mdp->cur_rx) % RX_RING_SIZE;
                rxdesc = &mdp->rx_ring[entry];
        }

        /* Refill the Rx ring buffers. */
        for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
                entry = mdp->dirty_rx % RX_RING_SIZE;
                rxdesc = &mdp->rx_ring[entry];
                /* The buffer length must be a multiple of 16 bytes. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

                if (mdp->rx_skbuff[entry] == NULL) {
                        skb = dev_alloc_skb(mdp->rx_buf_sz);
                        mdp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
                                       DMA_FROM_DEVICE);
                        skb->dev = ndev;
                        sh_eth_set_receive_align(skb);

                        skb->ip_summed = CHECKSUM_NONE;
                        rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                }
                if (entry >= RX_RING_SIZE - 1)
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
                else
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP);
        }

        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
        if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
                ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);

        return 0;
}
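
/*
 * Design note: receive processing runs entirely in hard-IRQ context and
 * hands packets to the stack with netif_rx(); there is no NAPI polling,
 * so boguscnt (one ring's worth of descriptors) is the only bound on
 * per-interrupt RX work.
 */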

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;
        u32 felic_stat;
        u32 link_stat;
        u32 mask;

        if (intr_status & EESR_ECI) {
                felic_stat = ctrl_inl(ioaddr + ECSR);
                ctrl_outl(felic_stat, ioaddr + ECSR);   /* clear int */
                if (felic_stat & ECSR_ICD)
                        mdp->stats.tx_carrier_errors++;
                if (felic_stat & ECSR_LCHNG) {
                        /* Link Changed */
                        if (mdp->cd->no_psr || mdp->no_ether_link) {
                                if (mdp->link == PHY_DOWN)
                                        link_stat = 0;
                                else
                                        link_stat = PHY_ST_LINK;
                        } else {
                                link_stat = (ctrl_inl(ioaddr + PSR));
                                if (mdp->ether_link_active_low)
                                        link_stat = ~link_stat;
                        }
                        if (!(link_stat & PHY_ST_LINK)) {
                                /* Link Down : disable tx and rx */
                                ctrl_outl(ctrl_inl(ioaddr + ECMR) &
                                          ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
                        } else {
                                /* Link Up */
                                ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
                                          ~DMAC_M_ECI, ioaddr + EESIPR);
                                /* clear int */
                                ctrl_outl(ctrl_inl(ioaddr + ECSR),
                                          ioaddr + ECSR);
                                ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
                                          DMAC_M_ECI, ioaddr + EESIPR);
                                /* enable tx and rx */
                                ctrl_outl(ctrl_inl(ioaddr + ECMR) |
                                          (ECMR_RE | ECMR_TE), ioaddr + ECMR);
                        }
                }
        }

        if (intr_status & EESR_TWB) {
                /* Write-back end: an otherwise unused write-back interrupt */
                if (intr_status & EESR_TABT)    /* Transmit Abort int */
                        mdp->stats.tx_aborted_errors++;
        }

        if (intr_status & EESR_RABT) {
                /* Receive Abort int */
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        mdp->stats.rx_frame_errors++;
                        dev_err(&ndev->dev, "Receive Frame Overflow\n");
                }
        }

        if (!mdp->cd->no_ade) {
                if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
                    intr_status & EESR_TFE)
                        mdp->stats.tx_fifo_errors++;
        }

        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                mdp->stats.rx_over_errors++;

                if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
                        ctrl_outl(EDRRR_R, ioaddr + EDRRR);
                dev_err(&ndev->dev, "Receive Descriptor Empty\n");
        }
        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                mdp->stats.rx_fifo_errors++;
                dev_err(&ndev->dev, "Receive FIFO Overflow\n");
        }

        mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
        if (mdp->cd->no_ade)
                mask &= ~EESR_ADE;
        if (intr_status & mask) {
                /* Tx error */
                u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
                /* dmesg */
                dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
                        intr_status, mdp->cur_tx);
                dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
                        mdp->dirty_tx, (u32) ndev->state, edtrr);
                /* dirty buffer free */
                sh_eth_txfree(ndev);

                /* SH7712 BUG */
                if (edtrr ^ EDTRR_TRNS) {
                        /* tx dma start */
                        ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
                }
                /* wakeup */
                netif_wake_queue(ndev);
        }
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
        struct net_device *ndev = netdev;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
        u32 ioaddr, intr_status = 0;

        ioaddr = ndev->base_addr;
        spin_lock(&mdp->lock);

        /* Get interrupt status */
        intr_status = ctrl_inl(ioaddr + EESR);
        /* Clear interrupt */
        if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                           EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
                           cd->tx_check | cd->eesr_err_check)) {
                ctrl_outl(intr_status, ioaddr + EESR);
                ret = IRQ_HANDLED;
        } else
                goto other_irq;

        if (intr_status & (EESR_FRC |   /* Frame recv */
                           EESR_RMAF |  /* Multicast address recv */
                           EESR_RRF |   /* Bit frame recv */
                           EESR_RTLF |  /* Long frame recv */
                           EESR_RTSF |  /* Short frame recv */
                           EESR_PRE |   /* PHY-LSI recv error */
                           EESR_CERF)) { /* Recv frame CRC error */
                sh_eth_rx(ndev);
        }

        /* Tx Check */
        if (intr_status & cd->tx_check) {
                sh_eth_txfree(ndev);
                netif_wake_queue(ndev);
        }

        if (intr_status & cd->eesr_err_check)
                sh_eth_error(ndev, intr_status);

other_irq:
        spin_unlock(&mdp->lock);

        return ret;
}

static void sh_eth_timer(unsigned long data)
{
        struct net_device *ndev = (struct net_device *)data;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        mod_timer(&mdp->timer, jiffies + (10 * HZ));
}
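
/*
 * Note: sh_eth_timer() only re-arms itself every 10 seconds; link state
 * is actually tracked by the PHY layer through sh_eth_adjust_link()
 * below, so the timer is effectively a placeholder.
 */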

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = mdp->phydev;
        u32 ioaddr = ndev->base_addr;
        int new_state = 0;

        if (phydev->link != PHY_DOWN) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
                        mdp->duplex = phydev->duplex;
                        if (mdp->cd->set_duplex)
                                mdp->cd->set_duplex(ndev);
                }

                if (phydev->speed != mdp->speed) {
                        new_state = 1;
                        mdp->speed = phydev->speed;
                        if (mdp->cd->set_rate)
                                mdp->cd->set_rate(ndev);
                }
                if (mdp->link == PHY_DOWN) {
                        ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
                                  | ECMR_DM, ioaddr + ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
                }
        } else if (mdp->link) {
                new_state = 1;
                mdp->link = PHY_DOWN;
                mdp->speed = 0;
                mdp->duplex = -1;
        }

        if (new_state)
                phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        char phy_id[MII_BUS_ID_SIZE + 3];
        struct phy_device *phydev = NULL;

        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                 mdp->mii_bus->id, mdp->phy_id);

        mdp->link = PHY_DOWN;
        mdp->speed = 0;
        mdp->duplex = -1;

        /* Try connect to PHY */
        phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
                             0, PHY_INTERFACE_MODE_MII);
        if (IS_ERR(phydev)) {
                dev_err(&ndev->dev, "phy_connect failed\n");
                return PTR_ERR(phydev);
        }

        dev_info(&ndev->dev, "attached phy %i to driver %s\n",
                 phydev->addr, phydev->drv->name);

        mdp->phydev = phydev;

        return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret;

        ret = sh_eth_phy_init(ndev);
        if (ret)
                return ret;

        /* reset phy - this also wakes it from PDOWN */
        phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
        phy_start(mdp->phydev);

        return 0;
}

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        pm_runtime_get_sync(&mdp->pdev->dev);

        ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764)
                          IRQF_SHARED,
#else
                          0,
#endif
                          ndev->name, ndev);
        if (ret) {
                dev_err(&ndev->dev, "Can not assign IRQ number\n");
                return ret;
        }

        /* Descriptor set */
        ret = sh_eth_ring_init(ndev);
        if (ret)
                goto out_free_irq;

        /* device init */
        ret = sh_eth_dev_init(ndev);
        if (ret)
                goto out_free_irq;

        /* PHY control start */
        ret = sh_eth_phy_start(ndev);
        if (ret)
                goto out_free_irq;

        /* Set the timer to check for link beat. */
        init_timer(&mdp->timer);
        mdp->timer.expires = jiffies + (24 * HZ) / 10;  /* 2.4 sec. */
        setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);

        return ret;

out_free_irq:
        free_irq(ndev->irq, ndev);
        pm_runtime_put_sync(&mdp->pdev->dev);
        return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;
        struct sh_eth_rxdesc *rxdesc;
        int i;

        netif_stop_queue(ndev);

        /* warning message out. */
        printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
               " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));

        /* tx_errors count up */
        mdp->stats.tx_errors++;

        /* timer off */
        del_timer_sync(&mdp->timer);

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                rxdesc = &mdp->rx_ring[i];
                rxdesc->status = 0;
                rxdesc->addr = 0xBADF00D0;
                if (mdp->rx_skbuff[i])
                        dev_kfree_skb(mdp->rx_skbuff[i]);
                mdp->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                if (mdp->tx_skbuff[i])
                        dev_kfree_skb(mdp->tx_skbuff[i]);
                mdp->tx_skbuff[i] = NULL;
        }

        /* device init */
        sh_eth_dev_init(ndev);

        /* timer on */
        mdp->timer.expires = jiffies + (24 * HZ) / 10;  /* 2.4 sec. */
        add_timer(&mdp->timer);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        u32 entry;
        unsigned long flags;

        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
                if (!sh_eth_txfree(ndev)) {
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }
        spin_unlock_irqrestore(&mdp->lock, flags);

        entry = mdp->cur_tx % TX_RING_SIZE;
        mdp->tx_skbuff[entry] = skb;
        txdesc = &mdp->tx_ring[entry];
        txdesc->addr = virt_to_phys(skb->data);
        /* soft swap. */
        if (!mdp->cd->hw_swap)
                sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
                                 skb->len + 2);
        /* write back */
        __flush_purge_region(skb->data, skb->len);
        if (skb->len < ETHERSMALL)
                txdesc->buffer_length = ETHERSMALL;
        else
                txdesc->buffer_length = skb->len;

        if (entry >= TX_RING_SIZE - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
        else
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

        mdp->cur_tx++;

        if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
                ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);

        ndev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;
        int ringsize;

        netif_stop_queue(ndev);

        /* Disable interrupts by clearing the interrupt mask. */
        ctrl_outl(0x0000, ioaddr + EESIPR);

        /* Stop the chip's Tx and Rx processes. */
        ctrl_outl(0, ioaddr + EDTRR);
        ctrl_outl(0, ioaddr + EDRRR);

        /* PHY Disconnect */
        if (mdp->phydev) {
                phy_stop(mdp->phydev);
                phy_disconnect(mdp->phydev);
        }

        free_irq(ndev->irq, ndev);

        del_timer_sync(&mdp->timer);

        /* Free all the skbuffs in the Rx queue. */
        sh_eth_ring_free(ndev);

        /* free DMA buffer */
        ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
        dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

        /* free DMA buffer */
        ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
        dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

        pm_runtime_put_sync(&mdp->pdev->dev);

        return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 ioaddr = ndev->base_addr;

        pm_runtime_get_sync(&mdp->pdev->dev);

        mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
        ctrl_outl(0, ioaddr + TROCR);   /* (write clear) */
        mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
        ctrl_outl(0, ioaddr + CDCR);    /* (write clear) */
        mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
        ctrl_outl(0, ioaddr + LCCR);    /* (write clear) */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
        mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
        ctrl_outl(0, ioaddr + CERCR);   /* (write clear) */
        mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
        ctrl_outl(0, ioaddr + CEECR);   /* (write clear) */
#else
        mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
        ctrl_outl(0, ioaddr + CNDCR);   /* (write clear) */
#endif
        pm_runtime_put_sync(&mdp->pdev->dev);

        return &mdp->stats;
}

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
                           int cmd)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = mdp->phydev;

        if (!netif_running(ndev))
                return -EINVAL;

        if (!phydev)
                return -ENODEV;

        return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* Set multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
        u32 ioaddr = ndev->base_addr;

        if (ndev->flags & IFF_PROMISC) {
                /* Set promiscuous. */
                ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
                          ioaddr + ECMR);
        } else {
                /* Normal, unicast/broadcast-only mode. */
                ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
                          ioaddr + ECMR);
        }
}

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(u32 ioaddr)
{
        ctrl_outl(0, ioaddr + TSU_FWEN0);       /* Disable forward(0->1) */
        ctrl_outl(0, ioaddr + TSU_FWEN1);       /* Disable forward(1->0) */
        ctrl_outl(0, ioaddr + TSU_FCM);         /* forward fifo 3k-3k */
        ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
        ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
        ctrl_outl(0, ioaddr + TSU_PRISL0);
        ctrl_outl(0, ioaddr + TSU_PRISL1);
        ctrl_outl(0, ioaddr + TSU_FWSL0);
        ctrl_outl(0, ioaddr + TSU_FWSL1);
        ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
        ctrl_outl(0, ioaddr + TSU_QTAG0);       /* Disable QTAG(0->1) */
        ctrl_outl(0, ioaddr + TSU_QTAG1);       /* Disable QTAG(1->0) */
#else
        ctrl_outl(0, ioaddr + TSU_QTAGM0);      /* Disable QTAG(0->1) */
        ctrl_outl(0, ioaddr + TSU_QTAGM1);      /* Disable QTAG(1->0) */
#endif
        ctrl_outl(0, ioaddr + TSU_FWSR);        /* all interrupt status clear */
        ctrl_outl(0, ioaddr + TSU_FWINMK);      /* Disable all interrupt */
        ctrl_outl(0, ioaddr + TSU_TEN);         /* Disable all CAM entry */
        ctrl_outl(0, ioaddr + TSU_POST1);       /* Disable CAM entry [ 0- 7] */
        ctrl_outl(0, ioaddr + TSU_POST2);       /* Disable CAM entry [ 8-15] */
        ctrl_outl(0, ioaddr + TSU_POST3);       /* Disable CAM entry [16-23] */
        ctrl_outl(0, ioaddr + TSU_POST4);       /* Disable CAM entry [24-31] */
}
#endif /* SH_ETH_HAS_TSU */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001286
1287/* MDIO bus release function */
1288static int sh_mdio_release(struct net_device *ndev)
1289{
1290 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
1291
1292 /* unregister mdio bus */
1293 mdiobus_unregister(bus);
1294
1295 /* remove mdio bus info from net_device */
1296 dev_set_drvdata(&ndev->dev, NULL);
1297
1298 /* free bitbang info */
1299 free_mdio_bitbang(bus);
1300
1301 return 0;
1302}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id)
{
        int ret, i;
        struct bb_info *bitbang;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* create bit control struct for PHY */
        bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
        if (!bitbang) {
                ret = -ENOMEM;
                goto out;
        }

        /* bitbang init */
        bitbang->addr = ndev->base_addr + PIR;
        bitbang->mdi_msk = 0x08;
        bitbang->mdo_msk = 0x04;
        bitbang->mmd_msk = 0x02;/* MMD */
        bitbang->mdc_msk = 0x01;
        bitbang->ctrl.ops = &bb_ops;

        /* MII controller setting */
        mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
        if (!mdp->mii_bus) {
                ret = -ENOMEM;
                goto out_free_bitbang;
        }

        /* Hook up MII support for ethtool */
        mdp->mii_bus->name = "sh_mii";
        mdp->mii_bus->parent = &ndev->dev;
        snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);

        /* PHY IRQ */
        mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
        if (!mdp->mii_bus->irq) {
                ret = -ENOMEM;
                goto out_free_bus;
        }

        for (i = 0; i < PHY_MAX_ADDR; i++)
                mdp->mii_bus->irq[i] = PHY_POLL;

        /* register mdio bus */
        ret = mdiobus_register(mdp->mii_bus);
        if (ret)
                goto out_free_irq;

        dev_set_drvdata(&ndev->dev, mdp->mii_bus);

        return 0;

out_free_irq:
        kfree(mdp->mii_bus->irq);

out_free_bus:
        free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
        kfree(bitbang);

out:
        return ret;
}

static const struct net_device_ops sh_eth_netdev_ops = {
        .ndo_open               = sh_eth_open,
        .ndo_stop               = sh_eth_close,
        .ndo_start_xmit         = sh_eth_start_xmit,
        .ndo_get_stats          = sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
        .ndo_set_multicast_list = sh_eth_set_multicast_list,
#endif
        .ndo_tx_timeout         = sh_eth_tx_timeout,
        .ndo_do_ioctl           = sh_eth_do_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
        int ret, i, devno = 0;
        struct resource *res;
        struct net_device *ndev = NULL;
        struct sh_eth_private *mdp;
        struct sh_eth_plat_data *pd;

        /* get base addr */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(res == NULL)) {
                dev_err(&pdev->dev, "invalid resource\n");
                ret = -EINVAL;
                goto out;
        }

        ndev = alloc_etherdev(sizeof(struct sh_eth_private));
        if (!ndev) {
                dev_err(&pdev->dev, "Could not allocate device.\n");
                ret = -ENOMEM;
                goto out;
        }

        /* The sh Ether-specific entries in the device structure. */
        ndev->base_addr = res->start;
        devno = pdev->id;
        if (devno < 0)
                devno = 0;

        ndev->dma = -1;
        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                ret = -ENODEV;
                goto out_release;
        }
        ndev->irq = ret;

        SET_NETDEV_DEV(ndev, &pdev->dev);

        /* Fill in the fields of the device structure with ethernet values. */
        ether_setup(ndev);

        mdp = netdev_priv(ndev);
        spin_lock_init(&mdp->lock);
        mdp->pdev = pdev;
        pm_runtime_enable(&pdev->dev);
        pm_runtime_resume(&pdev->dev);

        pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
        /* get PHY ID */
        mdp->phy_id = pd->phy;
        /* EDMAC endian */
        mdp->edmac_endian = pd->edmac_endian;
        mdp->no_ether_link = pd->no_ether_link;
        mdp->ether_link_active_low = pd->ether_link_active_low;

        /* set cpu data */
        mdp->cd = &sh_eth_my_cpu_data;
        sh_eth_set_default_cpu_data(mdp->cd);

        /* set function */
        ndev->netdev_ops = &sh_eth_netdev_ops;
        ndev->watchdog_timeo = TX_TIMEOUT;

        mdp->post_rx = POST_RX >> (devno << 1);
        mdp->post_fw = POST_FW >> (devno << 1);

        /* read and set MAC address */
        read_mac_address(ndev, pd->mac_addr);

        /* First device only init */
        if (!devno) {
                if (mdp->cd->chip_reset)
                        mdp->cd->chip_reset(ndev);

#if defined(SH_ETH_HAS_TSU)
                /* TSU init (Init only) */
                sh_eth_tsu_init(SH_TSU_ADDR);
#endif
        }

        /* network device register */
        ret = register_netdev(ndev);
        if (ret)
                goto out_release;

        /* mdio bus init */
        ret = sh_mdio_init(ndev, pdev->id);
        if (ret)
                goto out_unregister;

        /* print device information */
        pr_info("Base address at 0x%x, ", (u32)ndev->base_addr);

        for (i = 0; i < 5; i++)
                printk("%02X:", ndev->dev_addr[i]);
        printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);

        platform_set_drvdata(pdev, ndev);

        return ret;

out_unregister:
        unregister_netdev(ndev);

out_release:
        /* net_dev free */
        if (ndev)
                free_netdev(ndev);

out:
        return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);

        sh_mdio_release(ndev);
        unregister_netdev(ndev);
        flush_scheduled_work();
        pm_runtime_disable(&pdev->dev);
        free_netdev(ndev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static int sh_eth_runtime_nop(struct device *dev)
{
        /*
         * Runtime PM callback shared between ->runtime_suspend()
         * and ->runtime_resume(). Simply returns success.
         *
         * This driver re-initializes all registers after
         * pm_runtime_get_sync() anyway so there is no need
         * to save and restore registers here.
         */
        return 0;
}

static struct dev_pm_ops sh_eth_dev_pm_ops = {
        .runtime_suspend = sh_eth_runtime_nop,
        .runtime_resume = sh_eth_runtime_nop,
};

static struct platform_driver sh_eth_driver = {
        .probe = sh_eth_drv_probe,
        .remove = sh_eth_drv_remove,
        .driver = {
                .name = CARDNAME,
                .pm = &sh_eth_dev_pm_ops,
        },
};

static int __init sh_eth_init(void)
{
        return platform_driver_register(&sh_eth_driver);
}

static void __exit sh_eth_cleanup(void)
{
        platform_driver_unregister(&sh_eth_driver);
}

module_init(sh_eth_init);
module_exit(sh_eth_cleanup);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");