author     Pantelis Antoniou <pantelis.antoniou@gmail.com>   2005-10-28 16:25:58 -0400
committer  Jeff Garzik <jgarzik@pobox.com>                   2005-10-28 16:25:58 -0400
commit     48257c4f168e5d040394aeca4d37b59f68e0d36b (patch)
tree       7e553a6018862338d80fb5b0e4070a371a8fb001 /drivers/net/fs_enet
parent     d8840ac907c7943bc7e196b11812adfa95cb28ef (diff)
Add fs_enet ethernet network driver, for several embedded platforms.
Diffstat (limited to 'drivers/net/fs_enet')
-rw-r--r--  drivers/net/fs_enet/Kconfig          20
-rw-r--r--  drivers/net/fs_enet/Makefile         10
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c  1226
-rw-r--r--  drivers/net/fs_enet/fs_enet-mii.c    507
-rw-r--r--  drivers/net/fs_enet/fs_enet.h        245
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c        578
-rw-r--r--  drivers/net/fs_enet/mac-fec.c        653
-rw-r--r--  drivers/net/fs_enet/mac-scc.c        524
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c    405
-rw-r--r--  drivers/net/fs_enet/mii-fixed.c       92
10 files changed, 4260 insertions, 0 deletions
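
The driver splits into a generic core (fs_enet-main.c) that drives per-MAC backends (mac-fcc.c, mac-fec.c, mac-scc.c) through the struct fs_ops table declared in fs_enet.h, plus pluggable MII bus methods (mii-bitbang.c, mii-fixed.c). As a rough stand-alone illustration of that ops-table dispatch, here is a minimal C sketch; the toy_* types and names are simplified stand-ins, not the driver's definitions:

#include <stdio.h>

struct toy_dev;                          /* stands in for struct net_device */

struct toy_ops {                         /* stands in for struct fs_ops */
        void (*restart)(struct toy_dev *dev);
        void (*stop)(struct toy_dev *dev);
};

struct toy_dev {
        const char *name;
        const struct toy_ops *ops;       /* picked once at probe time */
};

static void fcc_restart(struct toy_dev *dev) { printf("%s: FCC restart\n", dev->name); }
static void fcc_stop(struct toy_dev *dev)    { printf("%s: FCC stop\n", dev->name); }

static const struct toy_ops toy_fcc_ops = {
        .restart = fcc_restart,
        .stop    = fcc_stop,
};

int main(void)
{
        struct toy_dev dev = { .name = "eth0", .ops = &toy_fcc_ops };

        (*dev.ops->stop)(&dev);          /* generic code, MAC-specific behaviour */
        (*dev.ops->restart)(&dev);
        return 0;
}
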
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
new file mode 100644
index 00000000000..6aaee67dd4b
--- /dev/null
+++ b/drivers/net/fs_enet/Kconfig
@@ -0,0 +1,20 @@
+config FS_ENET
+ tristate "Freescale Ethernet Driver"
+ depends on NET_ETHERNET && (CPM1 || CPM2)
+ select MII
+
+config FS_ENET_HAS_SCC
+ bool "Chip has an SCC usable for ethernet"
+ depends on FS_ENET && (CPM1 || CPM2)
+ default y
+
+config FS_ENET_HAS_FCC
+ bool "Chip has an FCC usable for ethernet"
+ depends on FS_ENET && CPM2
+ default y
+
+config FS_ENET_HAS_FEC
+ bool "Chip has an FEC usable for ethernet"
+ depends on FS_ENET && CPM1
+ default y
+
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
new file mode 100644
index 00000000000..d6dd3f2fb43
--- /dev/null
+++ b/drivers/net/fs_enet/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Freescale Ethernet controllers
+#
+
+obj-$(CONFIG_FS_ENET) += fs_enet.o
+
+obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o
+obj-$(CONFIG_8260) += mac-fcc.o
+
+fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
new file mode 100644
index 00000000000..44fac737328
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -0,0 +1,1226 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
+ * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+
+#include <linux/vmalloc.h>
+#include <asm/pgtable.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+static char version[] __devinitdata =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
+
+MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
+MODULE_DESCRIPTION("Freescale Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+MODULE_PARM(fs_enet_debug, "i");
+MODULE_PARM_DESC(fs_enet_debug,
+ "Freescale bitmapped debugging message enable value");
+
+int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
+
+static void fs_set_multicast_list(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ (*fep->ops->set_multicast_list)(dev);
+}
+
+/* NAPI receive function */
+static int fs_enet_rx_napi(struct net_device *dev, int *budget)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ cbd_t *bdp;
+ struct sk_buff *skb, *skbn, *skbt;
+ int received = 0;
+ u16 pkt_len, sc;
+ int curidx;
+ int rx_work_limit = 0; /* pacify gcc */
+
+ rx_work_limit = min(dev->quota, *budget);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ /* clear RX status bits for napi*/
+ (*fep->ops->napi_clear_rx_event)(dev);
+
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+
+ curidx = bdp - fep->rx_bd_base;
+
+ /*
+ * Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((sc & BD_ENET_RX_LAST) == 0)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s rcv is not +last\n",
+ dev->name);
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+ BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ fep->stats.rx_errors++;
+ /* Frame too long or too short. */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+ fep->stats.rx_length_errors++;
+ /* Frame alignment */
+ if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ fep->stats.rx_frame_errors++;
+ /* CRC Error */
+ if (sc & BD_ENET_RX_CR)
+ fep->stats.rx_crc_errors++;
+ /* FIFO overrun */
+ if (sc & BD_ENET_RX_OV)
+ fep->stats.rx_crc_errors++;
+
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ skbn = skb;
+
+ } else {
+
+ /* napi, got packet but no quota */
+ if (--rx_work_limit < 0)
+ break;
+
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ /*
+ * Process the incoming frame.
+ */
+ fep->stats.rx_packets++;
+ pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
+ fep->stats.rx_bytes += pkt_len + 4;
+
+ if (pkt_len <= fpi->rx_copybreak) {
+ /* +2 to make IP header L1 cache aligned */
+ skbn = dev_alloc_skb(pkt_len + 2);
+ if (skbn != NULL) {
+ skb_reserve(skbn, 2); /* align IP header */
+ memcpy(skbn->data, skb->data, pkt_len);
+ /* swap */
+ skbt = skb;
+ skb = skbn;
+ skbn = skbt;
+ }
+ } else
+ skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+
+ if (skbn != NULL) {
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ skb->protocol = eth_type_trans(skb, dev);
+ received++;
+ netif_receive_skb(skb);
+ } else {
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s Memory squeeze, dropping packet.\n",
+ dev->name);
+ fep->stats.rx_dropped++;
+ skbn = skb;
+ }
+ }
+
+ fep->rx_skbuff[curidx] = skbn;
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+ /*
+ * Update BD pointer to next entry.
+ */
+ if ((sc & BD_ENET_RX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->rx_bd_base;
+
+ (*fep->ops->rx_bd_done)(dev);
+ }
+
+ fep->cur_rx = bdp;
+
+ dev->quota -= received;
+ *budget -= received;
+
+ if (rx_work_limit < 0)
+ return 1; /* not done */
+
+ /* done */
+ netif_rx_complete(dev);
+
+ (*fep->ops->napi_enable_rx)(dev);
+
+ return 0;
+}
+
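
For context, the poll routine above follows the 2.6-era NAPI contract: consume at most min(dev->quota, *budget) packets, debit both counters by what was actually taken, and return non-zero only if work remains. A stand-alone sketch of just that accounting (plain C model, not kernel code):

#include <stdio.h>

static int toy_poll(int *quota, int *budget, int backlog)
{
        int limit = (*quota < *budget) ? *quota : *budget;
        int received = (backlog < limit) ? backlog : limit;

        *quota -= received;
        *budget -= received;

        /* non-zero == more work left, poll again; 0 == done, re-enable rx events */
        return backlog > received;
}

int main(void)
{
        int quota = 16, budget = 64;
        int more = toy_poll(&quota, &budget, 40);

        printf("not done=%d quota=%d budget=%d\n", more, quota, budget);
        return 0;
}
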
+/* non NAPI receive function */
+static int fs_enet_rx_non_napi(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ cbd_t *bdp;
+ struct sk_buff *skb, *skbn, *skbt;
+ int received = 0;
+ u16 pkt_len, sc;
+ int curidx;
+ /*
+ * First, grab all of the stats for the incoming packet.
+ * These get messed up if we get called due to a busy condition.
+ */
+ bdp = fep->cur_rx;
+
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+
+ curidx = bdp - fep->rx_bd_base;
+
+ /*
+ * Since we have allocated space to hold a complete frame,
+ * the last indicator should be set.
+ */
+ if ((sc & BD_ENET_RX_LAST) == 0)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s rcv is not +last\n",
+ dev->name);
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+ BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ fep->stats.rx_errors++;
+ /* Frame too long or too short. */
+ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+ fep->stats.rx_length_errors++;
+ /* Frame alignment */
+ if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ fep->stats.rx_frame_errors++;
+ /* CRC Error */
+ if (sc & BD_ENET_RX_CR)
+ fep->stats.rx_crc_errors++;
+ /* FIFO overrun */
+ if (sc & BD_ENET_RX_OV)
+ fep->stats.rx_crc_errors++;
+
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ skbn = skb;
+
+ } else {
+
+ skb = fep->rx_skbuff[curidx];
+
+ dma_unmap_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ /*
+ * Process the incoming frame.
+ */
+ fep->stats.rx_packets++;
+ pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
+ fep->stats.rx_bytes += pkt_len + 4;
+
+ if (pkt_len <= fpi->rx_copybreak) {
+ /* +2 to make IP header L1 cache aligned */
+ skbn = dev_alloc_skb(pkt_len + 2);
+ if (skbn != NULL) {
+ skb_reserve(skbn, 2); /* align IP header */
+ memcpy(skbn->data, skb->data, pkt_len);
+ /* swap */
+ skbt = skb;
+ skb = skbn;
+ skbn = skbt;
+ }
+ } else
+ skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+
+ if (skbn != NULL) {
+ skb->dev = dev;
+ skb_put(skb, pkt_len); /* Make room */
+ skb->protocol = eth_type_trans(skb, dev);
+ received++;
+ netif_rx(skb);
+ } else {
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s Memory squeeze, dropping packet.\n",
+ dev->name);
+ fep->stats.rx_dropped++;
+ skbn = skb;
+ }
+ }
+
+ fep->rx_skbuff[curidx] = skbn;
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+ /*
+ * Update BD pointer to next entry.
+ */
+ if ((sc & BD_ENET_RX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->rx_bd_base;
+
+ (*fep->ops->rx_bd_done)(dev);
+ }
+
+ fep->cur_rx = bdp;
+
+ return 0;
+}
+
+static void fs_enet_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t *bdp;
+ struct sk_buff *skb;
+ int dirtyidx, do_wake, do_restart;
+ u16 sc;
+
+ spin_lock(&fep->lock);
+ bdp = fep->dirty_tx;
+
+ do_wake = do_restart = 0;
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
+
+ dirtyidx = bdp - fep->tx_bd_base;
+
+ if (fep->tx_free == fep->tx_ring)
+ break;
+
+ skb = fep->tx_skbuff[dirtyidx];
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+
+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
+ fep->stats.tx_heartbeat_errors++;
+ if (sc & BD_ENET_TX_LC) /* Late collision */
+ fep->stats.tx_window_errors++;
+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
+ fep->stats.tx_aborted_errors++;
+ if (sc & BD_ENET_TX_UN) /* Underrun */
+ fep->stats.tx_fifo_errors++;
+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
+ fep->stats.tx_carrier_errors++;
+
+ if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+ fep->stats.tx_errors++;
+ do_restart = 1;
+ }
+ } else
+ fep->stats.tx_packets++;
+
+ if (sc & BD_ENET_TX_READY)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s HEY! Enet xmit interrupt and TX_READY.\n",
+ dev->name);
+
+ /*
+ * Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (sc & BD_ENET_TX_DEF)
+ fep->stats.collisions++;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
+
+ /*
+ * Free the sk buffer associated with this last transmit.
+ */
+ dev_kfree_skb_irq(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+
+ /*
+ * Update pointer to next buffer descriptor to be transmitted.
+ */
+ if ((sc & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+
+ /*
+ * Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (!fep->tx_free++)
+ do_wake = 1;
+ }
+
+ fep->dirty_tx = bdp;
+
+ if (do_restart)
+ (*fep->ops->tx_restart)(dev);
+
+ spin_unlock(&fep->lock);
+
+ if (do_wake)
+ netif_wake_queue(dev);
+}
+
+/*
+ * The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static irqreturn_t
+fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct fs_enet_private *fep;
+ const struct fs_platform_info *fpi;
+ u32 int_events;
+ u32 int_clr_events;
+ int nr, napi_ok;
+ int handled;
+
+ fep = netdev_priv(dev);
+ fpi = fep->fpi;
+
+ nr = 0;
+ while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
+
+ nr++;
+
+ int_clr_events = int_events;
+ if (fpi->use_napi)
+ int_clr_events &= ~fep->ev_napi_rx;
+
+ (*fep->ops->clear_int_events)(dev, int_clr_events);
+
+ if (int_events & fep->ev_err)
+ (*fep->ops->ev_error)(dev, int_events);
+
+ if (int_events & fep->ev_rx) {
+ if (!fpi->use_napi)
+ fs_enet_rx_non_napi(dev);
+ else {
+ napi_ok = netif_rx_schedule_prep(dev);
+
+ (*fep->ops->napi_disable_rx)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+
+ /* NOTE: it is possible for FCCs in NAPI mode */
+ /* to submit a spurious interrupt while in poll */
+ if (napi_ok)
+ __netif_rx_schedule(dev);
+ }
+ }
+
+ if (int_events & fep->ev_tx)
+ fs_enet_tx(dev);
+ }
+
+ handled = nr > 0;
+ return IRQ_RETVAL(handled);
+}
+
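
In the NAPI branch above, the handler masks the controller's rx events before scheduling the poll, and schedules it only if one is not already pending, which is why the spurious FCC interrupt mentioned in the comment is harmless. A minimal model of that sequence, with booleans standing in for the event mask and the softirq state:

#include <stdio.h>
#include <stdbool.h>

static bool rx_events_enabled = true;
static bool poll_scheduled;

static void toy_rx_interrupt(void)
{
        bool ok = !poll_scheduled;       /* netif_rx_schedule_prep() */

        rx_events_enabled = false;       /* (*fep->ops->napi_disable_rx)() */
        if (ok)
                poll_scheduled = true;   /* __netif_rx_schedule() */
}

static void toy_poll_done(void)
{
        poll_scheduled = false;          /* netif_rx_complete() */
        rx_events_enabled = true;        /* (*fep->ops->napi_enable_rx)() */
}

int main(void)
{
        toy_rx_interrupt();
        toy_rx_interrupt();              /* spurious interrupt: poll already pending */
        printf("scheduled=%d rx-events=%d\n", poll_scheduled, rx_events_enabled);
        toy_poll_done();
        printf("scheduled=%d rx-events=%d\n", poll_scheduled, rx_events_enabled);
        return 0;
}
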
+void fs_init_bds(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t *bdp;
+ struct sk_buff *skb;
+ int i;
+
+ fs_cleanup_bds(dev);
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->tx_free = fep->tx_ring;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /*
+ * Initialize the receive buffer descriptors.
+ */
+ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+ skb = dev_alloc_skb(ENET_RX_FRSIZE);
+ if (skb == NULL) {
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s Memory squeeze, unable to allocate skb\n",
+ dev->name);
+ break;
+ }
+ fep->rx_skbuff[i] = skb;
+ skb->dev = dev;
+ CBDW_BUFADDR(bdp,
+ dma_map_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE));
+ CBDW_DATLEN(bdp, 0); /* zero */
+ CBDW_SC(bdp, BD_ENET_RX_EMPTY |
+ ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
+ }
+ /*
+ * if we failed, fill up the remainder
+ */
+ for (; i < fep->rx_ring; i++, bdp++) {
+ fep->rx_skbuff[i] = NULL;
+ CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+
+ /*
+ * ...and the same for transmit.
+ */
+ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+ fep->tx_skbuff[i] = NULL;
+ CBDW_BUFADDR(bdp, 0);
+ CBDW_DATLEN(bdp, 0);
+ CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
+ }
+}
+
+void fs_cleanup_bds(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ /*
+ * Reset SKB transmit buffers.
+ */
+ for (i = 0; i < fep->tx_ring; i++) {
+ if ((skb = fep->tx_skbuff[i]) == NULL)
+ continue;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
+
+ fep->tx_skbuff[i] = NULL;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * Reset SKB receive buffers
+ */
+ for (i = 0; i < fep->rx_ring; i++) {
+ if ((skb = fep->rx_skbuff[i]) == NULL)
+ continue;
+
+ /* unmap */
+ dma_unmap_single(fep->dev, skb->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ fep->rx_skbuff[i] = NULL;
+
+ dev_kfree_skb(skb);
+ }
+}
+
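
fs_init_bds() marks the last descriptor of each ring with BD_SC_WRAP, and every walker (receive, transmit completion, transmit submit) advances one descriptor at a time until it sees that bit, then jumps back to the ring base. A tiny stand-alone model of that traversal, using a fake descriptor type:

#include <stdio.h>

#define TOY_WRAP 0x2000                  /* plays the role of BD_SC_WRAP */
#define RING     4

struct toy_bd { unsigned short sc; };

int main(void)
{
        struct toy_bd ring[RING] = { {0}, {0}, {0}, { TOY_WRAP } };
        struct toy_bd *bdp = ring;
        int i;

        for (i = 0; i < 6; i++) {        /* walk past the end to show the wrap */
                printf("visiting index %d\n", (int)(bdp - ring));
                if ((bdp->sc & TOY_WRAP) == 0)
                        bdp++;
                else
                        bdp = ring;      /* back to rx_bd_base/tx_bd_base */
        }
        return 0;
}
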
+/**********************************************************************************/
+
+static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ cbd_t *bdp;
+ int curidx;
+ u16 sc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tx_lock, flags);
+
+ /*
+ * Fill in a Tx ring entry
+ */
+ bdp = fep->cur_tx;
+
+ if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&fep->tx_lock, flags);
+
+ /*
+ * Ooops. All transmit buffers are full. Bail out.
+ * This should not happen, since the tx queue should be stopped.
+ */
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s tx queue full!.\n", dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ curidx = bdp - fep->tx_bd_base;
+ /*
+ * Clear all of the status flags.
+ */
+ CBDC_SC(bdp, BD_ENET_TX_STATS);
+
+ /*
+ * Save skb pointer.
+ */
+ fep->tx_skbuff[curidx] = skb;
+
+ fep->stats.tx_bytes += skb->len;
+
+ /*
+ * Push the data cache so the CPM does not get stale memory data.
+ */
+ CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
+ skb->data, skb->len, DMA_TO_DEVICE));
+ CBDW_DATLEN(bdp, skb->len);
+
+ dev->trans_start = jiffies;
+
+ /*
+ * If this was the last BD in the ring, start at the beginning again.
+ */
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+ fep->cur_tx++;
+ else
+ fep->cur_tx = fep->tx_bd_base;
+
+ if (!--fep->tx_free)
+ netif_stop_queue(dev);
+
+ /* Trigger transmission start */
+ sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
+ BD_ENET_TX_LAST | BD_ENET_TX_TC;
+
+ /* note that while FEC does not have this bit
+ * it marks it as available for software use
+ * yay for hw reuse :) */
+ if (skb->len <= 60)
+ sc |= BD_ENET_TX_PAD;
+ CBDS_SC(bdp, sc);
+
+ (*fep->ops->tx_kickstart)(dev);
+
+ spin_unlock_irqrestore(&fep->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
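
fs_enet_start_xmit() above and fs_enet_tx() cooperate on a simple free-slot counter: the queue is stopped when the counter reaches zero and woken when the completion path frees the first slot again. A simplified model of that flow control (illustration only, not the driver's code):

#include <stdio.h>
#include <stdbool.h>

#define TX_RING 8

static int tx_free = TX_RING;
static bool queue_stopped;

static bool toy_xmit(void)
{
        if (!tx_free)                    /* should not happen: queue is stopped */
                return false;
        if (!--tx_free)
                queue_stopped = true;    /* netif_stop_queue() */
        return true;
}

static void toy_tx_complete(void)
{
        if (!tx_free++)
                queue_stopped = false;   /* netif_wake_queue() */
}

int main(void)
{
        int i;

        for (i = 0; i < TX_RING; i++)
                toy_xmit();
        printf("stopped=%d free=%d\n", queue_stopped, tx_free);
        toy_tx_complete();
        printf("stopped=%d free=%d\n", queue_stopped, tx_free);
        return 0;
}
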
+static int fs_request_irq(struct net_device *dev, int irq, const char *name,
+ irqreturn_t (*irqf)(int irq, void *dev_id, struct pt_regs *regs))
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ (*fep->ops->pre_request_irq)(dev, irq);
+ return request_irq(irq, irqf, SA_SHIRQ, name, dev);
+}
+
+static void fs_free_irq(struct net_device *dev, int irq)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ free_irq(irq, dev);
+ (*fep->ops->post_free_irq)(dev, irq);
+}
+
+/**********************************************************************************/
+
+/* This interrupt occurs when the PHY detects a link change. */
+static irqreturn_t
+fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = dev_id;
+ struct fs_enet_private *fep;
+ const struct fs_platform_info *fpi;
+
+ fep = netdev_priv(dev);
+ fpi = fep->fpi;
+
+ /*
+ * Acknowledge the interrupt if possible. If we have not
+ * found the PHY yet we can't process or acknowledge the
+ * interrupt now. Instead we ignore this interrupt for now,
+ * which we can do since it is edge triggered. It will be
+ * acknowledged later by fs_enet_open().
+ */
+ if (!fep->phy)
+ return IRQ_NONE;
+
+ fs_mii_ack_int(dev);
+ fs_mii_link_status_change_check(dev, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void fs_timeout(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int wake = 0;
+
+ fep->stats.tx_errors++;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if (dev->flags & IFF_UP) {
+ (*fep->ops->stop)(dev);
+ (*fep->ops->restart)(dev);
+ }
+
+ wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ if (wake)
+ netif_wake_queue(dev);
+}
+
+static int fs_enet_open(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ int r;
+
+ /* Install our interrupt handler. */
+ r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
+ if (r != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s Could not allocate FEC IRQ!", dev->name);
+ return -EINVAL;
+ }
+
+ /* Install our phy interrupt handler */
+ if (fpi->phy_irq != -1) {
+
+ r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
+ if (r != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s Could not allocate PHY IRQ!", dev->name);
+ fs_free_irq(dev, fep->interrupt);
+ return -EINVAL;
+ }
+ }
+
+ fs_mii_startup(dev);
+ netif_carrier_off(dev);
+ fs_mii_link_status_change_check(dev, 1);
+
+ return 0;
+}
+
+static int fs_enet_close(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ fs_mii_shutdown(dev);
+
+ spin_lock_irqsave(&fep->lock, flags);
+ (*fep->ops->stop)(dev);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ /* release any irqs */
+ if (fpi->phy_irq != -1)
+ fs_free_irq(dev, fpi->phy_irq);
+ fs_free_irq(dev, fep->interrupt);
+
+ return 0;
+}
+
+static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ return &fep->stats;
+}
+
+/*************************************************************************/
+
+static void fs_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_MODULE_NAME);
+ strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static int fs_get_regs_len(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ return (*fep->ops->get_regs_len)(dev);
+}
+
+static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int r, len;
+
+ len = regs->len;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ r = (*fep->ops->get_regs)(dev, p, &len);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ if (r == 0)
+ regs->version = 0;
+}
+
+static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ rc = mii_ethtool_gset(&fep->mii_if, cmd);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ return rc;
+}
+
+static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ rc = mii_ethtool_sset(&fep->mii_if, cmd);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ return rc;
+}
+
+static int fs_nway_reset(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ return mii_nway_restart(&fep->mii_if);
+}
+
+static u32 fs_get_msglevel(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ return fep->msg_enable;
+}
+
+static void fs_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fep->msg_enable = value;
+}
+
+static struct ethtool_ops fs_ethtool_ops = {
+ .get_drvinfo = fs_get_drvinfo,
+ .get_regs_len = fs_get_regs_len,
+ .get_settings = fs_get_settings,
+ .set_settings = fs_set_settings,
+ .nway_reset = fs_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = fs_get_msglevel,
+ .set_msglevel = fs_set_msglevel,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_regs = fs_get_regs,
+};
+
+static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
+ unsigned long flags;
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&fep->lock, flags);
+ rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
+ spin_unlock_irqrestore(&fep->lock, flags);
+ return rc;
+}
+
+extern int fs_mii_connect(struct net_device *dev);
+extern void fs_mii_disconnect(struct net_device *dev);
+
+static struct net_device *fs_init_instance(struct device *dev,
+ const struct fs_platform_info *fpi)
+{
+ struct net_device *ndev = NULL;
+ struct fs_enet_private *fep = NULL;
+ int privsize, i, r, err = 0, registered = 0;
+
+ /* guard */
+ if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
+ return ERR_PTR(-EINVAL);
+
+ privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
+ (fpi->rx_ring + fpi->tx_ring));
+
+ ndev = alloc_etherdev(privsize);
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err;
+ }
+ SET_MODULE_OWNER(ndev);
+
+ fep = netdev_priv(ndev);
+ memset(fep, 0, privsize); /* clear everything */
+
+ fep->dev = dev;
+ dev_set_drvdata(dev, ndev);
+ fep->fpi = fpi;
+ if (fpi->init_ioports)
+ fpi->init_ioports();
+
+#ifdef CONFIG_FS_ENET_HAS_FEC
+ if (fs_get_fec_index(fpi->fs_no) >= 0)
+ fep->ops = &fs_fec_ops;
+#endif
+
+#ifdef CONFIG_FS_ENET_HAS_SCC
+ if (fs_get_scc_index(fpi->fs_no) >= 0)
+ fep->ops = &fs_scc_ops;
+#endif
+
+#ifdef CONFIG_FS_ENET_HAS_FCC
+ if (fs_get_fcc_index(fpi->fs_no) >= 0)
+ fep->ops = &fs_fcc_ops;
+#endif
+
+ if (fep->ops == NULL) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s No matching ops found (%d).\n",
+ ndev->name, fpi->fs_no);
+ err = -EINVAL;
+ goto err;
+ }
+
+ r = (*fep->ops->setup_data)(ndev);
+ if (r != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s setup_data failed\n",
+ ndev->name);
+ err = r;
+ goto err;
+ }
+
+ /* point rx_skbuff, tx_skbuff */
+ fep->rx_skbuff = (struct sk_buff **)&fep[1];
+ fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+
+ /* init locks */
+ spin_lock_init(&fep->lock);
+ spin_lock_init(&fep->tx_lock);
+
+ /*
+ * Set the Ethernet address.
+ */
+ for (i = 0; i < 6; i++)
+ ndev->dev_addr[i] = fpi->macaddr[i];
+
+ r = (*fep->ops->allocate_bd)(ndev);
+
+ if (fep->ring_base == NULL) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
+ err = r;
+ goto err;
+ }
+
+ /*
+ * Set receive and transmit descriptor base.
+ */
+ fep->rx_bd_base = fep->ring_base;
+ fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
+
+ /* initialize ring size variables */
+ fep->tx_ring = fpi->tx_ring;
+ fep->rx_ring = fpi->rx_ring;
+
+ /*
+ * The FEC Ethernet specific entries in the device structure.
+ */
+ ndev->open = fs_enet_open;
+ ndev->hard_start_xmit = fs_enet_start_xmit;
+ ndev->tx_timeout = fs_timeout;
+ ndev->watchdog_timeo = 2 * HZ;
+ ndev->stop = fs_enet_close;
+ ndev->get_stats = fs_enet_get_stats;
+ ndev->set_multicast_list = fs_set_multicast_list;
+ if (fpi->use_napi) {
+ ndev->poll = fs_enet_rx_napi;
+ ndev->weight = fpi->napi_weight;
+ }
+ ndev->ethtool_ops = &fs_ethtool_ops;
+ ndev->do_ioctl = fs_ioctl;
+
+ init_timer(&fep->phy_timer_list);
+
+ netif_carrier_off(ndev);
+
+ err = register_netdev(ndev);
+ if (err != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s register_netdev failed.\n", ndev->name);
+ goto err;
+ }
+ registered = 1;
+
+ err = fs_mii_connect(ndev);
+ if (err != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s fs_mii_connect failed.\n", ndev->name);
+ goto err;
+ }
+
+ return ndev;
+
+ err:
+ if (ndev != NULL) {
+
+ if (registered)
+ unregister_netdev(ndev);
+
+ if (fep != NULL) {
+ (*fep->ops->free_bd)(ndev);
+ (*fep->ops->cleanup_data)(ndev);
+ }
+
+ free_netdev(ndev);
+ }
+
+ dev_set_drvdata(dev, NULL);
+
+ return ERR_PTR(err);
+}
+
+static int fs_cleanup_instance(struct net_device *ndev)
+{
+ struct fs_enet_private *fep;
+ const struct fs_platform_info *fpi;
+ struct device *dev;
+
+ if (ndev == NULL)
+ return -EINVAL;
+
+ fep = netdev_priv(ndev);
+ if (fep == NULL)
+ return -EINVAL;
+
+ fpi = fep->fpi;
+
+ fs_mii_disconnect(ndev);
+
+ unregister_netdev(ndev);
+
+ dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+ fep->ring_base, fep->ring_mem_addr);
+
+ /* reset it */
+ (*fep->ops->cleanup_data)(ndev);
+
+ dev = fep->dev;
+ if (dev != NULL) {
+ dev_set_drvdata(dev, NULL);
+ fep->dev = NULL;
+ }
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+/**************************************************************************************/
+
+/* handy pointer to the immap */
+void *fs_enet_immap = NULL;
+
+static int setup_immap(void)
+{
+ phys_addr_t paddr = 0;
+ unsigned long size = 0;
+
+#ifdef CONFIG_CPM1
+ paddr = IMAP_ADDR;
+ size = 0x10000; /* map 64K */
+#endif
+
+#ifdef CONFIG_CPM2
+ paddr = CPM_MAP_ADDR;
+ size = 0x40000; /* map 256 K */
+#endif
+ fs_enet_immap = ioremap(paddr, size);
+ if (fs_enet_immap == NULL)
+ return -EBADF; /* XXX ahem; maybe just BUG_ON? */
+
+ return 0;
+}
+
+static void cleanup_immap(void)
+{
+ if (fs_enet_immap != NULL) {
+ iounmap(fs_enet_immap);
+ fs_enet_immap = NULL;
+ }
+}
+
+/**************************************************************************************/
+
+static int __devinit fs_enet_probe(struct device *dev)
+{
+ struct net_device *ndev;
+
+ /* no fixup - no device */
+ if (dev->platform_data == NULL) {
+ printk(KERN_INFO "fs_enet: "
+ "probe called with no platform data; "
+ "remove unused devices\n");
+ return -ENODEV;
+ }
+
+ ndev = fs_init_instance(dev, dev->platform_data);
+ if (IS_ERR(ndev))
+ return PTR_ERR(ndev);
+ return 0;
+}
+
+static int fs_enet_remove(struct device *dev)
+{
+ return fs_cleanup_instance(dev_get_drvdata(dev));
+}
+
+static struct device_driver fs_enet_fec_driver = {
+ .name = "fsl-cpm-fec",
+ .bus = &platform_bus_type,
+ .probe = fs_enet_probe,
+ .remove = fs_enet_remove,
+#ifdef CONFIG_PM
+/* .suspend = fs_enet_suspend, TODO */
+/* .resume = fs_enet_resume, TODO */
+#endif
+};
+
+static struct device_driver fs_enet_scc_driver = {
+ .name = "fsl-cpm-scc",
+ .bus = &platform_bus_type,
+ .probe = fs_enet_probe,
+ .remove = fs_enet_remove,
+#ifdef CONFIG_PM
+/* .suspend = fs_enet_suspend, TODO */
+/* .resume = fs_enet_resume, TODO */
+#endif
+};
+
+static struct device_driver fs_enet_fcc_driver = {
+ .name = "fsl-cpm-fcc",
+ .bus = &platform_bus_type,
+ .probe = fs_enet_probe,
+ .remove = fs_enet_remove,
+#ifdef CONFIG_PM
+/* .suspend = fs_enet_suspend, TODO */
+/* .resume = fs_enet_resume, TODO */
+#endif
+};
+
+static int __init fs_init(void)
+{
+ int r;
+
+ printk(KERN_INFO
+ "%s", version);
+
+ r = setup_immap();
+ if (r != 0)
+ return r;
+ r = driver_register(&fs_enet_fec_driver);
+ if (r != 0)
+ goto err;
+
+ r = driver_register(&fs_enet_fcc_driver);
+ if (r != 0)
+ goto err;
+
+ r = driver_register(&fs_enet_scc_driver);
+ if (r != 0)
+ goto err;
+
+ return 0;
+err:
+ cleanup_immap();
+ return r;
+
+}
+
+static void __exit fs_cleanup(void)
+{
+ driver_unregister(&fs_enet_fec_driver);
+ driver_unregister(&fs_enet_fcc_driver);
+ driver_unregister(&fs_enet_scc_driver);
+ cleanup_immap();
+}
+
+/**************************************************************************************/
+
+module_init(fs_init);
+module_exit(fs_cleanup);
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
new file mode 100644
index 00000000000..c6770377ef8
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-mii.c
@@ -0,0 +1,507 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
+ * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+/*
+ * Generic PHY support.
+ * Should work for all PHYs, but link change is detected by polling
+ */
+
+static void generic_timer_callback(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fep->phy_timer_list.expires = jiffies + HZ / 2;
+
+ add_timer(&fep->phy_timer_list);
+
+ fs_mii_link_status_change_check(dev, 0);
+}
+
+static void generic_startup(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
+ fep->phy_timer_list.data = (unsigned long)dev;
+ fep->phy_timer_list.function = generic_timer_callback;
+ add_timer(&fep->phy_timer_list);
+}
+
+static void generic_shutdown(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ del_timer_sync(&fep->phy_timer_list);
+}
+
+/* ------------------------------------------------------------------------- */
+/* The Davicom DM9161 is used on the NETTA board */
+
+/* register definitions */
+
+#define MII_DM9161_ANAR 4 /* Autoneg. Advertisement Register */
+#define MII_DM9161_ACR 16 /* Aux. Config Register */
+#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
+#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
+#define MII_DM9161_INTR 21 /* Interrupt Register */
+#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
+#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
+
+static void dm9161_startup(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
+ /* Start autonegotiation */
+ fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ*8);
+}
+
+static void dm9161_ack_int(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
+}
+
+static void dm9161_shutdown(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
+}
+
+/**********************************************************************************/
+
+static const struct phy_info phy_info[] = {
+ {
+ .id = 0x00181b88,
+ .name = "DM9161",
+ .startup = dm9161_startup,
+ .ack_int = dm9161_ack_int,
+ .shutdown = dm9161_shutdown,
+ }, {
+ .id = 0,
+ .name = "GENERIC",
+ .startup = generic_startup,
+ .shutdown = generic_shutdown,
+ },
+};
+
+/**********************************************************************************/
+
+static int phy_id_detect(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ struct fs_enet_mii_bus *bus = fep->mii_bus;
+ int i, r, start, end, phytype, physubtype;
+ const struct phy_info *phy;
+ int phy_hwid, phy_id;
+
+ phy_hwid = -1;
+ fep->phy = NULL;
+
+ /* auto-detect? */
+ if (fpi->phy_addr == -1) {
+ start = 1;
+ end = 32;
+ } else { /* direct */
+ start = fpi->phy_addr;
+ end = start + 1;
+ }
+
+ for (phy_id = start; phy_id < end; phy_id++) {
+ /* skip already used phy addresses on this bus */
+ if (bus->usage_map & (1 << phy_id))
+ continue;
+ r = fs_mii_read(dev, phy_id, MII_PHYSID1);
+ if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
+ continue;
+ r = fs_mii_read(dev, phy_id, MII_PHYSID2);
+ if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
+ continue;
+ phy_hwid = (phytype << 16) | physubtype;
+ if (phy_hwid != -1)
+ break;
+ }
+
+ if (phy_hwid == -1) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s No PHY detected! range=0x%02x-0x%02x\n",
+ dev->name, start, end);
+ return -1;
+ }
+
+ for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
+ if (phy->id == (phy_hwid >> 4) || phy->id == 0)
+ break;
+
+ if (i >= ARRAY_SIZE(phy_info)) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s PHY id 0x%08x is not supported!\n",
+ dev->name, phy_hwid);
+ return -1;
+ }
+
+ fep->phy = phy;
+
+ /* mark this address as used */
+ bus->usage_map |= (1 << phy_id);
+
+ printk(KERN_INFO DRV_MODULE_NAME
+ ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
+ dev->name, phy_id, fep->phy->name, phy_hwid,
+ fpi->phy_addr == -1 ? " (auto-detected)" : "");
+
+ return phy_id;
+}
+
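
phy_id_detect() glues MII_PHYSID1 and MII_PHYSID2 into one 32-bit hardware id and drops the low four revision bits before comparing against phy_info[].id. A worked example using the DM9161 entry from the table above; the two register words are made-up sample reads:

#include <stdio.h>

int main(void)
{
        unsigned int physid1 = 0x0181;   /* sample MII_PHYSID1 read */
        unsigned int physid2 = 0xb881;   /* sample MII_PHYSID2 read (revision 1) */
        unsigned int phy_hwid = (physid1 << 16) | physid2;

        /* phy_info[] id 0x00181b88 == hardware id with the revision nibble dropped */
        printf("hwid=0x%08x match=%d\n", phy_hwid, (phy_hwid >> 4) == 0x00181b88);
        return 0;
}
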
+void fs_mii_startup(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (fep->phy->startup)
+ (*fep->phy->startup) (dev);
+}
+
+void fs_mii_shutdown(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (fep->phy->shutdown)
+ (*fep->phy->shutdown) (dev);
+}
+
+void fs_mii_ack_int(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (fep->phy->ack_int)
+ (*fep->phy->ack_int) (dev);
+}
+
+#define MII_LINK 0x0001
+#define MII_HALF 0x0002
+#define MII_FULL 0x0004
+#define MII_BASE4 0x0008
+#define MII_10M 0x0010
+#define MII_100M 0x0020
+#define MII_1G 0x0040
+#define MII_10G 0x0080
+
+/* return full mii info at one gulp, with a usable form */
+static unsigned int mii_full_status(struct mii_if_info *mii)
+{
+ unsigned int status;
+ int bmsr, adv, lpa, neg;
+ struct fs_enet_private* fep = netdev_priv(mii->dev);
+
+ /* first, a dummy read, needed to latch some MII phys */
+ (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+ bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+
+ /* no link */
+ if ((bmsr & BMSR_LSTATUS) == 0)
+ return 0;
+
+ status = MII_LINK;
+
+ /* Let's look at what ANEG says if it's supported - otherwise we
+    take the right values from the platform info */
+ if (!mii->force_media) {
+ /* autoneg not completed; don't bother */
+ if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
+ return 0;
+
+ adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
+ lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
+
+ neg = lpa & adv;
+ } else {
+ neg = fep->fpi->bus_info->lpa;
+ }
+
+ if (neg & LPA_100FULL)
+ status |= MII_FULL | MII_100M;
+ else if (neg & LPA_100BASE4)
+ status |= MII_FULL | MII_BASE4 | MII_100M;
+ else if (neg & LPA_100HALF)
+ status |= MII_HALF | MII_100M;
+ else if (neg & LPA_10FULL)
+ status |= MII_FULL | MII_10M;
+ else
+ status |= MII_HALF | MII_10M;
+
+ return status;
+}
+
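
mii_full_status() resolves the negotiated link by AND-ing what was advertised (MII_ADVERTISE) with what the partner reported (MII_LPA) and decoding the result highest capability first. A worked example with illustrative register values (the LPA_* constants below carry the same values as linux/mii.h):

#include <stdio.h>

#define LPA_10HALF   0x0020
#define LPA_10FULL   0x0040
#define LPA_100HALF  0x0080
#define LPA_100FULL  0x0100

int main(void)
{
        int adv = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL;
        int lpa = LPA_10HALF | LPA_10FULL | LPA_100HALF;   /* partner lacks 100FD */
        int neg = lpa & adv;

        if (neg & LPA_100FULL)
                printf("100 Mbps, full duplex\n");
        else if (neg & LPA_100HALF)
                printf("100 Mbps, half duplex\n");
        else if (neg & LPA_10FULL)
                printf("10 Mbps, full duplex\n");
        else
                printf("10 Mbps, half duplex\n");
        return 0;
}
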
+void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct mii_if_info *mii = &fep->mii_if;
+ unsigned int mii_status;
+ int ok_to_print, link, duplex, speed;
+ unsigned long flags;
+
+ ok_to_print = netif_msg_link(fep);
+
+ mii_status = mii_full_status(mii);
+
+ if (!init_media && mii_status == fep->last_mii_status)
+ return;
+
+ fep->last_mii_status = mii_status;
+
+ link = !!(mii_status & MII_LINK);
+ duplex = !!(mii_status & MII_FULL);
+ speed = (mii_status & MII_100M) ? 100 : 10;
+
+ if (link == 0) {
+ netif_carrier_off(mii->dev);
+ netif_stop_queue(dev);
+ if (!init_media) {
+ spin_lock_irqsave(&fep->lock, flags);
+ (*fep->ops->stop)(dev);
+ spin_unlock_irqrestore(&fep->lock, flags);
+ }
+
+ if (ok_to_print)
+ printk(KERN_INFO "%s: link down\n", mii->dev->name);
+
+ } else {
+
+ mii->full_duplex = duplex;
+
+ netif_carrier_on(mii->dev);
+
+ spin_lock_irqsave(&fep->lock, flags);
+ fep->duplex = duplex;
+ fep->speed = speed;
+ (*fep->ops->restart)(dev);
+ spin_unlock_irqrestore(&fep->lock, flags);
+
+ netif_start_queue(dev);
+
+ if (ok_to_print)
+ printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
+ dev->name, speed, duplex ? "full" : "half");
+ }
+}
+
+/**********************************************************************************/
+
+int fs_mii_read(struct net_device *dev, int phy_id, int location)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_enet_mii_bus *bus = fep->mii_bus;
+
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&bus->mii_lock, flags);
+ ret = (*bus->mii_read)(bus, phy_id, location);
+ spin_unlock_irqrestore(&bus->mii_lock, flags);
+
+ return ret;
+}
+
+void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_enet_mii_bus *bus = fep->mii_bus;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bus->mii_lock, flags);
+ (*bus->mii_write)(bus, phy_id, location, value);
+ spin_unlock_irqrestore(&bus->mii_lock, flags);
+}
+
+/*****************************************************************************/
+
+/* list of all registered mii buses */
+static LIST_HEAD(fs_mii_bus_list);
+
+static struct fs_enet_mii_bus *lookup_bus(int method, int id)
+{
+ struct list_head *ptr;
+ struct fs_enet_mii_bus *bus;
+
+ list_for_each(ptr, &fs_mii_bus_list) {
+ bus = list_entry(ptr, struct fs_enet_mii_bus, list);
+ if (bus->bus_info->method == method &&
+ bus->bus_info->id == id)
+ return bus;
+ }
+ return NULL;
+}
+
+static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
+{
+ struct fs_enet_mii_bus *bus;
+ int ret = 0;
+
+ bus = kmalloc(sizeof(*bus), GFP_KERNEL);
+ if (bus == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ memset(bus, 0, sizeof(*bus));
+ spin_lock_init(&bus->mii_lock);
+ bus->bus_info = bi;
+ bus->refs = 0;
+ bus->usage_map = 0;
+
+ /* perform initialization */
+ switch (bi->method) {
+
+ case fsmii_fixed:
+ ret = fs_mii_fixed_init(bus);
+ if (ret != 0)
+ goto err;
+ break;
+
+ case fsmii_bitbang:
+ ret = fs_mii_bitbang_init(bus);
+ if (ret != 0)
+ goto err;
+ break;
+#ifdef CONFIG_FS_ENET_HAS_FEC
+ case fsmii_fec:
+ ret = fs_mii_fec_init(bus);
+ if (ret != 0)
+ goto err;
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ list_add(&bus->list, &fs_mii_bus_list);
+
+ return bus;
+
+err:
+ if (bus)
+ kfree(bus);
+ return ERR_PTR(ret);
+}
+
+static void destroy_bus(struct fs_enet_mii_bus *bus)
+{
+ /* remove from bus list */
+ list_del(&bus->list);
+
+ /* nothing more needed */
+ kfree(bus);
+}
+
+int fs_mii_connect(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ struct fs_enet_mii_bus *bus = NULL;
+
+ /* check method validity */
+ switch (fpi->bus_info->method) {
+ case fsmii_fixed:
+ case fsmii_bitbang:
+ break;
+#ifdef CONFIG_FS_ENET_HAS_FEC
+ case fsmii_fec:
+ break;
+#endif
+ default:
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s Unknown MII bus method (%d)!\n",
+ dev->name, fpi->bus_info->method);
+ return -EINVAL;
+ }
+
+ bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
+
+ /* if not found create new bus */
+ if (bus == NULL) {
+ bus = create_bus(fpi->bus_info);
+ if (IS_ERR(bus)) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s MII bus creation failure!\n", dev->name);
+ return PTR_ERR(bus);
+ }
+ }
+
+ bus->refs++;
+
+ fep->mii_bus = bus;
+
+ fep->mii_if.dev = dev;
+ fep->mii_if.phy_id_mask = 0x1f;
+ fep->mii_if.reg_num_mask = 0x1f;
+ fep->mii_if.mdio_read = fs_mii_read;
+ fep->mii_if.mdio_write = fs_mii_write;
+ fep->mii_if.force_media = fpi->bus_info->disable_aneg;
+ fep->mii_if.phy_id = phy_id_detect(dev);
+
+ return 0;
+}
+
+void fs_mii_disconnect(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_enet_mii_bus *bus = NULL;
+
+ bus = fep->mii_bus;
+ fep->mii_bus = NULL;
+
+ if (--bus->refs <= 0)
+ destroy_bus(bus);
+}
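
fs_mii_connect() and fs_mii_disconnect() share one bus object per (method, id) pair: the first interface creates it, later ones only bump a reference count, and the last user tears it down. A toy single-bus model of that bookkeeping, with an allocation standing in for create_bus()/destroy_bus():

#include <stdio.h>
#include <stdlib.h>

struct toy_bus { int refs; };

static struct toy_bus *shared;           /* plays the role of fs_mii_bus_list */

static struct toy_bus *toy_connect(void)
{
        if (!shared) {
                shared = calloc(1, sizeof(*shared));     /* create_bus() */
                if (!shared)
                        return NULL;
        }
        shared->refs++;
        return shared;
}

static void toy_disconnect(struct toy_bus *bus)
{
        if (--bus->refs <= 0) {          /* destroy_bus() */
                free(bus);
                shared = NULL;
        }
}

int main(void)
{
        struct toy_bus *a = toy_connect();
        struct toy_bus *b = toy_connect();

        printf("refs=%d same bus=%d\n", a->refs, a == b);
        toy_disconnect(a);
        toy_disconnect(b);
        printf("bus destroyed=%d\n", shared == NULL);
        return 0;
}
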
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
new file mode 100644
index 00000000000..1105543b9d8
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -0,0 +1,245 @@
+#ifndef FS_ENET_H
+#define FS_ENET_H
+
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/list.h>
+
+#include <linux/fs_enet_pd.h>
+
+#include <asm/dma-mapping.h>
+
+#ifdef CONFIG_CPM1
+#include <asm/commproc.h>
+#endif
+
+#ifdef CONFIG_CPM2
+#include <asm/cpm2.h>
+#endif
+
+/* hw driver ops */
+struct fs_ops {
+ int (*setup_data)(struct net_device *dev);
+ int (*allocate_bd)(struct net_device *dev);
+ void (*free_bd)(struct net_device *dev);
+ void (*cleanup_data)(struct net_device *dev);
+ void (*set_multicast_list)(struct net_device *dev);
+ void (*restart)(struct net_device *dev);
+ void (*stop)(struct net_device *dev);
+ void (*pre_request_irq)(struct net_device *dev, int irq);
+ void (*post_free_irq)(struct net_device *dev, int irq);
+ void (*napi_clear_rx_event)(struct net_device *dev);
+ void (*napi_enable_rx)(struct net_device *dev);
+ void (*napi_disable_rx)(struct net_device *dev);
+ void (*rx_bd_done)(struct net_device *dev);
+ void (*tx_kickstart)(struct net_device *dev);
+ u32 (*get_int_events)(struct net_device *dev);
+ void (*clear_int_events)(struct net_device *dev, u32 int_events);
+ void (*ev_error)(struct net_device *dev, u32 int_events);
+ int (*get_regs)(struct net_device *dev, void *p, int *sizep);
+ int (*get_regs_len)(struct net_device *dev);
+ void (*tx_restart)(struct net_device *dev);
+};
+
+struct phy_info {
+ unsigned int id;
+ const char *name;
+ void (*startup) (struct net_device * dev);
+ void (*shutdown) (struct net_device * dev);
+ void (*ack_int) (struct net_device * dev);
+};
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
+#define MIN_MTU 46 /* this is data size */
+#define CRC_LEN 4
+
+#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
+#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
+
+/* Must be a multiple of 32 (to cover both FEC & FCC) */
+#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
+/* This is needed so that invalidate_xxx won't invalidate too much */
+#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE)
+
+struct fs_enet_mii_bus {
+ struct list_head list;
+ spinlock_t mii_lock;
+ const struct fs_mii_bus_info *bus_info;
+ int refs;
+ u32 usage_map;
+
+ int (*mii_read)(struct fs_enet_mii_bus *bus,
+ int phy_id, int location);
+
+ void (*mii_write)(struct fs_enet_mii_bus *bus,
+ int phy_id, int location, int value);
+
+ union {
+ struct {
+ unsigned int mii_speed;
+ void *fecp;
+ } fec;
+
+ struct {
+ /* note that the actual port size may */
+ /* be different; cpm(s) handle it OK */
+ u8 mdio_msk;
+ u8 *mdio_dir;
+ u8 *mdio_dat;
+ u8 mdc_msk;
+ u8 *mdc_dir;
+ u8 *mdc_dat;
+ } bitbang;
+
+ struct {
+ u16 lpa;
+ } fixed;
+ };
+};
+
+int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
+int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
+int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
+
+struct fs_enet_private {
+ struct device *dev; /* pointer back to the device (must be initialized first) */
+ spinlock_t lock; /* during all ops except TX pckt processing */
+ spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
+ const struct fs_platform_info *fpi;
+ const struct fs_ops *ops;
+ int rx_ring, tx_ring;
+ dma_addr_t ring_mem_addr;
+ void *ring_base;
+ struct sk_buff **rx_skbuff;
+ struct sk_buff **tx_skbuff;
+ cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
+ cbd_t *tx_bd_base;
+ cbd_t *dirty_tx; /* ring entries to be free()ed. */
+ cbd_t *cur_rx;
+ cbd_t *cur_tx;
+ int tx_free;
+ struct net_device_stats stats;
+ struct timer_list phy_timer_list;
+ const struct phy_info *phy;
+ u32 msg_enable;
+ struct mii_if_info mii_if;
+ unsigned int last_mii_status;
+ struct fs_enet_mii_bus *mii_bus;
+ int interrupt;
+
+ int duplex, speed; /* current settings */
+
+ /* event masks */
+ u32 ev_napi_rx; /* mask of NAPI rx events */
+ u32 ev_rx; /* rx event mask */
+ u32 ev_tx; /* tx event mask */
+ u32 ev_err; /* error event mask */
+
+ u16 bd_rx_empty; /* mask of BD rx empty */
+ u16 bd_rx_err; /* mask of BD rx errors */
+
+ union {
+ struct {
+ int idx; /* FEC1 = 0, FEC2 = 1 */
+ void *fecp; /* hw registers */
+ u32 hthi, htlo; /* state for multicast */
+ } fec;
+
+ struct {
+ int idx; /* FCC1-3 = 0-2 */
+ void *fccp; /* hw registers */
+ void *ep; /* parameter ram */
+ void *fcccp; /* hw registers cont. */
+ void *mem; /* FCC DPRAM */
+ u32 gaddrh, gaddrl; /* group address */
+ } fcc;
+
+ struct {
+ int idx; /* FEC1 = 0, FEC2 = 1 */
+ void *sccp; /* hw registers */
+ void *ep; /* parameter ram */
+ u32 hthi, htlo; /* state for multicast */
+ } scc;
+
+ };
+};
+
+/***************************************************************************/
+
+int fs_mii_read(struct net_device *dev, int phy_id, int location);
+void fs_mii_write(struct net_device *dev, int phy_id, int location, int value);
+
+void fs_mii_startup(struct net_device *dev);
+void fs_mii_shutdown(struct net_device *dev);
+void fs_mii_ack_int(struct net_device *dev);
+
+void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
+
+void fs_init_bds(struct net_device *dev);
+void fs_cleanup_bds(struct net_device *dev);
+
+/***************************************************************************/
+
+#define DRV_MODULE_NAME "fs_enet"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "Aug 8, 2005"
+
+/***************************************************************************/
+
+int fs_enet_platform_init(void);
+void fs_enet_platform_cleanup(void);
+
+/***************************************************************************/
+
+/* buffer descriptor access macros */
+
+/* access macros */
+#if defined(CONFIG_CPM1)
+/* for CPM1, the __raw_xxx accessors are sufficient */
+#define __cbd_out32(addr, x) __raw_writel(x, addr)
+#define __cbd_out16(addr, x) __raw_writew(x, addr)
+#define __cbd_in32(addr) __raw_readl(addr)
+#define __cbd_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __cbd_out32(addr, x) out_be32(addr, x)
+#define __cbd_out16(addr, x) out_be16(addr, x)
+#define __cbd_in32(addr) in_be32(addr)
+#define __cbd_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
+#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
+#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
+
+/* read */
+#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
+#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
+#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
+
+/* set bits */
+#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
+
+/* clear bits */
+#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
+
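
The CBDW_/CBDR_ macros above hide whether raw or byte-swapping accessors are needed for the buffer descriptors, and CBDS_SC/CBDC_SC compose them into read-modify-write set/clear helpers. A userspace model with plain memory accesses in place of the I/O accessors, showing how the rx path re-arms a descriptor:

#include <stdio.h>

struct toy_cbd { unsigned short cbd_sc; };

/* plain memory accesses stand in for __cbd_out16/__cbd_in16 */
#define TOY_W_SC(bd, v)  ((bd)->cbd_sc = (unsigned short)(v))
#define TOY_R_SC(bd)     ((bd)->cbd_sc)
#define TOY_S_SC(bd, v)  TOY_W_SC(bd, TOY_R_SC(bd) | (v))    /* set bits */
#define TOY_C_SC(bd, v)  TOY_W_SC(bd, TOY_R_SC(bd) & ~(v))   /* clear bits */

/* illustrative bit masks */
#define TOY_RX_EMPTY 0x8000
#define TOY_RX_STATS 0x013f

int main(void)
{
        struct toy_cbd bd = { .cbd_sc = TOY_RX_STATS };

        TOY_C_SC(&bd, TOY_RX_STATS);     /* drop the per-frame status bits */
        TOY_S_SC(&bd, TOY_RX_EMPTY);     /* hand the descriptor back to the MAC */
        printf("sc = 0x%04x\n", bd.cbd_sc);
        return 0;
}
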
+/*******************************************************************/
+
+extern const struct fs_ops fs_fec_ops;
+extern const struct fs_ops fs_fcc_ops;
+extern const struct fs_ops fs_scc_ops;
+
+/*******************************************************************/
+
+/* handy pointer to the immap */
+extern void *fs_enet_immap;
+
+/*******************************************************************/
+
+#endif
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
new file mode 100644
index 00000000000..a940b96433c
--- /dev/null
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -0,0 +1,578 @@
+/*
+ * FCC driver for Motorola MPC82xx (PQ2).
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+
+#include <asm/immap_cpm2.h>
+#include <asm/mpc8260.h>
+#include <asm/cpm2.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+/* FCC access macros */
+
+#define __fcc_out32(addr, x) out_be32((unsigned *)addr, x)
+#define __fcc_out16(addr, x) out_be16((unsigned short *)addr, x)
+#define __fcc_out8(addr, x) out_8((unsigned char *)addr, x)
+#define __fcc_in32(addr) in_be32((unsigned *)addr)
+#define __fcc_in16(addr) in_be16((unsigned short *)addr)
+#define __fcc_in8(addr) in_8((unsigned char *)addr)
+
+/* parameter space */
+
+/* write, read, set bits, clear bits */
+#define W32(_p, _m, _v) __fcc_out32(&(_p)->_m, (_v))
+#define R32(_p, _m) __fcc_in32(&(_p)->_m)
+#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
+#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
+
+#define W16(_p, _m, _v) __fcc_out16(&(_p)->_m, (_v))
+#define R16(_p, _m) __fcc_in16(&(_p)->_m)
+#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
+#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
+
+#define W8(_p, _m, _v) __fcc_out8(&(_p)->_m, (_v))
+#define R8(_p, _m) __fcc_in8(&(_p)->_m)
+#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
+#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
+
+/*************************************************/
+
+#define FCC_MAX_MULTICAST_ADDRS 64
+
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
+
+#define MAX_CR_CMD_LOOPS 10000
+
+static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 mcn, u32 op)
+{
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ cpm2_map_t *immap = fs_enet_immap;
+ cpm_cpm2_t *cpmp = &immap->im_cpm;
+ u32 v;
+ int i;
+
+ /* Currently I don't know what feature call will look like. But
+ I guess there'd be something like do_cpm_cmd() which will require page & sblock */
+ v = mk_cr_cmd(fpi->cp_page, fpi->cp_block, mcn, op);
+ W32(cpmp, cp_cpcr, v | CPM_CR_FLG);
+ for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
+ if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
+ break;
+
+ if (i >= MAX_CR_CMD_LOOPS) {
+ printk(KERN_ERR "%s(): Not able to issue CPM command\n",
+ __FUNCTION__);
+ return 1;
+ }
+
+ return 0;
+}
+
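
fcc_cr_cmd() issues a CPM command and then busy-waits, bounded by MAX_CR_CMD_LOOPS, for the hardware to clear CPM_CR_FLG. A generic stand-alone model of that bounded busy-wait, with an ordinary variable playing the command register and a counter faking the hardware latency:

#include <stdio.h>

#define TOY_CR_FLG    0x00010000u
#define MAX_CMD_LOOPS 10000

static unsigned int toy_cpcr;
static int busy_cycles;                  /* fake hardware latency */

static unsigned int toy_read_cpcr(void)
{
        if (busy_cycles && --busy_cycles == 0)
                toy_cpcr &= ~TOY_CR_FLG; /* "hardware" completes the command */
        return toy_cpcr;
}

static int toy_cr_cmd(unsigned int cmd)
{
        int i;

        toy_cpcr = cmd | TOY_CR_FLG;
        busy_cycles = 3;

        for (i = 0; i < MAX_CMD_LOOPS; i++)
                if ((toy_read_cpcr() & TOY_CR_FLG) == 0)
                        break;

        return (i >= MAX_CMD_LOOPS) ? 1 : 0;   /* 1 == timed out */
}

int main(void)
{
        printf("rc=%d\n", toy_cr_cmd(0x00001234));
        return 0;
}
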
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *pdev = to_platform_device(fep->dev);
+ struct resource *r;
+
+ /* Fill out IRQ field */
+ fep->interrupt = platform_get_irq(pdev, 0);
+
+ /* Attach the memory for the FCC Parameter RAM */
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
+ fep->fcc.ep = (void *)r->start;
+
+ if (fep->fcc.ep == NULL)
+ return -EINVAL;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
+ fep->fcc.fccp = (void *)r->start;
+
+ if (fep->fcc.fccp == NULL)
+ return -EINVAL;
+
+ fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
+
+ if (fep->fcc.fcccp == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
+#define FCC_RX_EVENT (FCC_ENET_RXF)
+#define FCC_TX_EVENT (FCC_ENET_TXB)
+#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->fcc.idx = fs_get_fcc_index(fpi->fs_no);
+ if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
+ return -EINVAL;
+
+ fep->fcc.mem = (void *)fpi->mem_offset;
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
+ fep->ev_rx = FCC_RX_EVENT;
+ fep->ev_tx = FCC_TX_EVENT;
+ fep->ev_err = FCC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_base = dma_alloc_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ if (fep->ring_base)
+ dma_free_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+ fep->ring_base, fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_enet_t *ep = fep->fcc.ep;
+
+ W32(ep, fen_gaddrh, 0);
+ W32(ep, fen_gaddrl, 0);
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 *mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_enet_t *ep = fep->fcc.ep;
+ u16 taddrh, taddrm, taddrl;
+
+ taddrh = ((u16)mac[5] << 8) | mac[4];
+ taddrm = ((u16)mac[3] << 8) | mac[2];
+ taddrl = ((u16)mac[1] << 8) | mac[0];
+
+ W16(ep, fen_taddrh, taddrh);
+ W16(ep, fen_taddrm, taddrm);
+ W16(ep, fen_taddrl, taddrl);
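+ /* have the CPM hash the address just written to taddr into the group address filter */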
+ fcc_cr_cmd(fep, 0x0C, CPM_CR_SET_GADDR);
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+ fcc_enet_t *ep = fep->fcc.ep;
+
+ /* clear promiscuous always */
+ C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
+
+ /* if allmulti is set or there are too many multicast addresses, just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
+
+ W32(ep, fen_gaddrh, 0xffffffff);
+ W32(ep, fen_gaddrl, 0xffffffff);
+ }
+
+ /* read back */
+ fep->fcc.gaddrh = R32(ep, fen_gaddrh);
+ fep->fcc.gaddrl = R32(ep, fen_gaddrl);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list *pmc;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ set_multicast_one(dev, pmc->dmi_addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+ fcc_t *fccp = fep->fcc.fccp;
+ fcc_c_t *fcccp = fep->fcc.fcccp;
+ fcc_enet_t *ep = fep->fcc.ep;
+ dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
+ u16 paddrh, paddrm, paddrl;
+ u16 mem_addr;
+ const unsigned char *mac;
+ int i;
+
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+
+ /* clear everything (slow & steady does it) */
+ for (i = 0; i < sizeof(*ep); i++)
+ __fcc_out8((char *)ep + i, 0);
+
+ /* get physical address */
+ rx_bd_base_phys = fep->ring_mem_addr;
+ tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
+
+ /* point to bds */
+ W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
+ W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
+
+ /* Set maximum bytes per receive buffer.
+ * It must be a multiple of 32.
+ */
+ W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
+
+ W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
+ W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
+
+ /* Allocate space in the reserved FCC area of DPRAM for the
+ * internal buffers. No one uses this space (yet), so we
+ * can do this. Later, we will add resource management for
+ * this area.
+ */
+
+ mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */
+
+ W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff));
+ W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff));
+ W16(ep, fen_padptr, mem_addr + 64);
+
+ /* fill the pad buffer with the 0x88 pad character */
+ memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
+
+ W32(ep, fen_genfcc.fcc_rbptr, 0);
+ W32(ep, fen_genfcc.fcc_tbptr, 0);
+ W32(ep, fen_genfcc.fcc_rcrc, 0);
+ W32(ep, fen_genfcc.fcc_tcrc, 0);
+ W16(ep, fen_genfcc.fcc_res1, 0);
+ W32(ep, fen_genfcc.fcc_res2, 0);
+
+ /* no CAM */
+ W32(ep, fen_camptr, 0);
+
+ /* Set CRC preset and mask */
+ W32(ep, fen_cmask, 0xdebb20e3);
+ W32(ep, fen_cpres, 0xffffffff);
+
+ W32(ep, fen_crcec, 0); /* CRC Error counter */
+ W32(ep, fen_alec, 0); /* alignment error counter */
+ W32(ep, fen_disfc, 0); /* discard frame counter */
+ W16(ep, fen_retlim, 15); /* Retry limit threshold */
+ W16(ep, fen_pper, 0); /* Normal persistence */
+
+ /* set group address */
+ W32(ep, fen_gaddrh, fep->fcc.gaddrh);
+ W32(ep, fen_gaddrl, fep->fcc.gaddrl);
+
+ /* Clear hash filter tables */
+ W32(ep, fen_iaddrh, 0);
+ W32(ep, fen_iaddrl, 0);
+
+ /* Clear the Out-of-sequence TxBD */
+ W16(ep, fen_tfcstat, 0);
+ W16(ep, fen_tfclen, 0);
+ W32(ep, fen_tfcptr, 0);
+
+ W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
+ W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
+
+ /* set address */
+ mac = dev->dev_addr;
+ paddrh = ((u16)mac[5] << 8) | mac[4];
+ paddrm = ((u16)mac[3] << 8) | mac[2];
+ paddrl = ((u16)mac[1] << 8) | mac[0];
+
+ W16(ep, fen_paddrh, paddrh);
+ W16(ep, fen_paddrm, paddrm);
+ W16(ep, fen_paddrl, paddrl);
+
+ W16(ep, fen_taddrh, 0);
+ W16(ep, fen_taddrm, 0);
+ W16(ep, fen_taddrl, 0);
+
+ W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
+ W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
+
+ /* Clear stat counters, in case we ever enable RMON */
+ W32(ep, fen_octc, 0);
+ W32(ep, fen_colc, 0);
+ W32(ep, fen_broc, 0);
+ W32(ep, fen_mulc, 0);
+ W32(ep, fen_uspc, 0);
+ W32(ep, fen_frgc, 0);
+ W32(ep, fen_ospc, 0);
+ W32(ep, fen_jbrc, 0);
+ W32(ep, fen_p64c, 0);
+ W32(ep, fen_p65c, 0);
+ W32(ep, fen_p128c, 0);
+ W32(ep, fen_p256c, 0);
+ W32(ep, fen_p512c, 0);
+ W32(ep, fen_p1024c, 0);
+
+ W16(ep, fen_rfthr, 0); /* Suggested by manual */
+ W16(ep, fen_rfcnt, 0);
+ W16(ep, fen_cftype, 0);
+
+ fs_init_bds(dev);
+
+ /* adjust to speed (for RMII mode) */
+ if (fpi->use_rmii) {
+ if (fep->speed == 100)
+ C8(fcccp, fcc_gfemr, 0x20);
+ else
+ S8(fcccp, fcc_gfemr, 0x20);
+ }
+
+ fcc_cr_cmd(fep, 0x0c, CPM_CR_INIT_TRX);
+
+ /* clear events */
+ W16(fccp, fcc_fcce, 0xffff);
+
+ /* Enable interrupts we wish to service */
+ W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
+
+ /* Set GFMR to enable Ethernet operating mode */
+ W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
+
+ /* set sync/delimiters */
+ W16(fccp, fcc_fdsr, 0xd555);
+
+ W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
+
+ if (fpi->use_rmii)
+ S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
+
+ /* adjust to duplex mode */
+ if (fep->duplex)
+ S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
+ else
+ C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
+
+ S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ /* stop ethernet */
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
+
+ /* clear events */
+ W16(fccp, fcc_fcce, 0xffff);
+
+ /* clear interrupt mask */
+ W16(fccp, fcc_fccm, 0);
+
+ fs_cleanup_bds(dev);
+}
+
+static void pre_request_irq(struct net_device *dev, int irq)
+{
+ /* nothing */
+}
+
+static void post_free_irq(struct net_device *dev, int irq)
+{
+ /* nothing */
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ return (u32)R16(fccp, fcc_fcce);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, int_events & 0xffff);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s FCC ERROR(s) 0x%x\n", dev->name, int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
+ p = (char *)p + sizeof(fcc_t);
+
+ memcpy_fromio(p, fep->fcc.fcccp, sizeof(fcc_c_t));
+ p = (char *)p + sizeof(fcc_c_t);
+
+ memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t);
+}
+
+/* Some transmit errors cause the transmitter to shut
+ * down. We now issue a restart transmit. Since the
+ * errors close the BD and update the pointers, the restart
+ * _should_ pick up without having to reset any of our
+ * pointers either. Also, to work around 8260 device erratum
+ * CPM37, we must disable and then re-enable the transmitter
+ * following a Late Collision, Underrun, or Retry Limit error.
+ */
+static void tx_restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
+ udelay(10);
+ S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
+
+ fcc_cr_cmd(fep, 0x0C, CPM_CR_RESTART_TX);
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_fcc_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .pre_request_irq = pre_request_irq,
+ .post_free_irq = post_free_irq,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
new file mode 100644
index 00000000000..5ef4e845a38
--- /dev/null
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -0,0 +1,653 @@
+/*
+ * Freescale Ethernet controllers
+ *
+ * Copyright (c) 2005 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_8xx
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/commproc.h>
+#endif
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+#if defined(CONFIG_CPM1)
+/* for a CPM1 the __raw_xxx accessors are sufficient */
+#define __fs_out32(addr, x) __raw_writel(x, addr)
+#define __fs_out16(addr, x) __raw_writew(x, addr)
+#define __fs_in32(addr) __raw_readl(addr)
+#define __fs_in16(addr) __raw_readw(addr)
+#else
+/* for others play it safe */
+#define __fs_out32(addr, x) out_be32(addr, x)
+#define __fs_out16(addr, x) out_be16(addr, x)
+#define __fs_in32(addr) in_be32(addr)
+#define __fs_in16(addr) in_be16(addr)
+#endif
+
+/* write */
+#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
+
+/* read */
+#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
+
+/* set bits */
+#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
+
+/* clear bits */
+#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
+
+
+/* CRC polynomium used by the FEC for the multicast group filtering */
+#define FEC_CRC_POLY 0x04C11DB7
+
+#define FEC_MAX_MULTICAST_ADDRS 64
+
+/* Interrupt events/masks.
+*/
+#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
+#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
+#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
+#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
+#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
+#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
+#define FEC_ENET_RXF 0x02000000U /* Full frame received */
+#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
+#define FEC_ENET_MII 0x00800000U /* MII interrupt */
+#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
+
+#define FEC_ECNTRL_PINMUX 0x00000004
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+#define FEC_ECNTRL_RESET 0x00000001
+
+#define FEC_RCNTRL_BC_REJ 0x00000010
+#define FEC_RCNTRL_PROM 0x00000008
+#define FEC_RCNTRL_MII_MODE 0x00000004
+#define FEC_RCNTRL_DRT 0x00000002
+#define FEC_RCNTRL_LOOP 0x00000001
+
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_GTS 0x00000001
+
+
+/* Make MII read/write commands for the FEC.
+*/
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
+
+#define FEC_MII_LOOPS 10000
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+
+static int whack_reset(fec_t * fecp)
+{
+ int i;
+
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
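+ /* the RESET bit self-clears once the controller reset has completed */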
+ for (i = 0; i < FEC_RESET_DELAY; i++) {
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
+ return 0; /* OK */
+ udelay(1);
+ }
+
+ return -1;
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *pdev = to_platform_device(fep->dev);
+ struct resource *r;
+
+ /* Fill out IRQ field */
+ fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ fep->fec.fecp = (void *)r->start;
+
+ if (fep->fec.fecp == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
+#define FEC_RX_EVENT (FEC_ENET_RXF)
+#define FEC_TX_EVENT (FEC_ENET_TXF)
+#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
+ FEC_ENET_BABT | FEC_ENET_EBERR)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->fec.hthi = 0;
+ fep->fec.htlo = 0;
+
+ fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
+ fep->ev_rx = FEC_RX_EVENT;
+ fep->ev_tx = FEC_TX_EVENT;
+ fep->ev_err = FEC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->ring_base = dma_alloc_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), &fep->ring_mem_addr,
+ GFP_KERNEL);
+ if (fep->ring_base == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ if (fep->ring_base)
+ dma_free_coherent(fep->dev,
+ (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+ fep->ring_base, fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ fep->fec.hthi = 0;
+ fep->fec.htlo = 0;
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 *mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ int temp, hash_index, i, j;
+ u32 crc, csrVal;
+ u8 byte, msb;
+
+ crc = 0xffffffff;
+ for (i = 0; i < 6; i++) {
+ byte = mac[i];
+ for (j = 0; j < 8; j++) {
+ msb = crc >> 31;
+ crc <<= 1;
+ if (msb ^ (byte & 0x1))
+ crc ^= FEC_CRC_POLY;
+ byte >>= 1;
+ }
+ }
+
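+ /* bit 0 of the CRC selects hash_table_high vs hash_table_low; bits 1-5, bit-reversed, select the bit within that register */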
+ temp = (crc & 0x3f) >> 1;
+ hash_index = ((temp & 0x01) << 4) |
+ ((temp & 0x02) << 2) |
+ ((temp & 0x04)) |
+ ((temp & 0x08) >> 2) |
+ ((temp & 0x10) >> 4);
+ csrVal = 1 << hash_index;
+ if (crc & 1)
+ fep->fec.hthi |= csrVal;
+ else
+ fep->fec.htlo |= csrVal;
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ /* if allmulti is set or there are too many multicast addresses, just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
+ fep->fec.hthi = 0xffffffffU;
+ fep->fec.htlo = 0xffffffffU;
+ }
+
+ FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
+ FW(fecp, hash_table_high, fep->fec.hthi);
+ FW(fecp, hash_table_low, fep->fec.htlo);
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list *pmc;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ set_multicast_one(dev, pmc->dmi_addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+static void restart(struct net_device *dev)
+{
+#ifdef CONFIG_DUET
+ immap_t *immap = fs_enet_immap;
+ u32 cptr;
+#endif
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+ const struct fs_platform_info *fpi = fep->fpi;
+ dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
+ int r;
+ u32 addrhi, addrlo;
+
+ r = whack_reset(fep->fec.fecp);
+ if (r != 0)
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": %s FEC Reset FAILED!\n", dev->name);
+
+ /*
+ * Set station address.
+ */
+ addrhi = ((u32) dev->dev_addr[0] << 24) |
+ ((u32) dev->dev_addr[1] << 16) |
+ ((u32) dev->dev_addr[2] << 8) |
+ (u32) dev->dev_addr[3];
+ addrlo = ((u32) dev->dev_addr[4] << 24) |
+ ((u32) dev->dev_addr[5] << 16);
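+ /* despite the names, addr_low takes MAC bytes 0-3 and addr_high bytes 4-5 (in its upper halfword), so the pairing below is intentional */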
+ FW(fecp, addr_low, addrhi);
+ FW(fecp, addr_high, addrlo);
+
+ /*
+ * Reset all multicast.
+ */
+ FW(fecp, hash_table_high, fep->fec.hthi);
+ FW(fecp, hash_table_low, fep->fec.htlo);
+
+ /*
+ * Set maximum receive buffer size.
+ */
+ FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
+ FW(fecp, r_hash, PKT_MAXBUF_SIZE);
+
+ /* get physical address */
+ rx_bd_base_phys = fep->ring_mem_addr;
+ tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
+
+ /*
+ * Set receive and transmit descriptor base.
+ */
+ FW(fecp, r_des_start, rx_bd_base_phys);
+ FW(fecp, x_des_start, tx_bd_base_phys);
+
+ fs_init_bds(dev);
+
+ /*
+ * Enable big endian and don't care about SDMA FC.
+ */
+ FW(fecp, fun_code, 0x78000000);
+
+ /*
+ * Set MII speed.
+ */
+ FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed);
+
+ /*
+ * Clear any outstanding interrupt.
+ */
+ FW(fecp, ievent, 0xffc0);
+ FW(fecp, ivec, (fep->interrupt / 2) << 29);
+
+
+ /*
+ * adjust to speed (only for DUET & RMII)
+ */
+#ifdef CONFIG_DUET
+ if (fpi->use_rmii) {
+ cptr = in_be32(&immap->im_cpm.cp_cptr);
+ switch (fs_get_fec_index(fpi->fs_no)) {
+ case 0:
+ cptr |= 0x100;
+ if (fep->speed == 10)
+ cptr |= 0x0000010;
+ else if (fep->speed == 100)
+ cptr &= ~0x0000010;
+ break;
+ case 1:
+ cptr |= 0x80;
+ if (fep->speed == 10)
+ cptr |= 0x0000008;
+ else if (fep->speed == 100)
+ cptr &= ~0x0000008;
+ break;
+ default:
+ BUG(); /* should never happen */
+ break;
+ }
+ out_be32(&immap->im_cpm.cp_cptr, cptr);
+ }
+#endif
+
+ FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ /*
+ * adjust to duplex mode
+ */
+ if (fep->duplex) {
+ FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
+ } else {
+ FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
+ FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
+ }
+
+ /*
+ * Enable interrupts we wish to service.
+ */
+ FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB);
+
+ /*
+ * And last, enable the transmit and receive processing.
+ */
+ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+ struct fs_enet_mii_bus *bus = fep->mii_bus;
+ const struct fs_mii_bus_info *bi = bus->bus_info;
+ int i;
+
+ if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
+ return; /* already down */
+
+ FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
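+ /* wait for the graceful-stop-complete (GRA) event */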
+ for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
+ i < FEC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == FEC_RESET_DELAY)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s FEC timeout on graceful transmit stop\n",
+ dev->name);
+ /*
+ * Mask all FEC interrupts and disable the controller; MII-only
+ * operation is restored below if the bus is shared.
+ */
+ FW(fecp, imask, 0);
+ FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
+
+ fs_cleanup_bds(dev);
+
+ /* if this FEC hosts a shared MII bus, keep the MII machinery running */
+ if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) {
+ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, ievent, FEC_ENET_MII);
+ FW(fecp, mii_speed, bus->fec.mii_speed);
+ }
+}
+
+static void pre_request_irq(struct net_device *dev, int irq)
+{
+ immap_t *immap = fs_enet_immap;
+ u32 siel;
+
+ /* SIU interrupt */
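+ /* an external IRQx line (even number) gets its SIEL edge-detect bit set; the paired internal level (odd number) is left level-sensitive */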
+ if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
+
+ siel = in_be32(&immap->im_siu_conf.sc_siel);
+ if ((irq & 1) == 0)
+ siel |= (0x80000000 >> irq);
+ else
+ siel &= ~(0x80000000 >> (irq & ~1));
+ out_be32(&immap->im_siu_conf.sc_siel, siel);
+ }
+}
+
+static void post_free_irq(struct net_device *dev, int irq)
+{
+ /* nothing */
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ FW(fecp, r_des_active, 0x01000000);
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ FW(fecp, x_des_active, 0x01000000);
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ return FR(fecp, ievent) & FR(fecp, imask);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fec_t *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, int_events);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(fec_t))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(fec_t);
+}
+
+static void tx_restart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_fec_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .pre_request_irq = pre_request_irq,
+ .post_free_irq = post_free_irq,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
+
+/***********************************************************************/
+
+static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
+{
+ fec_t *fecp = bus->fec.fecp;
+ int i, ret = -1;
+
+ if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
+ BUG();
+
+ /* Add PHY address to register command. */
+ FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS) {
+ FW(fecp, ievent, FEC_ENET_MII);
+ ret = FR(fecp, mii_data) & 0xffff;
+ }
+
+ return ret;
+}
+
+static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
+{
+ fec_t *fecp = bus->fec.fecp;
+ int i;
+
+ /* this must never happen */
+ if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
+ BUG();
+
+ /* Add PHY address to register command. */
+ FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS)
+ FW(fecp, ievent, FEC_ENET_MII);
+}
+
+int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
+{
+ bd_t *bd = (bd_t *)__res;
+ const struct fs_mii_bus_info *bi = bus->bus_info;
+ fec_t *fecp;
+
+ if (bi->id != 0)
+ return -1;
+
+ bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
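+ /* keep MDC at or below 2.5 MHz: MII_SPEED is roughly intfreq / (2 * 2.5 MHz), rounded up, placed in bits 6:1 of the register */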
+ bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
+ & 0x3F) << 1;
+
+ fecp = bus->fec.fecp;
+
+ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ FW(fecp, ievent, FEC_ENET_MII);
+ FW(fecp, mii_speed, bus->fec.mii_speed);
+
+ bus->mii_read = mii_read;
+ bus->mii_write = mii_write;
+
+ return 0;
+}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
new file mode 100644
index 00000000000..d8c6e9cadcf
--- /dev/null
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -0,0 +1,524 @@
+/*
+ * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_8xx
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/commproc.h>
+#endif
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+#if defined(CONFIG_CPM1)
+/* for an 8xx the __raw_xxx accessors are sufficient */
+#define __fs_out32(addr, x) __raw_writel(x, addr)
+#define __fs_out16(addr, x) __raw_writew(x, addr)
+#define __fs_out8(addr, x) __raw_writeb(x, addr)
+#define __fs_in32(addr) __raw_readl(addr)
+#define __fs_in16(addr) __raw_readw(addr)
+#define __fs_in8(addr) __raw_readb(addr)
+#else
+/* for others play it safe */
+#define __fs_out32(addr, x) out_be32(addr, x)
+#define __fs_out16(addr, x) out_be16(addr, x)
+#define __fs_out8(addr, x) out_8(addr, x)
+#define __fs_in32(addr) in_be32(addr)
+#define __fs_in16(addr) in_be16(addr)
+#define __fs_in8(addr) in_8(addr)
+#endif
+
+/* write, read, set bits, clear bits */
+#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
+#define R32(_p, _m) __fs_in32(&(_p)->_m)
+#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
+#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
+
+#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
+#define R16(_p, _m) __fs_in16(&(_p)->_m)
+#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
+#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
+
+#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
+#define R8(_p, _m) __fs_in8(&(_p)->_m)
+#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
+#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
+
+#define SCC_MAX_MULTICAST_ADDRS 64
+
+/*
+ * Delay to wait for SCC reset command to complete (in us)
+ */
+#define SCC_RESET_DELAY 50
+#define MAX_CR_CMD_LOOPS 10000
+
+static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
+{
+ cpm8xx_t *cpmp = &((immap_t *)fs_enet_immap)->im_cpm;
+ u32 v, ch;
+ int i = 0;
+
+ ch = fep->scc.idx << 2;
+ v = mk_cr_cmd(ch, op);
+ W16(cpmp, cp_cpcr, v | CPM_CR_FLG);
+ for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
+ if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
+ break;
+
+ if (i >= MAX_CR_CMD_LOOPS) {
+ printk(KERN_ERR "%s(): Not able to issue CPM command\n",
+ __FUNCTION__);
+ return 1;
+ }
+ return 0;
+}
+
+static int do_pd_setup(struct fs_enet_private *fep)
+{
+ struct platform_device *pdev = to_platform_device(fep->dev);
+ struct resource *r;
+
+ /* Fill out IRQ field */
+ fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ fep->scc.sccp = (void *)r->start;
+
+ if (fep->scc.sccp == NULL)
+ return -EINVAL;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
+ fep->scc.ep = (void *)r->start;
+
+ if (fep->scc.ep == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
+#define SCC_RX_EVENT (SCCE_ENET_RXF)
+#define SCC_TX_EVENT (SCCE_ENET_TXB)
+#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
+
+static int setup_data(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
+ fep->scc.idx = fs_get_scc_index(fpi->fs_no);
+ if ((unsigned int)fep->scc.idx >= 4) /* max 4 SCCs */
+ return -EINVAL;
+
+ if (do_pd_setup(fep) != 0)
+ return -EINVAL;
+
+ fep->scc.hthi = 0;
+ fep->scc.htlo = 0;
+
+ fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
+ fep->ev_rx = SCC_RX_EVENT;
+ fep->ev_tx = SCC_TX_EVENT;
+ fep->ev_err = SCC_ERR_EVENT_MSK;
+
+ return 0;
+}
+
+static int allocate_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
+
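+ /* SCC buffer descriptors are kept in CPM dual-port RAM, so allocate from the DPRAM pool rather than with dma_alloc_coherent() */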
+ fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
+ sizeof(cbd_t), 8);
+ if (IS_DPERR(fep->ring_mem_addr))
+ return -ENOMEM;
+
+ fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr);
+
+ return 0;
+}
+
+static void free_bd(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (fep->ring_base)
+ cpm_dpfree(fep->ring_mem_addr);
+}
+
+static void cleanup_data(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void set_promiscuous_mode(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_psmr, SCC_PSMR_PRO);
+}
+
+static void set_multicast_start(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_enet_t *ep = fep->scc.ep;
+
+ W16(ep, sen_gaddr1, 0);
+ W16(ep, sen_gaddr2, 0);
+ W16(ep, sen_gaddr3, 0);
+ W16(ep, sen_gaddr4, 0);
+}
+
+static void set_multicast_one(struct net_device *dev, const u8 * mac)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_enet_t *ep = fep->scc.ep;
+ u16 taddrh, taddrm, taddrl;
+
+ taddrh = ((u16) mac[5] << 8) | mac[4];
+ taddrm = ((u16) mac[3] << 8) | mac[2];
+ taddrl = ((u16) mac[1] << 8) | mac[0];
+
+ W16(ep, sen_taddrh, taddrh);
+ W16(ep, sen_taddrm, taddrm);
+ W16(ep, sen_taddrl, taddrl);
+ scc_cr_cmd(fep, CPM_CR_SET_GADDR);
+}
+
+static void set_multicast_finish(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+ scc_enet_t *ep = fep->scc.ep;
+
+ /* clear promiscuous always */
+ C16(sccp, scc_psmr, SCC_PSMR_PRO);
+
+ /* if allmulti is set or there are too many multicast addresses, just enable all */
+ if ((dev->flags & IFF_ALLMULTI) != 0 ||
+ dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
+
+ W16(ep, sen_gaddr1, 0xffff);
+ W16(ep, sen_gaddr2, 0xffff);
+ W16(ep, sen_gaddr3, 0xffff);
+ W16(ep, sen_gaddr4, 0xffff);
+ }
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list *pmc;
+
+ if ((dev->flags & IFF_PROMISC) == 0) {
+ set_multicast_start(dev);
+ for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ set_multicast_one(dev, pmc->dmi_addr);
+ set_multicast_finish(dev);
+ } else
+ set_promiscuous_mode(dev);
+}
+
+/*
+ * This function is called to start or restart the SCC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
+static void restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+ scc_enet_t *ep = fep->scc.ep;
+ const struct fs_platform_info *fpi = fep->fpi;
+ u16 paddrh, paddrm, paddrl;
+ const unsigned char *mac;
+ int i;
+
+ C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ /* clear everything (slow & steady does it) */
+ for (i = 0; i < sizeof(*ep); i++)
+ __fs_out8((char *)ep + i, 0);
+
+ /* point to bds */
+ W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
+ W16(ep, sen_genscc.scc_tbase,
+ fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
+
+ /* Initialize function code registers for big-endian.
+ */
+ W8(ep, sen_genscc.scc_rfcr, SCC_EB);
+ W8(ep, sen_genscc.scc_tfcr, SCC_EB);
+
+ /* Set maximum bytes per receive buffer.
+ * This appears to be an Ethernet frame size, not the buffer
+ * fragment size. It must be a multiple of four.
+ */
+ W16(ep, sen_genscc.scc_mrblr, 0x5f0);
+
+ /* Set CRC preset and mask.
+ */
+ W32(ep, sen_cpres, 0xffffffff);
+ W32(ep, sen_cmask, 0xdebb20e3);
+
+ W32(ep, sen_crcec, 0); /* CRC Error counter */
+ W32(ep, sen_alec, 0); /* alignment error counter */
+ W32(ep, sen_disfc, 0); /* discard frame counter */
+
+ W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
+ W16(ep, sen_retlim, 15); /* Retry limit threshold */
+
+ W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
+
+ W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
+
+ W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
+ W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
+
+ /* Clear hash tables.
+ */
+ W16(ep, sen_gaddr1, 0);
+ W16(ep, sen_gaddr2, 0);
+ W16(ep, sen_gaddr3, 0);
+ W16(ep, sen_gaddr4, 0);
+ W16(ep, sen_iaddr1, 0);
+ W16(ep, sen_iaddr2, 0);
+ W16(ep, sen_iaddr3, 0);
+ W16(ep, sen_iaddr4, 0);
+
+ /* set address
+ */
+ mac = dev->dev_addr;
+ paddrh = ((u16) mac[5] << 8) | mac[4];
+ paddrm = ((u16) mac[3] << 8) | mac[2];
+ paddrl = ((u16) mac[1] << 8) | mac[0];
+
+ W16(ep, sen_paddrh, paddrh);
+ W16(ep, sen_paddrm, paddrm);
+ W16(ep, sen_paddrl, paddrl);
+
+ W16(ep, sen_pper, 0);
+ W16(ep, sen_taddrl, 0);
+ W16(ep, sen_taddrm, 0);
+ W16(ep, sen_taddrh, 0);
+
+ fs_init_bds(dev);
+
+ scc_cr_cmd(fep, CPM_CR_INIT_TRX);
+
+ W16(sccp, scc_scce, 0xffff);
+
+ /* Enable interrupts we wish to service.
+ */
+ W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
+
+ /* Set GSMR_H to enable all normal operating modes.
+ * Set GSMR_L to enable Ethernet to MC68160.
+ */
+ W32(sccp, scc_gsmrh, 0);
+ W32(sccp, scc_gsmrl,
+ SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
+ SCC_GSMRL_MODE_ENET);
+
+ /* Set sync/delimiters.
+ */
+ W16(sccp, scc_dsr, 0xd555);
+
+ /* Set processing mode. Use Ethernet CRC, catch broadcast, and
+ * start frame search 22 bit times after RENA.
+ */
+ W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
+
+ /* Set full duplex mode if needed */
+ if (fep->duplex)
+ S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
+
+ S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+}
+
+static void stop(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+ int i;
+
+ for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
+ udelay(1);
+
+ if (i == SCC_RESET_DELAY)
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s SCC timeout on graceful transmit stop\n",
+ dev->name);
+
+ W16(sccp, scc_sccm, 0);
+ C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+ fs_cleanup_bds(dev);
+}
+
+static void pre_request_irq(struct net_device *dev, int irq)
+{
+ immap_t *immap = fs_enet_immap;
+ u32 siel;
+
+ /* SIU interrupt */
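+ /* as in mac-fec.c: external IRQx lines (even numbers) are made edge-sensitive via SIEL, internal levels are left alone */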
+ if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
+
+ siel = in_be32(&immap->im_siu_conf.sc_siel);
+ if ((irq & 1) == 0)
+ siel |= (0x80000000 >> irq);
+ else
+ siel &= ~(0x80000000 >> (irq & ~1));
+ out_be32(&immap->im_siu_conf.sc_siel, siel);
+ }
+}
+
+static void post_free_irq(struct net_device *dev, int irq)
+{
+ /* nothing */
+}
+
+static void napi_clear_rx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_enable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void napi_disable_rx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+
+ C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+}
+
+static void rx_bd_done(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static void tx_kickstart(struct net_device *dev)
+{
+ /* nothing */
+}
+
+static u32 get_int_events(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+
+ return (u32) R16(sccp, scc_scce);
+}
+
+static void clear_int_events(struct net_device *dev, u32 int_events)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, int_events & 0xffff);
+}
+
+static void ev_error(struct net_device *dev, u32 int_events)
+{
+ printk(KERN_WARNING DRV_MODULE_NAME
+ ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
+}
+
+static int get_regs(struct net_device *dev, void *p, int *sizep)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t))
+ return -EINVAL;
+
+ memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
+ p = (char *)p + sizeof(scc_t);
+
+ memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t));
+
+ return 0;
+}
+
+static int get_regs_len(struct net_device *dev)
+{
+ return sizeof(scc_t) + sizeof(scc_enet_t);
+}
+
+static void tx_restart(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ scc_cr_cmd(fep, CPM_CR_RESTART_TX);
+}
+
+/*************************************************************************/
+
+const struct fs_ops fs_scc_ops = {
+ .setup_data = setup_data,
+ .cleanup_data = cleanup_data,
+ .set_multicast_list = set_multicast_list,
+ .restart = restart,
+ .stop = stop,
+ .pre_request_irq = pre_request_irq,
+ .post_free_irq = post_free_irq,
+ .napi_clear_rx_event = napi_clear_rx_event,
+ .napi_enable_rx = napi_enable_rx,
+ .napi_disable_rx = napi_disable_rx,
+ .rx_bd_done = rx_bd_done,
+ .tx_kickstart = tx_kickstart,
+ .get_int_events = get_int_events,
+ .clear_int_events = clear_int_events,
+ .ev_error = ev_error,
+ .get_regs = get_regs,
+ .get_regs_len = get_regs_len,
+ .tx_restart = tx_restart,
+ .allocate_bd = allocate_bd,
+ .free_bd = free_bd,
+};
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
new file mode 100644
index 00000000000..24a5e2e23d1
--- /dev/null
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -0,0 +1,405 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+#ifdef CONFIG_8xx
+static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
+{
+ immap_t *im = (immap_t *)fs_enet_immap;
+ void *dir, *dat, *ppar;
+ int adv;
+ u8 msk;
+
+ switch (port) {
+ case fsiop_porta:
+ dir = &im->im_ioport.iop_padir;
+ dat = &im->im_ioport.iop_padat;
+ ppar = &im->im_ioport.iop_papar;
+ break;
+
+ case fsiop_portb:
+ dir = &im->im_cpm.cp_pbdir;
+ dat = &im->im_cpm.cp_pbdat;
+ ppar = &im->im_cpm.cp_pbpar;
+ break;
+
+ case fsiop_portc:
+ dir = &im->im_ioport.iop_pcdir;
+ dat = &im->im_ioport.iop_pcdat;
+ ppar = &im->im_ioport.iop_pcpar;
+ break;
+
+ case fsiop_portd:
+ dir = &im->im_ioport.iop_pddir;
+ dat = &im->im_ioport.iop_pddat;
+ ppar = &im->im_ioport.iop_pdpar;
+ break;
+
+ case fsiop_porte:
+ dir = &im->im_cpm.cp_pedir;
+ dat = &im->im_cpm.cp_pedat;
+ ppar = &im->im_cpm.cp_pepar;
+ break;
+
+ default:
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": Illegal port value %d!\n", port);
+ return -EINVAL;
+ }
+
+ adv = bit >> 3;
+ dir = (char *)dir + adv;
+ dat = (char *)dat + adv;
+ ppar = (char *)ppar + adv;
+
+ msk = 1 << (7 - (bit & 7));
+ if ((in_8(ppar) & msk) != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ ": pin %d on port %d is not general purpose!\n", bit, port);
+ return -EINVAL;
+ }
+
+ *dirp = dir;
+ *datp = dat;
+ *mskp = msk;
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_8260
+static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
+{
+ iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
+ void *dir, *dat, *ppar;
+ int adv;
+ u8 msk;
+
+ switch (port) {
+ case fsiop_porta:
+ dir = &io->iop_pdira;
+ dat = &io->iop_pdata;
+ ppar = &io->iop_ppara;
+ break;
+
+ case fsiop_portb:
+ dir = &io->iop_pdirb;
+ dat = &io->iop_pdatb;
+ ppar = &io->iop_pparb;
+ break;
+
+ case fsiop_portc:
+ dir = &io->iop_pdirc;
+ dat = &io->iop_pdatc;
+ ppar = &io->iop_pparc;
+ break;
+
+ case fsiop_portd:
+ dir = &io->iop_pdird;
+ dat = &io->iop_pdatd;
+ ppar = &io->iop_ppard;
+ break;
+
+ default:
+ printk(KERN_ERR DRV_MODULE_NAME
+ "Illegal port value %d!\n", port);
+ ": Illegal port value %d!\n", port);
+ }
+
+ adv = bit >> 3;
+ dir = (char *)dir + adv;
+ dat = (char *)dat + adv;
+ ppar = (char *)ppar + adv;
+
+ msk = 1 << (7 - (bit & 7));
+ if ((in_8(ppar) & msk) != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ "pin %d on port %d is not general purpose!\n", bit, port);
+ ": pin %d on port %d is not general purpose!\n", bit, port);
+ }
+
+ *dirp = dir;
+ *datp = dat;
+ *mskp = msk;
+
+ return 0;
+}
+#endif
+
+static inline void bb_set(u8 *p, u8 m)
+{
+ out_8(p, in_8(p) | m);
+}
+
+static inline void bb_clr(u8 *p, u8 m)
+{
+ out_8(p, in_8(p) & ~m);
+}
+
+static inline int bb_read(u8 *p, u8 m)
+{
+ return (in_8(p) & m) != 0;
+}
+
+static inline void mdio_active(struct fs_enet_mii_bus *bus)
+{
+ bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
+}
+
+static inline void mdio_tristate(struct fs_enet_mii_bus *bus)
+{
+ bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
+}
+
+static inline int mdio_read(struct fs_enet_mii_bus *bus)
+{
+ return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+}
+
+static inline void mdio(struct fs_enet_mii_bus *bus, int what)
+{
+ if (what)
+ bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+ else
+ bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+}
+
+static inline void mdc(struct fs_enet_mii_bus *bus, int what)
+{
+ if (what)
+ bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
+ else
+ bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
+}
+
+static inline void mii_delay(struct fs_enet_mii_bus *bus)
+{
+ udelay(bus->bus_info->i.bitbang.delay);
+}
+
+/* Utility to send the preamble, address, and register (common to read and write). */
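+/* A clause-22 MDIO frame is <preamble><start 01><opcode><5-bit phy addr><5-bit reg addr><turnaround><16 data bits>, sent MSB first. */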
+static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
+{
+ int j;
+
+ /*
+ * Send a 32-bit preamble ('1's) with an extra '1' bit for good measure.
+ * The IEEE spec makes the preamble optional for the PHY. The AMD
+ * 79C874 requires one after power up and one after a MII communications
+ * error. This means that we are doing more preambles than we need,
+ * but it is safer and will be much more robust.
+ */
+
+ mdio_active(bus);
+ mdio(bus, 1);
+ for (j = 0; j < 32; j++) {
+ mdc(bus, 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ }
+
+ /* send the start bits (01) and the read opcode (10) or write opcode (01) */
+ mdc(bus, 0);
+ mdio(bus, 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ mdc(bus, 0);
+ mdio(bus, 1);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ mdc(bus, 0);
+ mdio(bus, read);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ mdc(bus, 0);
+ mdio(bus, !read);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+
+ /* send the PHY address */
+ for (j = 0; j < 5; j++) {
+ mdc(bus, 0);
+ mdio(bus, (addr & 0x10) != 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ addr <<= 1;
+ }
+
+ /* send the register address */
+ for (j = 0; j < 5; j++) {
+ mdc(bus, 0);
+ mdio(bus, (reg & 0x10) != 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ reg <<= 1;
+ }
+}
+
+static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
+{
+ u16 rdreg;
+ int ret, j;
+ u8 addr = phy_id & 0xff;
+ u8 reg = location & 0xff;
+
+ bitbang_pre(bus, 1, addr, reg);
+
+ /* tri-state our MDIO I/O pin so we can read */
+ mdc(bus, 0);
+ mdio_tristate(bus);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+
+ /* check the turnaround bit: the PHY should be driving it to zero */
+ if (mdio_read(bus) != 0) {
+ /* PHY didn't drive TA low */
+ for (j = 0; j < 32; j++) {
+ mdc(bus, 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ }
+ ret = -1;
+ goto out;
+ }
+
+ mdc(bus, 0);
+ mii_delay(bus);
+
+ /* read 16 bits of register data, MSB first */
+ rdreg = 0;
+ for (j = 0; j < 16; j++) {
+ mdc(bus, 1);
+ mii_delay(bus);
+ rdreg <<= 1;
+ rdreg |= mdio_read(bus);
+ mdc(bus, 0);
+ mii_delay(bus);
+ }
+
+ mdc(bus, 1);
+ mii_delay(bus);
+ mdc(bus, 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+
+ ret = rdreg;
+out:
+ return ret;
+}
+
+static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
+{
+ int j;
+ u8 addr = phy_id & 0xff;
+ u8 reg = location & 0xff;
+ u16 value = val & 0xffff;
+
+ bitbang_pre(bus, 0, addr, reg);
+
+ /* send the turnaround (10) */
+ mdc(bus, 0);
+ mdio(bus, 1);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ mdc(bus, 0);
+ mdio(bus, 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+
+ /* write 16 bits of register data, MSB first */
+ for (j = 0; j < 16; j++) {
+ mdc(bus, 0);
+ mdio(bus, (value & 0x8000) != 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+ value <<= 1;
+ }
+
+ /*
+ * Tri-state the MDIO line.
+ */
+ mdio_tristate(bus);
+ mdc(bus, 0);
+ mii_delay(bus);
+ mdc(bus, 1);
+ mii_delay(bus);
+}
+
+int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus)
+{
+ const struct fs_mii_bus_info *bi = bus->bus_info;
+ int r;
+
+ r = bitbang_prep_bit(&bus->bitbang.mdio_dir,
+ &bus->bitbang.mdio_dat,
+ &bus->bitbang.mdio_msk,
+ bi->i.bitbang.mdio_port,
+ bi->i.bitbang.mdio_bit);
+ if (r != 0)
+ return r;
+
+ r = bitbang_prep_bit(&bus->bitbang.mdc_dir,
+ &bus->bitbang.mdc_dat,
+ &bus->bitbang.mdc_msk,
+ bi->i.bitbang.mdc_port,
+ bi->i.bitbang.mdc_bit);
+ if (r != 0)
+ return r;
+
+ bus->mii_read = mii_read;
+ bus->mii_write = mii_write;
+
+ return 0;
+}
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
new file mode 100644
index 00000000000..b3e192d612e
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fixed.c
@@ -0,0 +1,92 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
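+/* Canned values for MII registers 0-6 (BMCR, BMSR, PHYSID1, PHYSID2, ANAR, ANLPAR, ANER) of an always-up 100/full PHY; register 5 is overridden by bus->fixed.lpa below. */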
+static const u16 mii_regs[7] = {
+ 0x3100,
+ 0x786d,
+ 0x0fff,
+ 0x0fff,
+ 0x01e1,
+ 0x45e1,
+ 0x0003,
+};
+
+static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
+{
+ int ret = 0;
+
+ if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
+ return -1;
+
+ if (location != 5)
+ ret = mii_regs[location];
+ else
+ ret = bus->fixed.lpa;
+
+ return ret;
+}
+
+static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
+{
+ /* do nothing */
+}
+
+int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
+{
+ const struct fs_mii_bus_info *bi = bus->bus_info;
+
+ bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
+
+ /* if speed is fixed at 10Mb, remove 100Mb modes */
+ if (bi->i.fixed.speed == 10)
+ bus->fixed.lpa &= ~LPA_100;
+
+ /* if duplex is half, remove full duplex modes */
+ if (bi->i.fixed.duplex == 0)
+ bus->fixed.lpa &= ~LPA_DUPLEX;
+
+ bus->mii_read = mii_read;
+ bus->mii_write = mii_write;
+
+ return 0;
+}