author    Jeff Kirsher <jeffrey.t.kirsher@intel.com>  2011-06-14 12:56:50 -0700
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>  2011-08-12 03:41:10 -0700
commit    b544dbac41218fd015ac79455cfc1e57736e9b0c (patch)
tree      6881af397456d0237dbb123ccb585a1a8086c166 /drivers/net/ethernet/ti
parent    s6gmac: Move the s6gmac drivers (diff)
davinci*/tlan/cpmac: Move the Texas Instruments (TI) drivers
Move the Texas Instruments drivers to drivers/net/ethernet/ti/ and make
the necessary Kconfig and Makefile changes.

CC: Sriram <srk@ti.com>
CC: Vinay Hegde <vinay.hegde@ti.com>
CC: Cyril Chemparathy <cyril@ti.com>
CC: Samuel Chessman <chessman@tux.org>
CC: <torben.mathiasen@compaq.com>
CC: Eugene Konev <ejka@imfi.kspu.ru>
CC: Florian Fainelli <florian@openwrt.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/ti')
-rw-r--r--  drivers/net/ethernet/ti/Kconfig          |   76
-rw-r--r--  drivers/net/ethernet/ti/Makefile         |    9
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c          | 1305
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c  |  970
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.h  |  109
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c   | 2047
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c   |  475
-rw-r--r--  drivers/net/ethernet/ti/tlan.c           | 3258
-rw-r--r--  drivers/net/ethernet/ti/tlan.h           |  546
9 files changed, 8795 insertions, 0 deletions
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
new file mode 100644
index 000000000000..1284319ba7e0
--- /dev/null
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -0,0 +1,76 @@
+#
+# TI device configuration
+#
+
+config NET_VENDOR_TI
+ bool "Texas Instruments (TI) devices"
+ depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3))
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about TI devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_TI
+
+config TI_DAVINCI_EMAC
+ tristate "TI DaVinci EMAC Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select TI_DAVINCI_MDIO
+ select TI_DAVINCI_CPDMA
+ select PHYLIB
+ ---help---
+ This driver supports TI's DaVinci Ethernet (EMAC) controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_emac. This is recommended.
+
+config TI_DAVINCI_MDIO
+ tristate "TI DaVinci MDIO Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select PHYLIB
+ ---help---
+ This driver supports TI's DaVinci MDIO module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_mdio. This is recommended.
+
+config TI_DAVINCI_CPDMA
+ tristate "TI DaVinci CPDMA Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ ---help---
+ This driver supports TI's DaVinci CPDMA DMA engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_cpdma. This is recommended.
+
+config TLAN
+ tristate "TI ThunderLAN support"
+ depends on (PCI || EISA)
+ ---help---
+ If you have a PCI Ethernet network card based on the ThunderLAN chip
+ which is supported by this driver, say Y and read the
+ Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Devices currently supported by this driver are Compaq Netelligent,
+ Compaq NetFlex and Olicom cards. Please read the file
+ <file:Documentation/networking/tlan.txt> for more details.
+
+ To compile this driver as a module, choose M here. The module
+ will be called tlan.
+
+ Please email feedback to <torben.mathiasen@compaq.com>.
+
+config CPMAC
+ tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && AR7
+ select PHYLIB
+ ---help---
+ TI AR7 CPMAC Ethernet support
+
+endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
new file mode 100644
index 000000000000..aedb3af74e5a
--- /dev/null
+++ b/drivers/net/ethernet/ti/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the TI network device drivers.
+#
+
+obj-$(CONFIG_TLAN) += tlan.o
+obj-$(CONFIG_CPMAC) += cpmac.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
new file mode 100644
index 000000000000..e0638cb4b07c
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -0,0 +1,1305 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/atomic.h>
+
+MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
+MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cpmac");
+
+static int debug_level = 8;
+static int dumb_switch;
+
+/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
+module_param(debug_level, int, 0444);
+module_param(dumb_switch, int, 0444);
+
+MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
+MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
+
+#define CPMAC_VERSION "0.5.2"
+/* frame size + 802.1q tag + FCS size */
+#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define CPMAC_QUEUES 8
+
+/* Ethernet registers */
+#define CPMAC_TX_CONTROL 0x0004
+#define CPMAC_TX_TEARDOWN 0x0008
+#define CPMAC_RX_CONTROL 0x0014
+#define CPMAC_RX_TEARDOWN 0x0018
+#define CPMAC_MBP 0x0100
+# define MBP_RXPASSCRC 0x40000000
+# define MBP_RXQOS 0x20000000
+# define MBP_RXNOCHAIN 0x10000000
+# define MBP_RXCMF 0x01000000
+# define MBP_RXSHORT 0x00800000
+# define MBP_RXCEF 0x00400000
+# define MBP_RXPROMISC 0x00200000
+# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
+# define MBP_RXBCAST 0x00002000
+# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
+# define MBP_RXMCAST 0x00000020
+# define MBP_MCASTCHAN(channel) ((channel) & 0x7)
+#define CPMAC_UNICAST_ENABLE 0x0104
+#define CPMAC_UNICAST_CLEAR 0x0108
+#define CPMAC_MAX_LENGTH 0x010c
+#define CPMAC_BUFFER_OFFSET 0x0110
+#define CPMAC_MAC_CONTROL 0x0160
+# define MAC_TXPTYPE 0x00000200
+# define MAC_TXPACE 0x00000040
+# define MAC_MII 0x00000020
+# define MAC_TXFLOW 0x00000010
+# define MAC_RXFLOW 0x00000008
+# define MAC_MTEST 0x00000004
+# define MAC_LOOPBACK 0x00000002
+# define MAC_FDX 0x00000001
+#define CPMAC_MAC_STATUS 0x0164
+# define MAC_STATUS_QOS 0x00000004
+# define MAC_STATUS_RXFLOW 0x00000002
+# define MAC_STATUS_TXFLOW 0x00000001
+#define CPMAC_TX_INT_ENABLE 0x0178
+#define CPMAC_TX_INT_CLEAR 0x017c
+#define CPMAC_MAC_INT_VECTOR 0x0180
+# define MAC_INT_STATUS 0x00080000
+# define MAC_INT_HOST 0x00040000
+# define MAC_INT_RX 0x00020000
+# define MAC_INT_TX 0x00010000
+#define CPMAC_MAC_EOI_VECTOR 0x0184
+#define CPMAC_RX_INT_ENABLE 0x0198
+#define CPMAC_RX_INT_CLEAR 0x019c
+#define CPMAC_MAC_INT_ENABLE 0x01a8
+#define CPMAC_MAC_INT_CLEAR 0x01ac
+#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
+#define CPMAC_MAC_ADDR_MID 0x01d0
+#define CPMAC_MAC_ADDR_HI 0x01d4
+#define CPMAC_MAC_HASH_LO 0x01d8
+#define CPMAC_MAC_HASH_HI 0x01dc
+#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
+#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
+#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
+#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
+#define CPMAC_REG_END 0x0680
+/*
+ * Rx/Tx statistics
+ * TODO: use some of them to fill stats in cpmac_stats()
+ */
+#define CPMAC_STATS_RX_GOOD 0x0200
+#define CPMAC_STATS_RX_BCAST 0x0204
+#define CPMAC_STATS_RX_MCAST 0x0208
+#define CPMAC_STATS_RX_PAUSE 0x020c
+#define CPMAC_STATS_RX_CRC 0x0210
+#define CPMAC_STATS_RX_ALIGN 0x0214
+#define CPMAC_STATS_RX_OVER 0x0218
+#define CPMAC_STATS_RX_JABBER 0x021c
+#define CPMAC_STATS_RX_UNDER 0x0220
+#define CPMAC_STATS_RX_FRAG 0x0224
+#define CPMAC_STATS_RX_FILTER 0x0228
+#define CPMAC_STATS_RX_QOSFILTER 0x022c
+#define CPMAC_STATS_RX_OCTETS 0x0230
+
+#define CPMAC_STATS_TX_GOOD 0x0234
+#define CPMAC_STATS_TX_BCAST 0x0238
+#define CPMAC_STATS_TX_MCAST 0x023c
+#define CPMAC_STATS_TX_PAUSE 0x0240
+#define CPMAC_STATS_TX_DEFER 0x0244
+#define CPMAC_STATS_TX_COLLISION 0x0248
+#define CPMAC_STATS_TX_SINGLECOLL 0x024c
+#define CPMAC_STATS_TX_MULTICOLL 0x0250
+#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
+#define CPMAC_STATS_TX_LATECOLL 0x0258
+#define CPMAC_STATS_TX_UNDERRUN 0x025c
+#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
+#define CPMAC_STATS_TX_OCTETS 0x0264
+
+#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
+#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
+ (reg)))
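+/* Usage example: cpmac_write(priv->regs, CPMAC_RX_PTR(0), desc->mapping)
+ * points the rx DMA engine at a descriptor chain, and
+ * cpmac_read(priv->regs, CPMAC_MAC_STATUS) fetches a status word; both
+ * take the ioremapped register base as their first argument. */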
+
+/* MDIO bus */
+#define CPMAC_MDIO_VERSION 0x0000
+#define CPMAC_MDIO_CONTROL 0x0004
+# define MDIOC_IDLE 0x80000000
+# define MDIOC_ENABLE 0x40000000
+# define MDIOC_PREAMBLE 0x00100000
+# define MDIOC_FAULT 0x00080000
+# define MDIOC_FAULTDETECT 0x00040000
+# define MDIOC_INTTEST 0x00020000
+# define MDIOC_CLKDIV(div) ((div) & 0xff)
+#define CPMAC_MDIO_ALIVE 0x0008
+#define CPMAC_MDIO_LINK 0x000c
+#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
+# define MDIO_BUSY 0x80000000
+# define MDIO_WRITE 0x40000000
+# define MDIO_REG(reg) (((reg) & 0x1f) << 21)
+# define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
+# define MDIO_DATA(data) ((data) & 0xffff)
+#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
+# define PHYSEL_LINKSEL 0x00000040
+# define PHYSEL_LINKINT 0x00000020
+
+struct cpmac_desc {
+ u32 hw_next;
+ u32 hw_data;
+ u16 buflen;
+ u16 bufflags;
+ u16 datalen;
+ u16 dataflags;
+#define CPMAC_SOP 0x8000
+#define CPMAC_EOP 0x4000
+#define CPMAC_OWN 0x2000
+#define CPMAC_EOQ 0x1000
+ struct sk_buff *skb;
+ struct cpmac_desc *next;
+ struct cpmac_desc *prev;
+ dma_addr_t mapping;
+ dma_addr_t data_mapping;
+};
+
+struct cpmac_priv {
+ spinlock_t lock;
+ spinlock_t rx_lock;
+ struct cpmac_desc *rx_head;
+ int ring_size;
+ struct cpmac_desc *desc_ring;
+ dma_addr_t dma_ring;
+ void __iomem *regs;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy;
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ int oldlink, oldspeed, oldduplex;
+ u32 msg_enable;
+ struct net_device *dev;
+ struct work_struct reset_work;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ atomic_t reset_pending;
+};
+
+static irqreturn_t cpmac_irq(int, void *);
+static void cpmac_hw_start(struct net_device *dev);
+static void cpmac_hw_stop(struct net_device *dev);
+static int cpmac_stop(struct net_device *dev);
+static int cpmac_open(struct net_device *dev);
+
+static void cpmac_dump_regs(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ for (i = 0; i < CPMAC_REG_END; i += 4) {
+ if (i % 16 == 0) {
+ if (i)
+ pr_cont("\n");
+ printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
+ priv->regs + i);
+ }
+ pr_cont(" %08x", cpmac_read(priv->regs, i));
+ }
+ pr_cont("\n");
+}
+
+static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
+{
+ int i;
+ printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
+ for (i = 0; i < sizeof(*desc) / 4; i++)
+ pr_cont(" %08x", ((u32 *)desc)[i]);
+ pr_cont("\n");
+}
+
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *dump = priv->rx_head;
+ do {
+ cpmac_dump_desc(dev, dump);
+ dump = dump->next;
+ } while (dump != priv->rx_head);
+}
+
+static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ int i;
+ printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
+ for (i = 0; i < skb->len; i++) {
+ if (i % 16 == 0) {
+ if (i)
+ pr_cont("\n");
+ printk(KERN_DEBUG "%s: data[%p]:", dev->name,
+ skb->data + i);
+ }
+ pr_cont(" %02x", ((u8 *)skb->data)[i]);
+ }
+ pr_cont("\n");
+}
+
+static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ u32 val;
+
+ while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+ cpu_relax();
+ cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
+ MDIO_PHY(phy_id));
+ while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
+ cpu_relax();
+ return MDIO_DATA(val);
+}
+
+static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 val)
+{
+ while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+ cpu_relax();
+ cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
+ MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
+ return 0;
+}
+
+static int cpmac_mdio_reset(struct mii_bus *bus)
+{
+ struct clk *cpmac_clk;
+
+ cpmac_clk = clk_get(&bus->dev, "cpmac");
+ if (IS_ERR(cpmac_clk)) {
+ printk(KERN_ERR "unable to get cpmac clock\n");
+ return PTR_ERR(cpmac_clk);
+ }
+ ar7_device_reset(AR7_RESET_BIT_MDIO);
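+ /* NOTE: the divider below appears to target an MDIO clock of roughly
+ * 2.2 MHz (bus clock / (div + 1)); this is inferred from the constant,
+ * not from documentation. */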
+ cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
+ MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
+ return 0;
+}
+
+static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
+
+static struct mii_bus *cpmac_mii;
+
+static int cpmac_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr)
+ return -EOPNOTSUPP;
+
+ /* ignore other fields */
+ return 0;
+}
+
+static void cpmac_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u8 tmp;
+ u32 mbp, bit, hash[2] = { 0, };
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ mbp = cpmac_read(priv->regs, CPMAC_MBP);
+ if (dev->flags & IFF_PROMISC) {
+ cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
+ MBP_RXPROMISC);
+ } else {
+ cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
+ if (dev->flags & IFF_ALLMULTI) {
+ /* enable all multicast mode */
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
+ } else {
+ /*
+ * cpmac uses some strange mac address hashing
+ * (not crc32)
+ */
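+ /* Each address byte is XOR-folded into a 6-bit
+ * index (0..63) that selects one bit across the
+ * HASH_LO/HASH_HI register pair. */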
+ netdev_for_each_mc_addr(ha, dev) {
+ bit = 0;
+ tmp = ha->addr[0];
+ bit ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = ha->addr[1];
+ bit ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = ha->addr[2];
+ bit ^= (tmp >> 6) ^ tmp;
+ tmp = ha->addr[3];
+ bit ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = ha->addr[4];
+ bit ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = ha->addr[5];
+ bit ^= (tmp >> 6) ^ tmp;
+ bit &= 0x3f;
+ hash[bit / 32] |= 1 << (bit % 32);
+ }
+
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
+ }
+ }
+}
+
+static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
+ struct cpmac_desc *desc)
+{
+ struct sk_buff *skb, *result = NULL;
+
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(priv->dev, desc);
+ cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
+ if (unlikely(!desc->datalen)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: rx: spurious interrupt\n",
+ priv->dev->name);
+ return NULL;
+ }
+
+ skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
+ if (likely(skb)) {
+ skb_put(desc->skb, desc->datalen);
+ desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
+ skb_checksum_none_assert(desc->skb);
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += desc->datalen;
+ result = desc->skb;
+ dma_unmap_single(&priv->dev->dev, desc->data_mapping,
+ CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ if (unlikely(netif_msg_pktdata(priv))) {
+ printk(KERN_DEBUG "%s: received packet:\n",
+ priv->dev->name);
+ cpmac_dump_skb(priv->dev, result);
+ }
+ } else {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING
+ "%s: low on skbs, dropping packet\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
+ }
+
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+
+ return result;
+}
+
+static int cpmac_poll(struct napi_struct *napi, int budget)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc, *restart;
+ struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+ int received = 0, processed = 0;
+
+ spin_lock(&priv->rx_lock);
+ if (unlikely(!priv->rx_head)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: rx: polling, but no queue\n",
+ priv->dev->name);
+ spin_unlock(&priv->rx_lock);
+ napi_complete(napi);
+ return 0;
+ }
+
+ desc = priv->rx_head;
+ restart = NULL;
+ while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+ processed++;
+
+ if ((desc->dataflags & CPMAC_EOQ) != 0) {
+ /* The last update to eoq->hw_next didn't happen
+ * soon enough, and the receiver stopped here.
+ * Remember this descriptor so we can restart
+ * the receiver after freeing some space.
+ */
+ if (unlikely(restart)) {
+ if (netif_msg_rx_err(priv))
+ printk(KERN_ERR "%s: poll found a"
+ " duplicate EOQ: %p and %p\n",
+ priv->dev->name, restart, desc);
+ goto fatal_error;
+ }
+
+ restart = desc->next;
+ }
+
+ skb = cpmac_rx_one(priv, desc);
+ if (likely(skb)) {
+ netif_receive_skb(skb);
+ received++;
+ }
+ desc = desc->next;
+ }
+
+ if (desc != priv->rx_head) {
+ /* We freed some buffers, but not the whole ring,
+ * add what we did free to the rx list */
+ desc->prev->hw_next = (u32)0;
+ priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+ }
+
+ /* Optimization: If we did not actually process an EOQ (perhaps because
+ * of quota limits), check to see if the tail of the queue has EOQ set.
+ * We should immediately restart in that case so that the receiver can
+ * restart and run in parallel with more packet processing.
+ * This lets us handle slightly larger bursts before running
+ * out of ring space (assuming dev->weight < ring_size) */
+
+ if (!restart &&
+ (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
+ == CPMAC_EOQ &&
+ (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+ /* reset EOQ so the poll loop (above) doesn't try to
+ * restart this when it eventually gets to this descriptor.
+ */
+ priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+ restart = priv->rx_head;
+ }
+
+ if (restart) {
+ priv->dev->stats.rx_errors++;
+ priv->dev->stats.rx_fifo_errors++;
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: rx dma ring overrun\n",
+ priv->dev->name);
+
+ if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: cpmac_poll is trying to "
+ "restart rx from a descriptor that's "
+ "not free: %p\n",
+ priv->dev->name, restart);
+ goto fatal_error;
+ }
+
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+ }
+
+ priv->rx_head = desc;
+ spin_unlock(&priv->rx_lock);
+ if (unlikely(netif_msg_rx_status(priv)))
+ printk(KERN_DEBUG "%s: poll processed %d packets\n",
+ priv->dev->name, received);
+ if (processed == 0) {
+ /* we ran out of packets to read,
+ * revert to interrupt-driven mode */
+ napi_complete(napi);
+ cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+ return 0;
+ }
+
+ return 1;
+
+fatal_error:
+ /* Something went horribly wrong.
+ * Reset hardware to try to recover rather than wedging. */
+
+ if (netif_msg_drv(priv)) {
+ printk(KERN_ERR "%s: cpmac_poll is confused. "
+ "Resetting hardware\n", priv->dev->name);
+ cpmac_dump_all_desc(priv->dev);
+ printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+ priv->dev->name,
+ cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+ cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+ }
+
+ spin_unlock(&priv->rx_lock);
+ napi_complete(napi);
+ netif_tx_stop_all_queues(priv->dev);
+ napi_disable(&priv->napi);
+
+ atomic_inc(&priv->reset_pending);
+ cpmac_hw_stop(priv->dev);
+ if (!schedule_work(&priv->reset_work))
+ atomic_dec(&priv->reset_pending);
+ return 0;
+
+}
+
+static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int queue, len;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(atomic_read(&priv->reset_pending)))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_padto(skb, ETH_ZLEN)))
+ return NETDEV_TX_OK;
+
+ len = max(skb->len, ETH_ZLEN);
+ queue = skb_get_queue_mapping(skb);
+ netif_stop_subqueue(dev, queue);
+
+ desc = &priv->desc_ring[queue];
+ if (unlikely(desc->dataflags & CPMAC_OWN)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: tx dma ring full\n",
+ dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
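+ /* Lock/unlock with an empty critical section: presumably a deliberate
+ * synchronization point against other holders of priv->lock (e.g. the
+ * link-adjust path); the intent is not documented here. */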
+ spin_lock(&priv->lock);
+ spin_unlock(&priv->lock);
+ desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ desc->datalen = len;
+ desc->buflen = len;
+ if (unlikely(netif_msg_tx_queued(priv)))
+ printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
+ skb->len);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ if (unlikely(netif_msg_pktdata(priv)))
+ cpmac_dump_skb(dev, skb);
+ cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
+
+ return NETDEV_TX_OK;
+}
+
+static void cpmac_end_xmit(struct net_device *dev, int queue)
+{
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ desc = &priv->desc_ring[queue];
+ cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
+ if (likely(desc->skb)) {
+ spin_lock(&priv->lock);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += desc->skb->len;
+ spin_unlock(&priv->lock);
+ dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(netif_msg_tx_done(priv)))
+ printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
+ desc->skb, desc->skb->len);
+
+ dev_kfree_skb_irq(desc->skb);
+ desc->skb = NULL;
+ if (__netif_subqueue_stopped(dev, queue))
+ netif_wake_subqueue(dev, queue);
+ } else {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING
+ "%s: end_xmit: spurious interrupt\n", dev->name);
+ if (__netif_subqueue_stopped(dev, queue))
+ netif_wake_subqueue(dev, queue);
+ }
+}
+
+static void cpmac_hw_stop(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+
+ ar7_device_reset(pdata->reset_bit);
+ cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
+ cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
+ for (i = 0; i < 8; i++) {
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+ }
+ cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+ cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
+}
+
+static void cpmac_hw_start(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+
+ ar7_device_reset(pdata->reset_bit);
+ for (i = 0; i < 8; i++) {
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+ }
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
+
+ cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
+ MBP_RXMCAST);
+ cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
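+ /* program the MAC address: the final octet goes into each channel's
+ * ADDR_LO slot, the remaining five octets are shared via the
+ * ADDR_MID and ADDR_HI registers */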
+ for (i = 0; i < 8; i++)
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
+ (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
+ (dev->dev_addr[3] << 24));
+ cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
+ cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
+ cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+ cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+
+ cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
+ cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
+ cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+ cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
+ MAC_FDX);
+}
+
+static void cpmac_clear_rx(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+ int i;
+ if (unlikely(!priv->rx_head))
+ return;
+ desc = priv->rx_head;
+ for (i = 0; i < priv->ring_size; i++) {
+ if ((desc->dataflags & CPMAC_OWN) == 0) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: packet dropped\n",
+ dev->name);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ desc->dataflags = CPMAC_OWN;
+ dev->stats.rx_dropped++;
+ }
+ desc->hw_next = desc->next->mapping;
+ desc = desc->next;
+ }
+ priv->rx_head->prev->hw_next = 0;
+}
+
+static void cpmac_clear_tx(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ int i;
+ if (unlikely(!priv->desc_ring))
+ return;
+ for (i = 0; i < CPMAC_QUEUES; i++) {
+ priv->desc_ring[i].dataflags = 0;
+ if (priv->desc_ring[i].skb) {
+ dev_kfree_skb_any(priv->desc_ring[i].skb);
+ priv->desc_ring[i].skb = NULL;
+ }
+ }
+}
+
+static void cpmac_hw_error(struct work_struct *work)
+{
+ struct cpmac_priv *priv =
+ container_of(work, struct cpmac_priv, reset_work);
+
+ spin_lock(&priv->rx_lock);
+ cpmac_clear_rx(priv->dev);
+ spin_unlock(&priv->rx_lock);
+ cpmac_clear_tx(priv->dev);
+ cpmac_hw_start(priv->dev);
+ barrier();
+ atomic_dec(&priv->reset_pending);
+
+ netif_tx_wake_all_queues(priv->dev);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+}
+
+static void cpmac_check_status(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
+ int rx_channel = (macstatus >> 8) & 7;
+ int rx_code = (macstatus >> 12) & 15;
+ int tx_channel = (macstatus >> 16) & 7;
+ int tx_code = (macstatus >> 20) & 15;
+
+ if (rx_code || tx_code) {
+ if (netif_msg_drv(priv) && net_ratelimit()) {
+ /* Can't find any documentation on what these
+ * error codes actually are. So just log them and hope...
+ */
+ if (rx_code)
+ printk(KERN_WARNING "%s: host error %d on rx "
+ "channel %d (macstatus %08x), resetting\n",
+ dev->name, rx_code, rx_channel, macstatus);
+ if (tx_code)
+ printk(KERN_WARNING "%s: host error %d on tx "
+ "channel %d (macstatus %08x), resetting\n",
+ dev->name, tx_code, tx_channel, macstatus);
+ }
+
+ netif_tx_stop_all_queues(dev);
+ cpmac_hw_stop(dev);
+ if (schedule_work(&priv->reset_work))
+ atomic_inc(&priv->reset_pending);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_regs(dev);
+ }
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+}
+
+static irqreturn_t cpmac_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cpmac_priv *priv;
+ int queue;
+ u32 status;
+
+ priv = netdev_priv(dev);
+
+ status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
+
+ if (unlikely(netif_msg_intr(priv)))
+ printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
+ status);
+
+ if (status & MAC_INT_TX)
+ cpmac_end_xmit(dev, (status & 7));
+
+ if (status & MAC_INT_RX) {
+ queue = (status >> 8) & 7;
+ if (napi_schedule_prep(&priv->napi)) {
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
+
+ if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
+ cpmac_check_status(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void cpmac_tx_timeout(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ dev->stats.tx_errors++;
+ spin_unlock(&priv->lock);
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
+
+ atomic_inc(&priv->reset_pending);
+ barrier();
+ cpmac_clear_tx(dev);
+ barrier();
+ atomic_dec(&priv->reset_pending);
+
+ netif_tx_wake_all_queues(priv->dev);
+}
+
+static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ if (!(netif_running(dev)))
+ return -EINVAL;
+ if (!priv->phy)
+ return -EINVAL;
+
+ return phy_mii_ioctl(priv->phy, ifr, cmd);
+}
+
+static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (priv->phy)
+ return phy_ethtool_gset(priv->phy, cmd);
+
+ return -EINVAL;
+}
+
+static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (priv->phy)
+ return phy_ethtool_sset(priv->phy, cmd);
+
+ return -EINVAL;
+}
+
+static void cpmac_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = 1024;
+ ring->rx_mini_max_pending = 1;
+ ring->rx_jumbo_max_pending = 1;
+ ring->tx_max_pending = 1;
+
+ ring->rx_pending = priv->ring_size;
+ ring->rx_mini_pending = 1;
+ ring->rx_jumbo_pending = 1;
+ ring->tx_pending = 1;
+}
+
+static int cpmac_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (netif_running(dev))
+ return -EBUSY;
+ priv->ring_size = ring->rx_pending;
+ return 0;
+}
+
+static void cpmac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "cpmac");
+ strcpy(info->version, CPMAC_VERSION);
+ info->fw_version[0] = '\0';
+ sprintf(info->bus_info, "%s", "cpmac");
+ info->regdump_len = 0;
+}
+
+static const struct ethtool_ops cpmac_ethtool_ops = {
+ .get_settings = cpmac_get_settings,
+ .set_settings = cpmac_set_settings,
+ .get_drvinfo = cpmac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = cpmac_get_ringparam,
+ .set_ringparam = cpmac_set_ringparam,
+};
+
+static void cpmac_adjust_link(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ int new_state = 0;
+
+ spin_lock(&priv->lock);
+ if (priv->phy->link) {
+ netif_tx_start_all_queues(dev);
+ if (priv->phy->duplex != priv->oldduplex) {
+ new_state = 1;
+ priv->oldduplex = priv->phy->duplex;
+ }
+
+ if (priv->phy->speed != priv->oldspeed) {
+ new_state = 1;
+ priv->oldspeed = priv->phy->speed;
+ }
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv) && net_ratelimit())
+ phy_print_status(priv->phy);
+
+ spin_unlock(&priv->lock);
+}
+
+static int cpmac_open(struct net_device *dev)
+{
+ int i, size, res;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+ struct cpmac_desc *desc;
+ struct sk_buff *skb;
+
+ mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
+ if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to request registers\n",
+ dev->name);
+ res = -ENXIO;
+ goto fail_reserve;
+ }
+
+ priv->regs = ioremap(mem->start, resource_size(mem));
+ if (!priv->regs) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to remap registers\n",
+ dev->name);
+ res = -ENXIO;
+ goto fail_remap;
+ }
+
+ size = priv->ring_size + CPMAC_QUEUES;
+ priv->desc_ring = dma_alloc_coherent(&dev->dev,
+ sizeof(struct cpmac_desc) * size,
+ &priv->dma_ring,
+ GFP_KERNEL);
+ if (!priv->desc_ring) {
+ res = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ for (i = 0; i < size; i++)
+ priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
+
+ priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
+ for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
+ skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
+ if (unlikely(!skb)) {
+ res = -ENOMEM;
+ goto fail_desc;
+ }
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+ desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+ desc->next->prev = desc;
+ desc->hw_next = (u32)desc->next->mapping;
+ }
+
+ priv->rx_head->prev->hw_next = (u32)0;
+
+ res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
+ if (res) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: failed to obtain irq\n",
+ dev->name);
+ goto fail_irq;
+ }
+
+ atomic_set(&priv->reset_pending, 0);
+ INIT_WORK(&priv->reset_work, cpmac_hw_error);
+ cpmac_hw_start(dev);
+
+ napi_enable(&priv->napi);
+ priv->phy->state = PHY_CHANGELINK;
+ phy_start(priv->phy);
+
+ return 0;
+
+fail_irq:
+fail_desc:
+ for (i = 0; i < priv->ring_size; i++) {
+ if (priv->rx_head[i].skb) {
+ dma_unmap_single(&dev->dev,
+ priv->rx_head[i].data_mapping,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_head[i].skb);
+ }
+ }
+fail_alloc:
+ if (priv->desc_ring)
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
+ priv->desc_ring, priv->dma_ring);
+ iounmap(priv->regs);
+
+fail_remap:
+ release_mem_region(mem->start, resource_size(mem));
+
+fail_reserve:
+ return res;
+}
+
+static int cpmac_stop(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ netif_tx_stop_all_queues(dev);
+
+ cancel_work_sync(&priv->reset_work);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phy);
+
+ cpmac_hw_stop(dev);
+
+ for (i = 0; i < 8; i++)
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
+ cpmac_write(priv->regs, CPMAC_MBP, 0);
+
+ free_irq(dev->irq, dev);
+ iounmap(priv->regs);
+ mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
+ release_mem_region(mem->start, resource_size(mem));
+ priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
+ for (i = 0; i < priv->ring_size; i++) {
+ if (priv->rx_head[i].skb) {
+ dma_unmap_single(&dev->dev,
+ priv->rx_head[i].data_mapping,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_head[i].skb);
+ }
+ }
+
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
+ (CPMAC_QUEUES + priv->ring_size),
+ priv->desc_ring, priv->dma_ring);
+ return 0;
+}
+
+static const struct net_device_ops cpmac_netdev_ops = {
+ .ndo_open = cpmac_open,
+ .ndo_stop = cpmac_stop,
+ .ndo_start_xmit = cpmac_start_xmit,
+ .ndo_tx_timeout = cpmac_tx_timeout,
+ .ndo_set_multicast_list = cpmac_set_multicast_list,
+ .ndo_do_ioctl = cpmac_ioctl,
+ .ndo_set_config = cpmac_config,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int external_switch;
+
+static int __devinit cpmac_probe(struct platform_device *pdev)
+{
+ int rc, phy_id;
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ struct resource *mem;
+ struct cpmac_priv *priv;
+ struct net_device *dev;
+ struct plat_cpmac_data *pdata;
+
+ pdata = pdev->dev.platform_data;
+
+ if (external_switch || dumb_switch) {
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ phy_id = pdev->id;
+ } else {
+ for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+ if (!(pdata->phy_mask & (1 << phy_id)))
+ continue;
+ if (!cpmac_mii->phy_map[phy_id])
+ continue;
+ strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
+ break;
+ }
+ }
+
+ if (phy_id == PHY_MAX_ADDR) {
+ dev_err(&pdev->dev, "no PHY present, falling back "
+ "to switch on MDIO bus 0\n");
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ phy_id = pdev->id;
+ }
+
+ dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
+
+ if (!dev) {
+ printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ priv = netdev_priv(dev);
+
+ priv->pdev = pdev;
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!mem) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ dev->irq = platform_get_irq_byname(pdev, "irq");
+
+ dev->netdev_ops = &cpmac_netdev_ops;
+ dev->ethtool_ops = &cpmac_ethtool_ops;
+
+ netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->rx_lock);
+ priv->dev = dev;
+ priv->ring_size = 64;
+ priv->msg_enable = netif_msg_init(debug_level, 0xff);
+ memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
+
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
+ mdio_bus_id, phy_id);
+
+ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(priv->phy)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: Could not attach to PHY\n",
+ dev->name);
+ rc = PTR_ERR(priv->phy);
+ goto fail;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
+ dev->name);
+ goto fail;
+ }
+
+ if (netif_msg_probe(priv)) {
+ printk(KERN_INFO
+ "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
+ "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
+ priv->phy_name, dev->dev_addr);
+ }
+ return 0;
+
+fail:
+ free_netdev(dev);
+ return rc;
+}
+
+static int __devexit cpmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver cpmac_driver = {
+ .driver.name = "cpmac",
+ .driver.owner = THIS_MODULE,
+ .probe = cpmac_probe,
+ .remove = __devexit_p(cpmac_remove),
+};
+
+static int __init cpmac_init(void)
+{
+ u32 mask;
+ int i, res;
+
+ cpmac_mii = mdiobus_alloc();
+ if (cpmac_mii == NULL)
+ return -ENOMEM;
+
+ cpmac_mii->name = "cpmac-mii";
+ cpmac_mii->read = cpmac_mdio_read;
+ cpmac_mii->write = cpmac_mdio_write;
+ cpmac_mii->reset = cpmac_mdio_reset;
+ cpmac_mii->irq = mii_irqs;
+
+ cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
+
+ if (!cpmac_mii->priv) {
+ printk(KERN_ERR "Can't ioremap mdio registers\n");
+ res = -ENXIO;
+ goto fail_alloc;
+ }
+
+#warning FIXME: unhardcode gpio&reset bits
+ ar7_gpio_disable(26);
+ ar7_gpio_disable(27);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
+ ar7_device_reset(AR7_RESET_BIT_EPHY);
+
+ cpmac_mii->reset(cpmac_mii);
+
+ for (i = 0; i < 300; i++) {
+ mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
+ if (mask)
+ break;
+ else
+ msleep(10);
+ }
+
+ mask &= 0x7fffffff;
+ if (mask & (mask - 1)) {
+ external_switch = 1;
+ mask = 0;
+ }
+
+ cpmac_mii->phy_mask = ~(mask | 0x80000000);
+ snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");
+
+ res = mdiobus_register(cpmac_mii);
+ if (res)
+ goto fail_mii;
+
+ res = platform_driver_register(&cpmac_driver);
+ if (res)
+ goto fail_cpmac;
+
+ return 0;
+
+fail_cpmac:
+ mdiobus_unregister(cpmac_mii);
+
+fail_mii:
+ iounmap(cpmac_mii->priv);
+
+fail_alloc:
+ mdiobus_free(cpmac_mii);
+
+ return res;
+}
+
+static void __exit cpmac_exit(void)
+{
+ platform_driver_unregister(&cpmac_driver);
+ mdiobus_unregister(cpmac_mii);
+ iounmap(cpmac_mii->priv);
+ mdiobus_free(cpmac_mii);
+}
+
+module_init(cpmac_init);
+module_exit(cpmac_exit);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
new file mode 100644
index 000000000000..dca9d3369cdd
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -0,0 +1,970 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "davinci_cpdma.h"
+
+/* DMA Registers */
+#define CPDMA_TXIDVER 0x00
+#define CPDMA_TXCONTROL 0x04
+#define CPDMA_TXTEARDOWN 0x08
+#define CPDMA_RXIDVER 0x10
+#define CPDMA_RXCONTROL 0x14
+#define CPDMA_SOFTRESET 0x1c
+#define CPDMA_RXTEARDOWN 0x18
+#define CPDMA_TXINTSTATRAW 0x80
+#define CPDMA_TXINTSTATMASKED 0x84
+#define CPDMA_TXINTMASKSET 0x88
+#define CPDMA_TXINTMASKCLEAR 0x8c
+#define CPDMA_MACINVECTOR 0x90
+#define CPDMA_MACEOIVECTOR 0x94
+#define CPDMA_RXINTSTATRAW 0xa0
+#define CPDMA_RXINTSTATMASKED 0xa4
+#define CPDMA_RXINTMASKSET 0xa8
+#define CPDMA_RXINTMASKCLEAR 0xac
+#define CPDMA_DMAINTSTATRAW 0xb0
+#define CPDMA_DMAINTSTATMASKED 0xb4
+#define CPDMA_DMAINTMASKSET 0xb8
+#define CPDMA_DMAINTMASKCLEAR 0xbc
+#define CPDMA_DMAINT_HOSTERR BIT(1)
+
+/* the following exist only if has_ext_regs is set */
+#define CPDMA_DMACONTROL 0x20
+#define CPDMA_DMASTATUS 0x24
+#define CPDMA_RXBUFFOFS 0x28
+#define CPDMA_EM_CONTROL 0x2c
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP BIT(31)
+#define CPDMA_DESC_EOP BIT(30)
+#define CPDMA_DESC_OWNER BIT(29)
+#define CPDMA_DESC_EOQ BIT(28)
+#define CPDMA_DESC_TD_COMPLETE BIT(27)
+#define CPDMA_DESC_PASS_CRC BIT(26)
+
+#define CPDMA_TEARDOWN_VALUE 0xfffffffc
+
+struct cpdma_desc {
+ /* hardware fields */
+ u32 hw_next;
+ u32 hw_buffer;
+ u32 hw_len;
+ u32 hw_mode;
+ /* software fields */
+ void *sw_token;
+ u32 sw_buffer;
+ u32 sw_len;
+};
+
+struct cpdma_desc_pool {
+ u32 phys;
+ u32 hw_addr;
+ void __iomem *iomap; /* ioremap map */
+ void *cpumap; /* dma_alloc map */
+ int desc_size, mem_size;
+ int num_desc, used_desc;
+ unsigned long *bitmap;
+ struct device *dev;
+ spinlock_t lock;
+};
+
+enum cpdma_state {
+ CPDMA_STATE_IDLE,
+ CPDMA_STATE_ACTIVE,
+ CPDMA_STATE_TEARDOWN,
+};
+
+static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
+
+struct cpdma_ctlr {
+ enum cpdma_state state;
+ struct cpdma_params params;
+ struct device *dev;
+ struct cpdma_desc_pool *pool;
+ spinlock_t lock;
+ struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
+};
+
+struct cpdma_chan {
+ enum cpdma_state state;
+ struct cpdma_ctlr *ctlr;
+ int chan_num;
+ spinlock_t lock;
+ struct cpdma_desc __iomem *head, *tail;
+ int count;
+ void __iomem *hdp, *cp, *rxfree;
+ u32 mask;
+ cpdma_handler_fn handler;
+ enum dma_data_direction dir;
+ struct cpdma_chan_stats stats;
+ /* offsets into dmaregs */
+ int int_set, int_clear, td;
+};
+
+/* The following make access to common cpdma_ctlr params more readable */
+#define dmaregs params.dmaregs
+#define num_chan params.num_chan
+
+/* various accessors */
+#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld) __raw_readl((chan)->fld)
+#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
+#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
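+
+/* Note: hardware descriptors may live in ioremapped on-chip memory, which
+ * is why descriptor and register fields go through the __raw_* accessors
+ * above rather than plain pointer dereferences. */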
+
+/*
+ * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
+ * emac) have dedicated on-chip memory for these descriptors. Some other
+ * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
+ * abstract out these details
+ */
+static struct cpdma_desc_pool *
+cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
+ int size, int align)
+{
+ int bitmap_size;
+ struct cpdma_desc_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ spin_lock_init(&pool->lock);
+
+ pool->dev = dev;
+ pool->mem_size = size;
+ pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
+ pool->num_desc = size / pool->desc_size;
+
+ bitmap_size = BITS_TO_LONGS(pool->num_desc) * sizeof(long);
+ pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!pool->bitmap)
+ goto fail;
+
+ if (phys) {
+ pool->phys = phys;
+ pool->iomap = ioremap(phys, size);
+ pool->hw_addr = hw_addr;
+ } else {
+ pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
+ GFP_KERNEL);
+ pool->iomap = pool->cpumap;
+ pool->hw_addr = pool->phys;
+ }
+
+ if (pool->iomap)
+ return pool;
+
+fail:
+ kfree(pool->bitmap);
+ kfree(pool);
+ return NULL;
+}
+
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+ unsigned long flags;
+
+ if (!pool)
+ return;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ WARN_ON(pool->used_desc);
+ kfree(pool->bitmap);
+ if (pool->cpumap) {
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+ } else {
+ iounmap(pool->iomap);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+ kfree(pool);
+}
+
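+/* Translate between kernel-side descriptor pointers (inside pool->iomap)
+ * and the device-visible DMA addresses (based at pool->hw_addr). */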
+static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc)
+{
+ if (!desc)
+ return 0;
+ return pool->hw_addr + (__force dma_addr_t)desc -
+ (__force dma_addr_t)pool->iomap;
+}
+
+static inline struct cpdma_desc __iomem *
+desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->iomap + dma - pool->hw_addr : NULL;
+}
+
+static struct cpdma_desc __iomem *
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+{
+ unsigned long flags;
+ int index;
+ struct cpdma_desc __iomem *desc = NULL;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
+ num_desc, 0);
+ if (index < pool->num_desc) {
+ bitmap_set(pool->bitmap, index, num_desc);
+ desc = pool->iomap + pool->desc_size * index;
+ pool->used_desc++;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return desc;
+}
+
+static void cpdma_desc_free(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc, int num_desc)
+{
+ unsigned long flags, index;
+
+ index = ((unsigned long)desc - (unsigned long)pool->iomap) /
+ pool->desc_size;
+ spin_lock_irqsave(&pool->lock, flags);
+ bitmap_clear(pool->bitmap, index, num_desc);
+ pool->used_desc--;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
+{
+ struct cpdma_ctlr *ctlr;
+
+ ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
+ return NULL;
+
+ ctlr->state = CPDMA_STATE_IDLE;
+ ctlr->params = *params;
+ ctlr->dev = params->dev;
+ spin_lock_init(&ctlr->lock);
+
+ ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
+ ctlr->params.desc_mem_phys,
+ ctlr->params.desc_hw_addr,
+ ctlr->params.desc_mem_size,
+ ctlr->params.desc_align);
+ if (!ctlr->pool) {
+ kfree(ctlr);
+ return NULL;
+ }
+
+ if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
+ ctlr->num_chan = CPDMA_MAX_CHANNELS;
+ return ctlr;
+}
+
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EBUSY;
+ }
+
+ if (ctlr->params.has_soft_reset) {
+ unsigned long timeout = jiffies + HZ/10;
+
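+ /* the soft-reset bit self-clears once the reset completes;
+ * poll it for up to 100ms (HZ/10) before warning */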
+ dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
+ while (time_before(jiffies, timeout)) {
+ if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
+ break;
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ }
+
+ for (i = 0; i < ctlr->num_chan; i++) {
+ __raw_writel(0, ctlr->params.txhdp + 4 * i);
+ __raw_writel(0, ctlr->params.rxhdp + 4 * i);
+ __raw_writel(0, ctlr->params.txcp + 4 * i);
+ __raw_writel(0, ctlr->params.rxcp + 4 * i);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
+
+ ctlr->state = CPDMA_STATE_ACTIVE;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_start(ctlr->channels[i]);
+ }
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ ctlr->state = CPDMA_STATE_TEARDOWN;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_stop(ctlr->channels[i]);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
+
+ ctlr->state = CPDMA_STATE_IDLE;
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
+{
+ struct device *dev = ctlr->dev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
+
+ dev_info(dev, "CPDMA: txidver: %x",
+ dma_reg_read(ctlr, CPDMA_TXIDVER));
+ dev_info(dev, "CPDMA: txcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_TXCONTROL));
+ dev_info(dev, "CPDMA: txteardown: %x",
+ dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
+ dev_info(dev, "CPDMA: rxidver: %x",
+ dma_reg_read(ctlr, CPDMA_RXIDVER));
+ dev_info(dev, "CPDMA: rxcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_RXCONTROL));
+ dev_info(dev, "CPDMA: softreset: %x",
+ dma_reg_read(ctlr, CPDMA_SOFTRESET));
+ dev_info(dev, "CPDMA: rxteardown: %x",
+ dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
+ dev_info(dev, "CPDMA: txintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
+ dev_info(dev, "CPDMA: txintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
+ dev_info(dev, "CPDMA: txintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
+ dev_info(dev, "CPDMA: txintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: macinvector: %x",
+ dma_reg_read(ctlr, CPDMA_MACINVECTOR));
+ dev_info(dev, "CPDMA: maceoivector: %x",
+ dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
+ dev_info(dev, "CPDMA: rxintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
+ dev_info(dev, "CPDMA: rxintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
+ dev_info(dev, "CPDMA: rxintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
+ dev_info(dev, "CPDMA: rxintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: dmaintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
+ dev_info(dev, "CPDMA: dmaintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
+ dev_info(dev, "CPDMA: dmaintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
+ dev_info(dev, "CPDMA: dmaintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
+
+ if (ctlr->params.has_ext_regs) {
+ dev_info(dev, "CPDMA: dmacontrol: %x",
+ dma_reg_read(ctlr, CPDMA_DMACONTROL));
+ dev_info(dev, "CPDMA: dmastatus: %x",
+ dma_reg_read(ctlr, CPDMA_DMASTATUS));
+ dev_info(dev, "CPDMA: rxbuffofs: %x",
+ dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ if (ctlr->channels[i])
+ cpdma_chan_dump(ctlr->channels[i]);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int ret = 0, i;
+
+ if (!ctlr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE)
+ cpdma_ctlr_stop(ctlr);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_destroy(ctlr->channels[i]);
+ }
+
+ cpdma_desc_pool_destroy(ctlr->pool);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(ctlr);
+ return ret;
+}
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+{
+ unsigned long flags;
+ int i, reg;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
+ dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_int_ctrl(ctlr->channels[i], enable);
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+{
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+}
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler)
+{
+ struct cpdma_chan *chan;
+ int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+ unsigned long flags;
+
+ if (__chan_linear(chan_num) >= ctlr->num_chan)
+ return NULL;
+
+ ret = -ENOMEM;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ goto err_chan_alloc;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = -EBUSY;
+ if (ctlr->channels[chan_num])
+ goto err_chan_busy;
+
+ chan->ctlr = ctlr;
+ chan->state = CPDMA_STATE_IDLE;
+ chan->chan_num = chan_num;
+ chan->handler = handler;
+
+ if (is_rx_chan(chan)) {
+ chan->hdp = ctlr->params.rxhdp + offset;
+ chan->cp = ctlr->params.rxcp + offset;
+ chan->rxfree = ctlr->params.rxfree + offset;
+ chan->int_set = CPDMA_RXINTMASKSET;
+ chan->int_clear = CPDMA_RXINTMASKCLEAR;
+ chan->td = CPDMA_RXTEARDOWN;
+ chan->dir = DMA_FROM_DEVICE;
+ } else {
+ chan->hdp = ctlr->params.txhdp + offset;
+ chan->cp = ctlr->params.txcp + offset;
+ chan->int_set = CPDMA_TXINTMASKSET;
+ chan->int_clear = CPDMA_TXINTMASKCLEAR;
+ chan->td = CPDMA_TXTEARDOWN;
+ chan->dir = DMA_TO_DEVICE;
+ }
+ chan->mask = BIT(chan_linear(chan));
+
+ spin_lock_init(&chan->lock);
+
+ ctlr->channels[chan_num] = chan;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return chan;
+
+err_chan_busy:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+err_chan_alloc:
+ return ERR_PTR(ret);
+}
+
+int cpdma_chan_destroy(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr;
+ unsigned long flags;
+
+ if (!chan)
+ return -EINVAL;
+ ctlr = chan->ctlr;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE)
+ cpdma_chan_stop(chan);
+ ctlr->channels[chan->chan_num] = NULL;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+ return 0;
+}
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats)
+{
+ unsigned long flags;
+ if (!chan)
+ return -EINVAL;
+ spin_lock_irqsave(&chan->lock, flags);
+ memcpy(stats, &chan->stats, sizeof(*stats));
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_dump(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ struct device *dev = chan->ctlr->dev;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_info(dev, "channel %d (%s %d) state %s",
+ chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
+ chan_linear(chan), cpdma_state_str[chan->state]);
+ dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
+ dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
+ if (chan->rxfree) {
+ dev_info(dev, "\trxfree: %x\n",
+ chan_read(chan, rxfree));
+ }
+
+ dev_info(dev, "\tstats head_enqueue: %d\n",
+ chan->stats.head_enqueue);
+ dev_info(dev, "\tstats tail_enqueue: %d\n",
+ chan->stats.tail_enqueue);
+ dev_info(dev, "\tstats pad_enqueue: %d\n",
+ chan->stats.pad_enqueue);
+ dev_info(dev, "\tstats misqueued: %d\n",
+ chan->stats.misqueued);
+ dev_info(dev, "\tstats desc_alloc_fail: %d\n",
+ chan->stats.desc_alloc_fail);
+ dev_info(dev, "\tstats pad_alloc_fail: %d\n",
+ chan->stats.pad_alloc_fail);
+ dev_info(dev, "\tstats runt_receive_buff: %d\n",
+ chan->stats.runt_receive_buff);
+ dev_info(dev, "\tstats runt_transmit_buff: %d\n",
+ chan->stats.runt_transmit_buff);
+ dev_info(dev, "\tstats empty_dequeue: %d\n",
+ chan->stats.empty_dequeue);
+ dev_info(dev, "\tstats busy_dequeue: %d\n",
+ chan->stats.busy_dequeue);
+ dev_info(dev, "\tstats good_dequeue: %d\n",
+ chan->stats.good_dequeue);
+ dev_info(dev, "\tstats requeue: %d\n",
+ chan->stats.requeue);
+ dev_info(dev, "\tstats teardown_dequeue: %d\n",
+ chan->stats.teardown_dequeue);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+static void __cpdma_chan_submit(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *prev = chan->tail;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ u32 mode;
+
+ desc_dma = desc_phys(pool, desc);
+
+ /* simple case - idle channel */
+ if (!chan->head) {
+ chan->stats.head_enqueue++;
+ chan->head = desc;
+ chan->tail = desc;
+ if (chan->state == CPDMA_STATE_ACTIVE)
+ chan_write(chan, hdp, desc_dma);
+ return;
+ }
+
+ /* first chain the descriptor at the tail of the list */
+ desc_write(prev, hw_next, desc_dma);
+ chan->tail = desc;
+ chan->stats.tail_enqueue++;
+
+ /* next check if EOQ has been triggered already */
+ mode = desc_read(prev, hw_mode);
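+	/*
+	 * If the hardware still owned the previous descriptor when we
+	 * chained onto it above, DMA will follow hw_next on its own.  If
+	 * it had already raised EOQ, the channel has gone idle and must be
+	 * restarted by writing the new head pointer; the misqueued counter
+	 * below tracks these recoveries.
+	 */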
+ if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
+ (chan->state == CPDMA_STATE_ACTIVE)) {
+ desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
+ chan_write(chan, hdp, desc_dma);
+ chan->stats.misqueued++;
+ }
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ dma_addr_t buffer;
+ unsigned long flags;
+ u32 mode;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool, 1);
+ if (!desc) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
+
+ if (len < ctlr->params.min_packet_size) {
+ len = ctlr->params.min_packet_size;
+ chan->stats.runt_transmit_buff++;
+ }
+
+ buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+
+ desc_write(desc, hw_next, 0);
+ desc_write(desc, hw_buffer, buffer);
+ desc_write(desc, hw_len, len);
+ desc_write(desc, hw_mode, mode | len);
+ desc_write(desc, sw_token, token);
+ desc_write(desc, sw_buffer, buffer);
+ desc_write(desc, sw_len, len);
+
+ __cpdma_chan_submit(chan, desc);
+
+ if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
+ chan_write(chan, rxfree, 1);
+
+ chan->count++;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+static void __cpdma_chan_free(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc,
+ int outlen, int status)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t buff_dma;
+ int origlen;
+ void *token;
+
+ token = (void *)desc_read(desc, sw_token);
+ buff_dma = desc_read(desc, sw_buffer);
+ origlen = desc_read(desc, sw_len);
+
+ dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ cpdma_desc_free(pool, desc, 1);
+ (*chan->handler)(token, outlen, status);
+}
+
+static int __cpdma_chan_process(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ int status, outlen;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->head;
+ if (!desc) {
+ chan->stats.empty_dequeue++;
+ status = -ENOENT;
+ goto unlock_ret;
+ }
+ desc_dma = desc_phys(pool, desc);
+
+ status = __raw_readl(&desc->hw_mode);
+ outlen = status & 0x7ff;
+ if (status & CPDMA_DESC_OWNER) {
+ chan->stats.busy_dequeue++;
+ status = -EBUSY;
+ goto unlock_ret;
+ }
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+
+ chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
+ chan_write(chan, cp, desc_dma);
+ chan->count--;
+ chan->stats.good_dequeue++;
+
+ if (status & CPDMA_DESC_EOQ) {
+ chan->stats.requeue++;
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ __cpdma_chan_free(chan, desc, outlen, status);
+ return status;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return status;
+}
+
+int cpdma_chan_process(struct cpdma_chan *chan, int quota)
+{
+ int used = 0, ret = 0;
+
+ if (chan->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ while (used < quota) {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ used++;
+ }
+ return used;
+}
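+
+/*
+ * Note: cpdma_chan_process() is shaped for NAPI-style polling.  It returns
+ * the number of descriptors completed (at most quota), or -EINVAL if the
+ * channel is not active, so a caller that gets back less than its budget
+ * knows the queue is drained; see emac_poll() in davinci_emac.c later in
+ * this patch for the usage pattern.
+ */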
+
+int cpdma_chan_start(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EBUSY;
+ }
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+ dma_reg_write(ctlr, chan->int_set, chan->mask);
+ chan->state = CPDMA_STATE_ACTIVE;
+ if (chan->head) {
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ if (chan->rxfree)
+ chan_write(chan, rxfree, chan->count);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_stop(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+ int ret;
+ unsigned long timeout;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ chan->state = CPDMA_STATE_TEARDOWN;
+ dma_reg_write(ctlr, chan->int_clear, chan->mask);
+
+ /* trigger teardown */
+ dma_reg_write(ctlr, chan->td, chan->chan_num);
+
+ /* wait for teardown complete */
+ timeout = jiffies + HZ/10; /* 100 msec */
+ while (time_before(jiffies, timeout)) {
+ u32 cp = chan_read(chan, cp);
+ if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
+ break;
+ cpu_relax();
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
+
+ /* handle completed packets */
+ do {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+
+ /* remaining packets haven't been tx/rx'ed, clean them up */
+ while (chan->head) {
+ struct cpdma_desc __iomem *desc = chan->head;
+ dma_addr_t next_dma;
+
+ next_dma = desc_read(desc, hw_next);
+ chan->head = desc_from_phys(pool, next_dma);
+ chan->stats.teardown_dequeue++;
+
+ /* issue callback without locks held */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ __cpdma_chan_free(chan, desc, 0, -ENOSYS);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ chan->state = CPDMA_STATE_IDLE;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
+ chan->mask);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+struct cpdma_control_info {
+ u32 reg;
+ u32 shift, mask;
+ int access;
+#define ACCESS_RO BIT(0)
+#define ACCESS_WO BIT(1)
+#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
+};
+
+static struct cpdma_control_info controls[] = {
+ [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
+ [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
+ [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
+ [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
+ [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
+ [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
+ [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
+ [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
+ [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
+};
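+
+/*
+ * Editor's note (illustrative): each entry above encodes one register field
+ * as (reg, shift, mask).  CPDMA_STAT_TX_ERR_CODE, for example, occupies
+ * bits 23:20 of DMASTATUS (shift 20, mask 0xf), so cpdma_control_get()
+ * returns (dma_reg_read(ctlr, CPDMA_DMASTATUS) >> 20) & 0xf for it.
+ */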
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_RO) != ACCESS_RO)
+ goto unlock_ret;
+
+ ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
+
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+ u32 val;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_WO) != ACCESS_WO)
+ goto unlock_ret;
+
+ val = dma_reg_read(ctlr, info->reg);
+ val &= ~(info->mask << info->shift);
+ val |= (value & info->mask) << info->shift;
+ dma_reg_write(ctlr, info->reg, val);
+ ret = 0;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
new file mode 100644
index 000000000000..afa19a0c0d81
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -0,0 +1,109 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DAVINCI_CPDMA_H__
+#define __DAVINCI_CPDMA_H__
+
+#define CPDMA_MAX_CHANNELS BITS_PER_LONG
+
+#define tx_chan_num(chan) (chan)
+#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan) (!is_rx_chan(chan))
+#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan) __chan_linear((chan)->chan_num)
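+
+/*
+ * Channel numbering: a tx channel N and an rx channel N share hardware
+ * index N.  rx_chan_num() offsets rx channels by CPDMA_MAX_CHANNELS so the
+ * two directions get distinct chan_num values, and __chan_linear() masks
+ * that offset back off to recover the hardware index.
+ */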
+
+struct cpdma_params {
+ struct device *dev;
+ void __iomem *dmaregs;
+ void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
+ void __iomem *rxthresh, *rxfree;
+ int num_chan;
+ bool has_soft_reset;
+ int min_packet_size;
+ u32 desc_mem_phys;
+ u32 desc_hw_addr;
+ int desc_mem_size;
+ int desc_align;
+
+ /*
+ * Some instances of embedded cpdma controllers have extra control and
+ * status registers. The following flag enables access to these
+ * "extended" registers.
+ */
+ bool has_ext_regs;
+};
+
+struct cpdma_chan_stats {
+ u32 head_enqueue;
+ u32 tail_enqueue;
+ u32 pad_enqueue;
+ u32 misqueued;
+ u32 desc_alloc_fail;
+ u32 pad_alloc_fail;
+ u32 runt_receive_buff;
+ u32 runt_transmit_buff;
+ u32 empty_dequeue;
+ u32 busy_dequeue;
+ u32 good_dequeue;
+ u32 requeue;
+ u32 teardown_dequeue;
+};
+
+struct cpdma_ctlr;
+struct cpdma_chan;
+
+typedef void (*cpdma_handler_fn)(void *token, int len, int status);
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler);
+int cpdma_chan_destroy(struct cpdma_chan *chan);
+int cpdma_chan_start(struct cpdma_chan *chan);
+int cpdma_chan_stop(struct cpdma_chan *chan);
+int cpdma_chan_dump(struct cpdma_chan *chan);
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats);
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask);
+int cpdma_chan_process(struct cpdma_chan *chan, int quota);
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+
+enum cpdma_control {
+ CPDMA_CMD_IDLE, /* write-only */
+ CPDMA_COPY_ERROR_FRAMES, /* read-write */
+ CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
+ CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
+ CPDMA_TX_PRIO_FIXED, /* read-write */
+ CPDMA_STAT_IDLE, /* read-only */
+ CPDMA_STAT_TX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_TX_ERR_CODE, /* read-only */
+ CPDMA_STAT_RX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_RX_ERR_CODE, /* read-only */
+ CPDMA_RX_BUFFER_OFFSET, /* read-write */
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
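+
+/*
+ * Editor's note: a minimal, hedged usage sketch of the API declared above,
+ * not part of the original commit and compiled out with #if 0.  The handler
+ * body, channel number, quota and error handling are illustrative
+ * assumptions, not requirements of the API.
+ */
+#if 0
+static void example_tx_complete(void *token, int len, int status)
+{
+	/* token is the cookie that was passed to cpdma_chan_submit() */
+}
+
+static int example_cpdma_usage(struct cpdma_params *params,
+			       void *buf, int len)
+{
+	struct cpdma_ctlr *ctlr;
+	struct cpdma_chan *txch;
+	int ret;
+
+	ctlr = cpdma_ctlr_create(params);
+	if (!ctlr)
+		return -ENOMEM;
+
+	txch = cpdma_chan_create(ctlr, tx_chan_num(0), example_tx_complete);
+	if (IS_ERR_OR_NULL(txch)) {
+		cpdma_ctlr_destroy(ctlr);
+		return -ENODEV;
+	}
+
+	cpdma_ctlr_start(ctlr);
+	ret = cpdma_chan_submit(txch, buf, buf, len, GFP_KERNEL);
+
+	/* completed descriptors are reaped here, e.g. from a NAPI poll */
+	cpdma_chan_process(txch, 32);
+
+	cpdma_ctlr_stop(ctlr);
+	cpdma_chan_destroy(txch);
+	cpdma_ctlr_destroy(ctlr);
+	return ret;
+}
+#endif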
+
+#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
new file mode 100644
index 000000000000..3f451e4d8361
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -0,0 +1,2047 @@
+/*
+ * DaVinci Ethernet Medium Access Controller
+ *
+ * DaVinci EMAC is based upon the TI CPPI 3.0 DMA engine
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ * History:
+ * 0-5 A number of folks worked on this driver in bits and pieces but the major
+ * contribution came from Suraj Iyer and Anant Gole
+ * 6.0 Anant Gole - rewrote the driver as per Linux conventions
+ * 6.1 Chaithrika U S - added support for Gigabit and RMII features,
+ * PHY layer usage
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <linux/phy.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/davinci_emac.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+
+#include "davinci_cpdma.h"
+
+static int debug_level;
+module_param(debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
+
+/* Netif debug messages possible */
+#define DAVINCI_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+/* version info */
+#define EMAC_MAJOR_VERSION 6
+#define EMAC_MINOR_VERSION 1
+#define EMAC_MODULE_VERSION "6.1"
+MODULE_VERSION(EMAC_MODULE_VERSION);
+static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
+
+/* Configuration items */
+#define EMAC_DEF_PASS_CRC	(0) /* Do not pass CRC up with received frames */
+#define EMAC_DEF_QOS_EN (0) /* EMAC proprietary QoS disabled */
+#define EMAC_DEF_NO_BUFF_CHAIN (0) /* No buffer chain */
+#define EMAC_DEF_MACCTRL_FRAME_EN (0) /* Discard Maccontrol frames */
+#define EMAC_DEF_SHORT_FRAME_EN (0) /* Discard short frames */
+#define EMAC_DEF_ERROR_FRAME_EN (0) /* Discard error frames */
+#define EMAC_DEF_PROM_EN (0) /* Promiscuous disabled */
+#define EMAC_DEF_PROM_CH (0) /* Promiscuous channel is 0 */
+#define EMAC_DEF_BCAST_EN (1) /* Broadcast enabled */
+#define EMAC_DEF_BCAST_CH (0) /* Broadcast channel is 0 */
+#define EMAC_DEF_MCAST_EN (1) /* Multicast enabled */
+#define EMAC_DEF_MCAST_CH (0) /* Multicast channel is 0 */
+
+#define EMAC_DEF_TXPRIO_FIXED (1) /* TX Priority is fixed */
+#define EMAC_DEF_TXPACING_EN	(0) /* TX pacing NOT supported */
+
+#define EMAC_DEF_BUFFER_OFFSET (0) /* Buffer offset to DMA (future) */
+#define EMAC_DEF_MIN_ETHPKTSIZE (60) /* Minimum ethernet pkt size */
+#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
+#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_RX_NUM_DESC (128)
+#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
+#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
+#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
+
+/* Buffer descriptor parameters */
+#define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */
+#define EMAC_DEF_RX_MAX_SERVICE (64) /* should = netdev->weight */
+
+/* EMAC register related defines */
+#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
+#define EMAC_NUM_MULTICAST_BITS (64)
+#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
+#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
+#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
+#define EMAC_RX_UNICAST_CLEAR_ALL (0xFF)
+#define EMAC_INT_MASK_CLEAR (0xFF)
+
+/* RX MBP register bit positions */
+#define EMAC_RXMBP_PASSCRC_MASK BIT(30)
+#define EMAC_RXMBP_QOSEN_MASK BIT(29)
+#define EMAC_RXMBP_NOCHAIN_MASK BIT(28)
+#define EMAC_RXMBP_CMFEN_MASK BIT(24)
+#define EMAC_RXMBP_CSFEN_MASK BIT(23)
+#define EMAC_RXMBP_CEFEN_MASK BIT(22)
+#define EMAC_RXMBP_CAFEN_MASK BIT(21)
+#define EMAC_RXMBP_PROMCH_SHIFT (16)
+#define EMAC_RXMBP_PROMCH_MASK (0x7 << 16)
+#define EMAC_RXMBP_BROADEN_MASK BIT(13)
+#define EMAC_RXMBP_BROADCH_SHIFT (8)
+#define EMAC_RXMBP_BROADCH_MASK (0x7 << 8)
+#define EMAC_RXMBP_MULTIEN_MASK BIT(5)
+#define EMAC_RXMBP_MULTICH_SHIFT (0)
+#define EMAC_RXMBP_MULTICH_MASK (0x7)
+#define EMAC_RXMBP_CHMASK (0x7)
+
+/* EMAC register definitions/bit maps used */
+# define EMAC_MBP_RXPROMISC (0x00200000)
+# define EMAC_MBP_PROMISCCH(ch) (((ch) & 0x7) << 16)
+# define EMAC_MBP_RXBCAST (0x00002000)
+# define EMAC_MBP_BCASTCHAN(ch) (((ch) & 0x7) << 8)
+# define EMAC_MBP_RXMCAST (0x00000020)
+# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7)
+
+/* EMAC mac_control register */
+#define EMAC_MACCONTROL_TXPTYPE BIT(9)
+#define EMAC_MACCONTROL_TXPACEEN BIT(6)
+#define EMAC_MACCONTROL_GMIIEN BIT(5)
+#define EMAC_MACCONTROL_GIGABITEN BIT(7)
+#define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0)
+#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)
+
+/* GIGABIT MODE related bits */
+#define EMAC_DM646X_MACCONTORL_GIG BIT(7)
+#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17)
+
+/* EMAC mac_status register */
+#define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000)
+#define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
+#define EMAC_MACSTATUS_TXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_TXERRCH_SHIFT (16)
+#define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000)
+#define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
+#define EMAC_MACSTATUS_RXERRCH_MASK (0x7)
+#define EMAC_MACSTATUS_RXERRCH_SHIFT (8)
+
+/* EMAC RX register masks */
+#define EMAC_RX_MAX_LEN_MASK (0xFFFF)
+#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF)
+
+/* MAC_IN_VECTOR (0x180) register bit fields */
+#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17)
+#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16)
+#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8)
+#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0)
+
+/* NOTE: For DM646x the IN_VECTOR has changed */
+#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
+
+/* CPPI bit positions */
+#define EMAC_CPPI_SOP_BIT BIT(31)
+#define EMAC_CPPI_EOP_BIT BIT(30)
+#define EMAC_CPPI_OWNERSHIP_BIT BIT(29)
+#define EMAC_CPPI_EOQ_BIT BIT(28)
+#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT BIT(27)
+#define EMAC_CPPI_PASS_CRC_BIT BIT(26)
+#define EMAC_RX_BD_BUF_SIZE (0xFFFF)
+#define EMAC_BD_LENGTH_FOR_CACHE (16) /* only CPPI bytes */
+#define EMAC_RX_BD_PKT_LENGTH_MASK (0xFFFF)
+
+/* Max hardware defines */
+#define EMAC_MAX_TXRX_CHANNELS (8) /* Max hardware channels */
+#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
+
+/* EMAC Peripheral Device Register Memory Layout structure */
+#define EMAC_MACINVECTOR 0x90
+
+#define EMAC_DM646X_MACEOIVECTOR 0x94
+
+#define EMAC_MACINTSTATRAW 0xB0
+#define EMAC_MACINTSTATMASKED 0xB4
+#define EMAC_MACINTMASKSET 0xB8
+#define EMAC_MACINTMASKCLEAR 0xBC
+
+#define EMAC_RXMBPENABLE 0x100
+#define EMAC_RXUNICASTSET 0x104
+#define EMAC_RXUNICASTCLEAR 0x108
+#define EMAC_RXMAXLEN 0x10C
+#define EMAC_RXBUFFEROFFSET 0x110
+#define EMAC_RXFILTERLOWTHRESH 0x114
+
+#define EMAC_MACCONTROL 0x160
+#define EMAC_MACSTATUS 0x164
+#define EMAC_EMCONTROL 0x168
+#define EMAC_FIFOCONTROL 0x16C
+#define EMAC_MACCONFIG 0x170
+#define EMAC_SOFTRESET 0x174
+#define EMAC_MACSRCADDRLO 0x1D0
+#define EMAC_MACSRCADDRHI 0x1D4
+#define EMAC_MACHASH1 0x1D8
+#define EMAC_MACHASH2 0x1DC
+#define EMAC_MACADDRLO 0x500
+#define EMAC_MACADDRHI 0x504
+#define EMAC_MACINDEX 0x508
+
+/* EMAC statistics registers */
+#define EMAC_RXGOODFRAMES 0x200
+#define EMAC_RXBCASTFRAMES 0x204
+#define EMAC_RXMCASTFRAMES 0x208
+#define EMAC_RXPAUSEFRAMES 0x20C
+#define EMAC_RXCRCERRORS 0x210
+#define EMAC_RXALIGNCODEERRORS 0x214
+#define EMAC_RXOVERSIZED 0x218
+#define EMAC_RXJABBER 0x21C
+#define EMAC_RXUNDERSIZED 0x220
+#define EMAC_RXFRAGMENTS 0x224
+#define EMAC_RXFILTERED 0x228
+#define EMAC_RXQOSFILTERED 0x22C
+#define EMAC_RXOCTETS 0x230
+#define EMAC_TXGOODFRAMES 0x234
+#define EMAC_TXBCASTFRAMES 0x238
+#define EMAC_TXMCASTFRAMES 0x23C
+#define EMAC_TXPAUSEFRAMES 0x240
+#define EMAC_TXDEFERRED 0x244
+#define EMAC_TXCOLLISION 0x248
+#define EMAC_TXSINGLECOLL 0x24C
+#define EMAC_TXMULTICOLL 0x250
+#define EMAC_TXEXCESSIVECOLL 0x254
+#define EMAC_TXLATECOLL 0x258
+#define EMAC_TXUNDERRUN 0x25C
+#define EMAC_TXCARRIERSENSE 0x260
+#define EMAC_TXOCTETS 0x264
+#define EMAC_NETOCTETS 0x280
+#define EMAC_RXSOFOVERRUNS 0x284
+#define EMAC_RXMOFOVERRUNS 0x288
+#define EMAC_RXDMAOVERRUNS 0x28C
+
+/* EMAC DM644x control registers */
+#define EMAC_CTRL_EWCTL (0x4)
+#define EMAC_CTRL_EWINTTCNT (0x8)
+
+/* EMAC DM644x control module masks */
+#define EMAC_DM644X_EWINTCNT_MASK 0x1FFFF
+#define EMAC_DM644X_INTMIN_INTVL 0x1
+#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
+
+/* EMAC DM646X control module registers */
+#define EMAC_DM646X_CMINTCTRL 0x0C
+#define EMAC_DM646X_CMRXINTEN 0x14
+#define EMAC_DM646X_CMTXINTEN 0x18
+#define EMAC_DM646X_CMRXINTMAX 0x70
+#define EMAC_DM646X_CMTXINTMAX 0x74
+
+/* EMAC DM646X control module masks */
+#define EMAC_DM646X_INTPACEEN (0x3 << 16)
+#define EMAC_DM646X_INTPRESCALE_MASK (0x7FF << 0)
+#define EMAC_DM646X_CMINTMAX_CNT 63
+#define EMAC_DM646X_CMINTMIN_CNT 2
+#define EMAC_DM646X_CMINTMAX_INTVL (1000 / EMAC_DM646X_CMINTMIN_CNT)
+#define EMAC_DM646X_CMINTMIN_INTVL ((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1)
+
+/* EMAC EOI codes for C0 */
+#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
+#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02)
+
+/* EMAC Stats Clear Mask */
+#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
+
+/* emac_priv: EMAC adapter private data structure */
+struct emac_priv {
+ u32 msg_enable;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ char mac_addr[6];
+ void __iomem *remap_addr;
+ u32 emac_base_phys;
+ void __iomem *emac_base;
+ void __iomem *ctrl_base;
+ struct cpdma_ctlr *dma;
+ struct cpdma_chan *txchan;
+ struct cpdma_chan *rxchan;
+ u32 link; /* 1=link on, 0=link off */
+ u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
+ u32 duplex; /* Link duplex: 0=Half, 1=Full */
+ u32 rx_buf_size;
+ u32 isr_count;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
+ u8 rmii_en;
+ u8 version;
+ u32 mac_hash1;
+ u32 mac_hash2;
+ u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
+ u32 rx_addr_type;
+ const char *phy_id;
+ struct phy_device *phydev;
+ spinlock_t lock;
+ /*platform specific members*/
+ void (*int_enable) (void);
+ void (*int_disable) (void);
+};
+
+/* clock frequency for EMAC */
+static struct clk *emac_clk;
+static unsigned long emac_bus_frequency;
+
+/* EMAC TX Host Error description strings */
+static char *emac_txhost_errcodes[16] = {
+ "No error", "SOP error", "Ownership bit not set in SOP buffer",
+ "Zero Next Buffer Descriptor Pointer Without EOP",
+ "Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error",
+ "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* EMAC RX Host Error description strings */
+static char *emac_rxhost_errcodes[16] = {
+ "No error", "Reserved", "Ownership bit not set in input buffer",
+ "Reserved", "Zero Buffer Pointer", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* Helper macros */
+#define emac_read(reg) ioread32(priv->emac_base + (reg))
+#define emac_write(reg, val) iowrite32(val, priv->emac_base + (reg))
+
+#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
+#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
+
+/**
+ * emac_dump_regs: Dump important EMAC registers to debug terminal
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Dumps the important EMAC registers and statistics to the kernel log
+ *
+ */
+static void emac_dump_regs(struct emac_priv *priv)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+
+ /* Print important registers in EMAC */
+ dev_info(emac_dev, "EMAC Basic registers\n");
+ if (priv->version == EMAC_VERSION_1) {
+ dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
+ emac_ctrl_read(EMAC_CTRL_EWCTL),
+ emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
+ }
+ dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
+ emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
+ dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
+ "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE),
+ emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN));
+ dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
+ "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
+ emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
+ dev_info(emac_dev, "EMAC Statistics\n");
+ dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
+ emac_read(EMAC_RXGOODFRAMES));
+ dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n",
+ emac_read(EMAC_RXBCASTFRAMES));
+ dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n",
+ emac_read(EMAC_RXMCASTFRAMES));
+ dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n",
+ emac_read(EMAC_RXPAUSEFRAMES));
+ dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n",
+ emac_read(EMAC_RXCRCERRORS));
+ dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n",
+ emac_read(EMAC_RXALIGNCODEERRORS));
+ dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n",
+ emac_read(EMAC_RXOVERSIZED));
+ dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n",
+ emac_read(EMAC_RXJABBER));
+ dev_info(emac_dev, "EMAC: rx_undersized_frames:%d\n",
+ emac_read(EMAC_RXUNDERSIZED));
+ dev_info(emac_dev, "EMAC: rx_fragments:%d\n",
+ emac_read(EMAC_RXFRAGMENTS));
+ dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n",
+ emac_read(EMAC_RXFILTERED));
+ dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n",
+ emac_read(EMAC_RXQOSFILTERED));
+ dev_info(emac_dev, "EMAC: rx_octets:%d\n",
+ emac_read(EMAC_RXOCTETS));
+ dev_info(emac_dev, "EMAC: tx_goodframes:%d\n",
+ emac_read(EMAC_TXGOODFRAMES));
+ dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n",
+ emac_read(EMAC_TXBCASTFRAMES));
+ dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n",
+ emac_read(EMAC_TXMCASTFRAMES));
+ dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n",
+ emac_read(EMAC_TXPAUSEFRAMES));
+ dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n",
+ emac_read(EMAC_TXDEFERRED));
+ dev_info(emac_dev, "EMAC: tx_collision_frames:%d\n",
+ emac_read(EMAC_TXCOLLISION));
+ dev_info(emac_dev, "EMAC: tx_single_coll_frames:%d\n",
+ emac_read(EMAC_TXSINGLECOLL));
+ dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n",
+ emac_read(EMAC_TXMULTICOLL));
+ dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n",
+ emac_read(EMAC_TXEXCESSIVECOLL));
+ dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n",
+ emac_read(EMAC_TXLATECOLL));
+ dev_info(emac_dev, "EMAC: tx_underrun:%d\n",
+ emac_read(EMAC_TXUNDERRUN));
+ dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n",
+ emac_read(EMAC_TXCARRIERSENSE));
+ dev_info(emac_dev, "EMAC: tx_octets:%d\n",
+ emac_read(EMAC_TXOCTETS));
+ dev_info(emac_dev, "EMAC: net_octets:%d\n",
+ emac_read(EMAC_NETOCTETS));
+ dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n",
+ emac_read(EMAC_RXSOFOVERRUNS));
+ dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n",
+ emac_read(EMAC_RXMOFOVERRUNS));
+ dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
+ emac_read(EMAC_RXDMAOVERRUNS));
+
+ cpdma_ctlr_dump(priv->dma);
+}
+
+/**
+ * emac_get_drvinfo: Get EMAC driver information
+ * @ndev: The DaVinci EMAC network adapter
+ * @info: ethtool info structure containing name and version
+ *
+ * Returns EMAC driver information (name and version)
+ *
+ */
+static void emac_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, emac_version_string);
+ strcpy(info->version, EMAC_MODULE_VERSION);
+}
+
+/**
+ * emac_get_settings: Get EMAC settings
+ * @ndev: The DaVinci EMAC network adapter
+ * @ecmd: ethtool command
+ *
+ * Executes the ethtool get command
+ *
+ */
+static int emac_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ if (priv->phydev)
+ return phy_ethtool_gset(priv->phydev, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
+/**
+ * emac_set_settings: Set EMAC settings
+ * @ndev: The DaVinci EMAC network adapter
+ * @ecmd: ethtool command
+ *
+ * Executes the ethtool set command
+ *
+ */
+static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ if (priv->phydev)
+ return phy_ethtool_sset(priv->phydev, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
+/**
+ * emac_get_coalesce : Get interrupt coalesce settings for this device
+ * @ndev : The DaVinci EMAC network adapter
+ * @coal : ethtool coalesce settings structure
+ *
+ * Fetch the current interrupt coalesce settings
+ *
+ */
+static int emac_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ coal->rx_coalesce_usecs = priv->coal_intvl;
+ return 0;
+}
+
+/**
+ * emac_set_coalesce : Set interrupt coalesce settings for this device
+ * @ndev : The DaVinci EMAC network adapter
+ * @coal : ethtool coalesce settings structure
+ *
+ * Set interrupt coalesce parameters
+ *
+ */
+static int emac_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl, num_interrupts = 0;
+ u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0;
+
+ if (!coal->rx_coalesce_usecs)
+ return -EINVAL;
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ switch (priv->version) {
+ case EMAC_VERSION_2:
+ int_ctrl = emac_ctrl_read(EMAC_DM646X_CMINTCTRL);
+ prescale = priv->bus_freq_mhz * 4;
+
+ if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL)
+ coal_intvl = EMAC_DM646X_CMINTMIN_INTVL;
+
+ if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) {
+ /*
+ * Interrupt pacer works with 4us Pulse, we can
+ * throttle further by dilating the 4us pulse.
+ */
+ addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL
+ * addnl_dvdr))
+ coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = EMAC_DM646X_CMINTMAX_INTVL;
+ }
+ }
+
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
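+		/*
+		 * Editor's note (illustrative): on a 100 MHz bus,
+		 * prescale = 400 bus clocks, i.e. one 4us pacer pulse.
+		 * A request of rx_coalesce_usecs = 250 keeps addnl_dvdr = 1
+		 * and gives num_interrupts = 1000 / 250 = 4 pacer events
+		 * per millisecond.
+		 */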
+
+ int_ctrl |= EMAC_DM646X_INTPACEEN;
+ int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK);
+ emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl);
+
+ emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts);
+
+ break;
+ default:
+ int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT);
+ int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK);
+ prescale = coal_intvl * priv->bus_freq_mhz;
+ if (prescale > EMAC_DM644X_EWINTCNT_MASK) {
+ prescale = EMAC_DM644X_EWINTCNT_MASK;
+ coal_intvl = prescale / priv->bus_freq_mhz;
+ }
+ emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale));
+
+ break;
+ }
+
+	printk(KERN_INFO "Set coalesce to %d usecs.\n", coal_intvl);
+ priv->coal_intvl = coal_intvl;
+
+ return 0;
+}
+
+/**
+ * ethtool_ops: DaVinci EMAC Ethtool structure
+ *
+ * Ethtool support for EMAC adapter
+ *
+ */
+static const struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = emac_get_drvinfo,
+ .get_settings = emac_get_settings,
+ .set_settings = emac_set_settings,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = emac_get_coalesce,
+ .set_coalesce = emac_set_coalesce,
+};
+
+/**
+ * emac_update_phystatus: Update Phy status
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Updates phy status and takes action for network queue if required
+ * based upon link status
+ *
+ */
+static void emac_update_phystatus(struct emac_priv *priv)
+{
+ u32 mac_control;
+ u32 new_duplex;
+ u32 cur_duplex;
+ struct net_device *ndev = priv->ndev;
+
+ mac_control = emac_read(EMAC_MACCONTROL);
+ cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ if (priv->phydev)
+ new_duplex = priv->phydev->duplex;
+ else
+ new_duplex = DUPLEX_FULL;
+
+ /* We get called only if link has changed (speed/duplex/status) */
+ if ((priv->link) && (new_duplex != cur_duplex)) {
+ priv->duplex = new_duplex;
+ if (DUPLEX_FULL == priv->duplex)
+ mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN);
+ else
+ mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN);
+ }
+
+ if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
+ mac_control = emac_read(EMAC_MACCONTROL);
+ mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
+ EMAC_DM646X_MACCONTORL_GIGFORCE);
+ } else {
+ /* Clear the GIG bit and GIGFORCE bit */
+ mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE |
+ EMAC_DM646X_MACCONTORL_GIG);
+
+ if (priv->rmii_en && (priv->speed == SPEED_100))
+ mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK;
+ else
+ mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK;
+ }
+
+ /* Update mac_control if changed */
+ emac_write(EMAC_MACCONTROL, mac_control);
+
+ if (priv->link) {
+ /* link ON */
+ if (!netif_carrier_ok(ndev))
+ netif_carrier_on(ndev);
+ /* reactivate the transmit queue if it is stopped */
+ if (netif_running(ndev) && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ /* link OFF */
+ if (netif_carrier_ok(ndev))
+ netif_carrier_off(ndev);
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ }
+}
+
+/**
+ * hash_get: Calculate hash value from mac address
+ * @addr: mac address to hash
+ *
+ * Folds the six octets of @addr into a 6-bit value (0-63), used as a bit
+ * index into the MACHASH1/MACHASH2 multicast hash registers
+ *
+ */
+static u32 hash_get(u8 *addr)
+{
+ u32 hash;
+ u8 tmpval;
+ int cnt;
+ hash = 0;
+
+ for (cnt = 0; cnt < 2; cnt++) {
+ tmpval = *addr++;
+ hash ^= (tmpval >> 2) ^ (tmpval << 4);
+ tmpval = *addr++;
+ hash ^= (tmpval >> 4) ^ (tmpval << 2);
+ tmpval = *addr++;
+ hash ^= (tmpval >> 6) ^ (tmpval);
+ }
+
+ return hash & 0x3F;
+}
+
+/**
+ * hash_add: Hash function to add mac addr to hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to add to hash table
+ *
+ * Adds mac address to the internal hash table
+ *
+ */
+static int hash_add(struct emac_priv *priv, u8 *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ u32 rc = 0;
+ u32 hash_bit;
+ u32 hash_value = hash_get(mac_addr);
+
+ if (hash_value >= EMAC_NUM_MULTICAST_BITS) {
+ if (netif_msg_drv(priv)) {
+ dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\
+ "Hash %08x, should not be greater than %08x",
+ hash_value, (EMAC_NUM_MULTICAST_BITS - 1));
+ }
+ return -1;
+ }
+
+ /* set the hash bit only if not previously set */
+ if (priv->multicast_hash_cnt[hash_value] == 0) {
+ rc = 1; /* hash value changed */
+ if (hash_value < 32) {
+ hash_bit = BIT(hash_value);
+ priv->mac_hash1 |= hash_bit;
+ } else {
+ hash_bit = BIT((hash_value - 32));
+ priv->mac_hash2 |= hash_bit;
+ }
+ }
+
+ /* incr counter for num of mcast addr's mapped to "this" hash bit */
+ ++priv->multicast_hash_cnt[hash_value];
+
+ return rc;
+}
+
+/**
+ * hash_del: Hash function to delete mac addr from hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to delete from hash table
+ *
+ * Removes mac address from the internal hash table
+ *
+ */
+static int hash_del(struct emac_priv *priv, u8 *mac_addr)
+{
+ u32 hash_value;
+ u32 hash_bit;
+
+ hash_value = hash_get(mac_addr);
+ if (priv->multicast_hash_cnt[hash_value] > 0) {
+ /* dec cntr for num of mcast addr's mapped to this hash bit */
+ --priv->multicast_hash_cnt[hash_value];
+ }
+
+ /* if counter still > 0, at least one multicast address refers
+ * to this hash bit. so return 0 */
+ if (priv->multicast_hash_cnt[hash_value] > 0)
+ return 0;
+
+ if (hash_value < 32) {
+ hash_bit = BIT(hash_value);
+ priv->mac_hash1 &= ~hash_bit;
+ } else {
+ hash_bit = BIT((hash_value - 32));
+ priv->mac_hash2 &= ~hash_bit;
+ }
+
+ /* return 1 to indicate change in mac_hash registers reqd */
+ return 1;
+}
+
+/* EMAC multicast operation */
+#define EMAC_MULTICAST_ADD 0
+#define EMAC_MULTICAST_DEL 1
+#define EMAC_ALL_MULTI_SET 2
+#define EMAC_ALL_MULTI_CLR 3
+
+/**
+ * emac_add_mcast: Set multicast address in the EMAC adapter (Internal)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @action: multicast operation to perform
+ * @mac_addr: mac address to add or delete (NULL for the ALL_MULTI operations)
+ *
+ * Set multicast addresses in EMAC adapter - internal function
+ *
+ */
+static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ int update = -1;
+
+ switch (action) {
+ case EMAC_MULTICAST_ADD:
+ update = hash_add(priv, mac_addr);
+ break;
+ case EMAC_MULTICAST_DEL:
+ update = hash_del(priv, mac_addr);
+ break;
+ case EMAC_ALL_MULTI_SET:
+ update = 1;
+ priv->mac_hash1 = EMAC_ALL_MULTI_REG_VALUE;
+ priv->mac_hash2 = EMAC_ALL_MULTI_REG_VALUE;
+ break;
+ case EMAC_ALL_MULTI_CLR:
+ update = 1;
+ priv->mac_hash1 = 0;
+ priv->mac_hash2 = 0;
+ memset(&(priv->multicast_hash_cnt[0]), 0,
+ sizeof(priv->multicast_hash_cnt[0]) *
+ EMAC_NUM_MULTICAST_BITS);
+ break;
+ default:
+ if (netif_msg_drv(priv))
+ dev_err(emac_dev, "DaVinci EMAC: add_mcast"\
+ ": bad operation %d", action);
+ break;
+ }
+
+	/* write to the hardware only if the register contents change */
+ if (update > 0) {
+ emac_write(EMAC_MACHASH1, priv->mac_hash1);
+ emac_write(EMAC_MACHASH2, priv->mac_hash2);
+ }
+}
+
+/**
+ * emac_dev_mcast_set: Set multicast address in the EMAC adapter
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Set multicast addresses in EMAC adapter
+ *
+ */
+static void emac_dev_mcast_set(struct net_device *ndev)
+{
+ u32 mbp_enable;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ mbp_enable = emac_read(EMAC_RXMBPENABLE);
+ if (ndev->flags & IFF_PROMISC) {
+ mbp_enable &= (~EMAC_MBP_PROMISCCH(EMAC_DEF_PROM_CH));
+ mbp_enable |= (EMAC_MBP_RXPROMISC);
+ } else {
+ mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
+ }
+ if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+ /* program multicast address list into EMAC hardware */
+ netdev_for_each_mc_addr(ha, ndev) {
+ emac_add_mcast(priv, EMAC_MULTICAST_ADD,
+ (u8 *) ha->addr);
+ }
+ } else {
+ mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+ }
+ }
+ /* Set mbp config register */
+ emac_write(EMAC_RXMBPENABLE, mbp_enable);
+}
+
+/*************************************************************************
+ * EMAC Hardware manipulation
+ *************************************************************************/
+
+/**
+ * emac_int_disable: Disable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Disable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_disable(struct emac_priv *priv)
+{
+ if (priv->version == EMAC_VERSION_2) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* Program C0_Int_En to zero to turn off
+ * interrupts to the CPU */
+ emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
+ /* NOTE: Rx Threshold and Misc interrupts are not disabled */
+ if (priv->int_disable)
+ priv->int_disable();
+
+ local_irq_restore(flags);
+
+ } else {
+ /* Set DM644x control registers for interrupt control */
+ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0);
+ }
+}
+
+/**
+ * emac_int_enable: Enable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_enable(struct emac_priv *priv)
+{
+ if (priv->version == EMAC_VERSION_2) {
+ if (priv->int_enable)
+ priv->int_enable();
+
+ emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
+
+ /* In addition to turning on interrupt Enable, we need
+		 * to ack by writing appropriate values to the EOI
+ * register */
+
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+		/* ack rxen - only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+		/* ack txen - only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_TXEN);
+
+ } else {
+ /* Set DM644x control registers for interrupt control */
+ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
+ }
+}
+
+/**
+ * emac_irq: EMAC interrupt handler
+ * @irq: interrupt number
+ * @dev_id: EMAC network adapter data structure ptr
+ *
+ * EMAC interrupt handler - we only schedule NAPI here and do not process
+ * any packets. Even the interrupt status (TX/RX/Err) is checked in the
+ * NAPI poll function
+ *
+ * Returns interrupt handled condition
+ */
+static irqreturn_t emac_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ ++priv->isr_count;
+ if (likely(netif_running(priv->ndev))) {
+ emac_int_disable(priv);
+ napi_schedule(&priv->napi);
+ } else {
+		/* we are closing down, so don't process anything */
+ }
+ return IRQ_HANDLED;
+}
+
+static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
+{
+ struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
+ if (WARN_ON(!skb))
+ return NULL;
+ skb->dev = priv->ndev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ return skb;
+}
+
+static void emac_rx_handler(void *token, int len, int status)
+{
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+ int ret;
+
+ /* free and bail if we are shutting down */
+ if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* recycle on receive error */
+ if (status < 0) {
+ ndev->stats.rx_errors++;
+ goto recycle;
+ }
+
+ /* feed received packet up the stack */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+ /* alloc a new packet for receive */
+ skb = emac_rx_alloc(priv);
+ if (!skb) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "failed rx buffer alloc\n");
+ return;
+ }
+
+recycle:
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ dev_kfree_skb_any(skb);
+}
+
+static void emac_tx_handler(void *token, int len, int status)
+{
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_start_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * emac_dev_xmit: EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * EMAC hardware transmit queue
+ *
+ * Returns success (NETDEV_TX_OK) or an error code (typically out of descriptors)
+ */
+static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct device *emac_dev = &ndev->dev;
+ int ret_code;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ /* If no link, return */
+ if (unlikely(!priv->link)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
+ goto fail_tx;
+ }
+
+ ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+ if (unlikely(ret_code < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
+ goto fail_tx;
+ }
+
+ skb_tx_timestamp(skb);
+
+ ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+ GFP_KERNEL);
+ if (unlikely(ret_code != 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+ goto fail_tx;
+ }
+
+ return NETDEV_TX_OK;
+
+fail_tx:
+ ndev->stats.tx_dropped++;
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+}
+
+/**
+ * emac_dev_tx_timeout: EMAC Transmit timeout function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system detects that a skb timeout period has expired,
+ * potentially due to a fault in the adapter preventing it from sending
+ * the packet out on the wire. We tear down the TX channel assuming a
+ * hardware error and re-initialize it for hardware operation
+ *
+ */
+static void emac_dev_tx_timeout(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+
+ if (netif_msg_tx_err(priv))
+ dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
+
+ emac_dump_regs(priv);
+
+ ndev->stats.tx_errors++;
+ emac_int_disable(priv);
+ cpdma_chan_stop(priv->txchan);
+ cpdma_chan_start(priv->txchan);
+ emac_int_enable(priv);
+}
+
+/**
+ * emac_set_type0addr: Set EMAC Type0 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type0 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ u32 val;
+ val = ((mac_addr[5] << 8) | (mac_addr[4]));
+ emac_write(EMAC_MACSRCADDRLO, val);
+
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACSRCADDRHI, val);
+ val = emac_read(EMAC_RXUNICASTSET);
+ val |= BIT(ch);
+ emac_write(EMAC_RXUNICASTSET, val);
+ val = emac_read(EMAC_RXUNICASTCLEAR);
+ val &= ~BIT(ch);
+ emac_write(EMAC_RXUNICASTCLEAR, val);
+}
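+
+/*
+ * Editor's note (illustrative): for the MAC address 00:11:22:33:44:55 the
+ * packing above yields MACSRCADDRLO = 0x5544 (octets 5 and 4) and
+ * MACSRCADDRHI = 0x33221100 (octets 3 down to 0).
+ */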
+
+/**
+ * emac_set_type1addr: Set EMAC Type1 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type1 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ u32 val;
+ emac_write(EMAC_MACINDEX, ch);
+ val = ((mac_addr[5] << 8) | mac_addr[4]);
+ emac_write(EMAC_MACADDRLO, val);
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACADDRHI, val);
+ emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_set_type2addr: Set EMAC Type2 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ * @index: index into RX address entries
+ * @match: match parameter for RX address matching logic
+ *
+ * Called internally to set Type2 mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
+ char *mac_addr, int index, int match)
+{
+ u32 val;
+ emac_write(EMAC_MACINDEX, index);
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACADDRHI, val);
+ val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) | \
+ (match << 19) | BIT(20));
+ emac_write(EMAC_MACADDRLO, val);
+ emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_setmac: Set mac address in the adapter (internal function)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set the mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+
+ if (priv->rx_addr_type == 0) {
+ emac_set_type0addr(priv, ch, mac_addr);
+ } else if (priv->rx_addr_type == 1) {
+ u32 cnt;
+ for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++)
+ emac_set_type1addr(priv, ch, mac_addr);
+ } else if (priv->rx_addr_type == 2) {
+ emac_set_type2addr(priv, ch, mac_addr, ch, 1);
+ emac_set_type0addr(priv, ch, mac_addr);
+ } else {
+ if (netif_msg_drv(priv))
+ dev_err(emac_dev, "DaVinci EMAC: Wrong addressing\n");
+ }
+}
+
+/**
+ * emac_dev_setmac_addr: Set mac address in the adapter
+ * @ndev: The DaVinci EMAC network adapter
+ * @addr: MAC address to set in device
+ *
+ * Called by the system to set the mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &priv->ndev->dev;
+ struct sockaddr *sa = addr;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+
+ /* Store mac addr in priv and rx channel and set it in EMAC hw */
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+
+ /* MAC address is configured only after the interface is enabled. */
+	if (netif_running(ndev))
+		emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
+ priv->mac_addr);
+
+ return 0;
+}
+
+/**
+ * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enables EMAC hardware for packet processing - enables PHY, enables RX
+ * for packet reception and enables device interrupts and then NAPI
+ *
+ * Returns success (0) or appropriate error code (none right now)
+ */
+static int emac_hw_enable(struct emac_priv *priv)
+{
+ u32 val, mbp_enable, mac_control;
+
+ /* Soft reset */
+ emac_write(EMAC_SOFTRESET, 1);
+ while (emac_read(EMAC_SOFTRESET))
+ cpu_relax();
+
+ /* Disable interrupt & Set pacing for more interrupts initially */
+ emac_int_disable(priv);
+
+ /* Full duplex enable bit set when auto negotiation happens */
+ mac_control =
+ (((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) |
+ ((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) |
+ ((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) |
+ ((priv->duplex == DUPLEX_FULL) ? 0x1 : 0));
+ emac_write(EMAC_MACCONTROL, mac_control);
+
+ mbp_enable =
+ (((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) |
+ ((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) |
+ ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) |
+ ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) |
+ ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) |
+ ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) |
+ ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) |
+ ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_PROMCH_SHIFT) |
+ ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) |
+ ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_BROADCH_SHIFT) |
+ ((EMAC_DEF_MCAST_EN) ? (EMAC_RXMBP_MULTIEN_MASK) : 0x0) |
+ ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_MULTICH_SHIFT));
+ emac_write(EMAC_RXMBPENABLE, mbp_enable);
+ emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE &
+ EMAC_RX_MAX_LEN_MASK));
+ emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET &
+ EMAC_RX_BUFFER_OFFSET_MASK));
+ emac_write(EMAC_RXFILTERLOWTHRESH, 0);
+ emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
+ priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
+
+ emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
+
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+
+ /* Enable MII */
+ val = emac_read(EMAC_MACCONTROL);
+ val |= (EMAC_MACCONTROL_GMIIEN);
+ emac_write(EMAC_MACCONTROL, val);
+
+ /* Enable NAPI and interrupts */
+ napi_enable(&priv->napi);
+ emac_int_enable(priv);
+ return 0;
+}
+
+/**
+ * emac_poll: EMAC NAPI Poll function
+ * @ndev: The DaVinci EMAC network adapter
+ * @budget: Number of receive packets to process (as told by NAPI layer)
+ *
+ * NAPI Poll function implemented to process packets as per budget. We check
+ * the type of interrupt on the device and accordingly call the TX or RX
+ * packet processing functions. We follow the budget for RX processing and
+ * also put a cap on the number of TX packets processed through a config
+ * parameter. NAPI is rescheduled if more packets are pending.
+ *
+ * Returns the number of packets received (in most cases; rarely, the number
+ * of TX packets instead)
+ */
+static int emac_poll(struct napi_struct *napi, int budget)
+{
+ unsigned int mask;
+ struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct device *emac_dev = &ndev->dev;
+ u32 status = 0;
+ u32 num_tx_pkts = 0, num_rx_pkts = 0;
+
+ /* Check interrupt vectors and call packet processing */
+ status = emac_read(EMAC_MACINVECTOR);
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC;
+
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
+
+ if (status & mask) {
+ num_tx_pkts = cpdma_chan_process(priv->txchan,
+ EMAC_DEF_TX_MAX_SERVICE);
+ } /* TX processing */
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
+
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
+
+ if (status & mask) {
+ num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
+ } /* RX processing */
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+ if (unlikely(status & mask)) {
+ u32 ch, cause;
+ dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ status = emac_read(EMAC_MACSTATUS);
+ cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >>
+ EMAC_MACSTATUS_TXERRCODE_SHIFT);
+ if (cause) {
+ ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >>
+ EMAC_MACSTATUS_TXERRCH_SHIFT);
+ if (net_ratelimit()) {
+ dev_err(emac_dev, "TX Host error %s on ch=%d\n",
+ &emac_txhost_errcodes[cause][0], ch);
+ }
+ }
+ cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >>
+ EMAC_MACSTATUS_RXERRCODE_SHIFT);
+ if (cause) {
+ ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >>
+ EMAC_MACSTATUS_RXERRCH_SHIFT);
+ if (netif_msg_hw(priv) && net_ratelimit())
+ dev_err(emac_dev, "RX Host error %s on ch=%d\n",
+ &emac_rxhost_errcodes[cause][0], ch);
+ }
+ } else if (num_rx_pkts < budget) {
+ napi_complete(napi);
+ emac_int_enable(priv);
+ }
+
+ return num_rx_pkts;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * emac_poll_controller: EMAC Poll controller function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Polled functionality used by netconsole and others in non-interrupt mode
+ *
+ */
+static void emac_poll_controller(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ emac_int_disable(priv);
+ emac_irq(ndev->irq, ndev);
+ emac_int_enable(priv);
+}
+#endif
+
+static void emac_adjust_link(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned long flags;
+ int new_state = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != priv->duplex) {
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+ }
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+ if (!priv->link) {
+ new_state = 1;
+ priv->link = 1;
+ }
+
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = ~0;
+ }
+ if (new_state) {
+ emac_update_phystatus(priv);
+ phy_print_status(priv->phydev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*************************************************************************
+ * Linux Driver Model
+ *************************************************************************/
+
+/**
+ * emac_devioctl: EMAC adapter ioctl
+ * @ndev: The DaVinci EMAC network adapter
+ * @ifrq: request parameter
+ * @cmd: command parameter
+ *
+ * EMAC driver ioctl function
+ *
+ * Returns success(0) or appropriate error code
+ */
+static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ if (!(netif_running(ndev)))
+ return -EINVAL;
+
+ /* TODO: Add phy read and write and private statistics get feature */
+
+ return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+}
+
+static int match_first_device(struct device *dev, void *data)
+{
+ return 1;
+}
+
+/**
+ * emac_dev_open: EMAC device open
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system wants to start the interface. We init TX/RX channels
+ * and enable the hardware for packet reception/transmission and start the
+ * network queue.
+ *
+ * Returns 0 for a successful open, or appropriate error code
+ */
+static int emac_dev_open(struct net_device *ndev)
+{
+ struct device *emac_dev = &ndev->dev;
+ u32 cnt;
+ struct resource *res;
+ int q, m, ret;
+ int i = 0;
+ int k = 0;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ netif_carrier_off(ndev);
+ for (cnt = 0; cnt < ETH_ALEN; cnt++)
+ ndev->dev_addr[cnt] = priv->mac_addr[cnt];
+
+ /* Configuration items */
+ priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
+
+ priv->mac_hash1 = 0;
+ priv->mac_hash2 = 0;
+ emac_write(EMAC_MACHASH1, 0);
+ emac_write(EMAC_MACHASH2, 0);
+
+ for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+ struct sk_buff *skb = emac_rx_alloc(priv);
+
+ if (!skb)
+ break;
+
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+
+ /* Request IRQ */
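+	/* (each IRQ resource may describe a whole range of interrupt lines) */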
+
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
+ for (i = res->start; i <= res->end; i++) {
+ if (request_irq(i, emac_irq, IRQF_DISABLED,
+ ndev->name, ndev))
+ goto rollback;
+ }
+ k++;
+ }
+
+ /* Start/Enable EMAC hardware */
+ emac_hw_enable(priv);
+
+ /* Enable Interrupt pacing if configured */
+ if (priv->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ emac_set_coalesce(ndev, &coal);
+ }
+
+ cpdma_ctlr_start(priv->dma);
+
+ priv->phydev = NULL;
+ /* use the first phy on the bus if pdata did not give us a phy id */
+ if (!priv->phy_id) {
+ struct device *phy;
+
+ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+ match_first_device);
+ if (phy)
+ priv->phy_id = dev_name(phy);
+ }
+
+ if (priv->phy_id && *priv->phy_id) {
+ priv->phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+		if (IS_ERR(priv->phydev)) {
+			dev_err(emac_dev, "could not connect to phy %s\n",
+				priv->phy_id);
+			ret = PTR_ERR(priv->phydev);
+			priv->phydev = NULL;
+			return ret;
+		}
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = ~0;
+
+ dev_info(emac_dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, id=%x)\n",
+ priv->phydev->drv->name, dev_name(&priv->phydev->dev),
+ priv->phydev->phy_id);
+ } else {
+		/* No PHY: force the link, speed and duplex settings */
+ dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
+ priv->link = 1;
+ priv->speed = SPEED_100;
+ priv->duplex = DUPLEX_FULL;
+ emac_update_phystatus(priv);
+ }
+
+ if (!netif_running(ndev)) /* debug only - to avoid compiler warning */
+ emac_dump_regs(priv);
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ return 0;
+
+rollback:
+
+	dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n");
+
+	/* free whatever was requested before the failure: first the
+	 * partially requested resource k, then all earlier resources */
+	for (m = res->start; m < i; m++)
+		free_irq(m, ndev);
+	for (q = 0; q < k; q++) {
+		res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+		for (m = res->start; m <= res->end; m++)
+			free_irq(m, ndev);
+	}
+ return -EBUSY;
+}
+
+/**
+ * emac_dev_stop: EMAC device stop
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system wants to stop or bring down the interface. We stop
+ * the network queue, disable interrupts and clean up the TX/RX channels.
+ *
+ * Returns 0 on success
+ */
+static int emac_dev_stop(struct net_device *ndev)
+{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+
+ /* inform the upper layers. */
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ netif_carrier_off(ndev);
+ emac_int_disable(priv);
+ cpdma_ctlr_stop(priv->dma);
+ emac_write(EMAC_SOFTRESET, 1);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ /* Free IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+
+ return 0;
+}
+
+/**
+ * emac_dev_getnetstats: EMAC get statistics function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system wants to get statistics from the device.
+ *
+ * We return the statistics in the net_device_stats structure pulled from the
+ * EMAC hardware counters
+ */
+static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 mac_control;
+ u32 stats_clear_mask;
+
+	/* update emac hardware stats and reset the registers */
+
+ mac_control = emac_read(EMAC_MACCONTROL);
+
+ if (mac_control & EMAC_MACCONTROL_GMIIEN)
+ stats_clear_mask = EMAC_STATS_CLR_MASK;
+ else
+ stats_clear_mask = 0;
+
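+	/*
+	 * Each counter is accumulated into ndev->stats and then cleared by
+	 * writing stats_clear_mask back; the mask depends on GMIIEN because
+	 * the write semantics of the statistics registers change with it.
+	 */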
+ ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
+ emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
+
+ ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) +
+ emac_read(EMAC_TXSINGLECOLL) +
+ emac_read(EMAC_TXMULTICOLL));
+ emac_write(EMAC_TXCOLLISION, stats_clear_mask);
+ emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
+ emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
+
+ ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
+ emac_read(EMAC_RXJABBER) +
+ emac_read(EMAC_RXUNDERSIZED));
+ emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
+ emac_write(EMAC_RXJABBER, stats_clear_mask);
+ emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
+
+ ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
+ emac_read(EMAC_RXMOFOVERRUNS));
+ emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
+ emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
+
+ ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
+ emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
+
+ ndev->stats.tx_carrier_errors +=
+ emac_read(EMAC_TXCARRIERSENSE);
+ emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
+
+ ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
+ emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+
+ return &ndev->stats;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_dev_open,
+ .ndo_stop = emac_dev_stop,
+ .ndo_start_xmit = emac_dev_xmit,
+ .ndo_set_multicast_list = emac_dev_mcast_set,
+ .ndo_set_mac_address = emac_dev_setmac_addr,
+ .ndo_do_ioctl = emac_devioctl,
+ .ndo_tx_timeout = emac_dev_tx_timeout,
+ .ndo_get_stats = emac_dev_getnetstats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = emac_poll_controller,
+#endif
+};
+
+/**
+ * davinci_emac_probe: EMAC device probe
+ * @pdev: The DaVinci EMAC device that we are probing
+ *
+ * Called when probing for emac devices. We get details of instances and
+ * resource information from platform init and register a network device
+ * and allocate resources necessary for the driver to operate
+ */
+static int __devinit davinci_emac_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct resource *res;
+ struct net_device *ndev;
+ struct emac_priv *priv;
+ unsigned long size, hw_ram_addr;
+ struct emac_platform_data *pdata;
+ struct device *emac_dev;
+ struct cpdma_params dma_params;
+
+ /* obtain emac clock from kernel */
+ emac_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(emac_clk)) {
+ dev_err(&pdev->dev, "failed to get EMAC clock\n");
+ return -EBUSY;
+ }
+ emac_bus_frequency = clk_get_rate(emac_clk);
+ /* TODO: Probe PHY here if possible */
+
+ ndev = alloc_etherdev(sizeof(struct emac_priv));
+ if (!ndev) {
+ dev_err(&pdev->dev, "error allocating net_device\n");
+ rc = -ENOMEM;
+ goto free_clk;
+ }
+
+ platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
+
+ spin_lock_init(&priv->lock);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ rc = -ENODEV;
+ goto probe_quit;
+ }
+
+	/* MAC addr, PHY mask and RMII enable info from platform_data */
+ memcpy(priv->mac_addr, pdata->mac_addr, 6);
+ priv->phy_id = pdata->phy_id;
+ priv->rmii_en = pdata->rmii_en;
+ priv->version = pdata->version;
+ priv->int_enable = pdata->interrupt_enable;
+ priv->int_disable = pdata->interrupt_disable;
+
+ priv->coal_intvl = 0;
+ priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
+
+ emac_dev = &ndev->dev;
+	/* Get EMAC IO memory resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+		dev_err(&pdev->dev, "error getting memory resource\n");
+ rc = -ENOENT;
+ goto probe_quit;
+ }
+
+ priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
+ size = resource_size(res);
+ if (!request_mem_region(res->start, size, ndev->name)) {
+ dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
+ rc = -ENXIO;
+ goto probe_quit;
+ }
+
+ priv->remap_addr = ioremap(res->start, size);
+ if (!priv->remap_addr) {
+ dev_err(&pdev->dev, "unable to map IO\n");
+ rc = -ENOMEM;
+ release_mem_region(res->start, size);
+ goto probe_quit;
+ }
+ priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
+ ndev->base_addr = (unsigned long)priv->remap_addr;
+
+ priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+
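+	/* fall back to descriptor RAM inside this device's register window
+	 * when platform data does not supply an explicit hw_ram_addr */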
+ hw_ram_addr = pdata->hw_ram_addr;
+ if (!hw_ram_addr)
+ hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ dma_params.dev = emac_dev;
+ dma_params.dmaregs = priv->emac_base;
+ dma_params.rxthresh = priv->emac_base + 0x120;
+ dma_params.rxfree = priv->emac_base + 0x140;
+ dma_params.txhdp = priv->emac_base + 0x600;
+ dma_params.rxhdp = priv->emac_base + 0x620;
+ dma_params.txcp = priv->emac_base + 0x640;
+ dma_params.rxcp = priv->emac_base + 0x660;
+ dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
+ dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
+ dma_params.desc_hw_addr = hw_ram_addr;
+ dma_params.desc_mem_size = pdata->ctrl_ram_size;
+ dma_params.desc_align = 16;
+
+ dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
+ (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ priv->dma = cpdma_ctlr_create(&dma_params);
+ if (!priv->dma) {
+ dev_err(&pdev->dev, "error initializing DMA\n");
+ rc = -ENOMEM;
+ goto no_dma;
+ }
+
+ priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
+ emac_tx_handler);
+ priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
+ emac_rx_handler);
+ if (WARN_ON(!priv->txchan || !priv->rxchan)) {
+ rc = -ENOMEM;
+ goto no_irq_res;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "error getting irq res\n");
+ rc = -ENOENT;
+ goto no_irq_res;
+ }
+ ndev->irq = res->start;
+
+ if (!is_valid_ether_addr(priv->mac_addr)) {
+ /* Use random MAC if none passed */
+ random_ether_addr(priv->mac_addr);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
+ }
+
+ ndev->netdev_ops = &emac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+ netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+
+ clk_enable(emac_clk);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "error in register_netdev\n");
+ rc = -ENODEV;
+ goto netdev_reg_err;
+ }
+
+	if (netif_msg_probe(priv)) {
+		dev_notice(emac_dev, "DaVinci EMAC Probe found device "
+ "(regs: %p, irq: %d)\n",
+ (void *)priv->emac_base_phys, ndev->irq);
+ }
+ return 0;
+
+netdev_reg_err:
+ clk_disable(emac_clk);
+no_irq_res:
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+no_dma:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+ iounmap(priv->remap_addr);
+
+probe_quit:
+ free_netdev(ndev);
+free_clk:
+ clk_put(emac_clk);
+ return rc;
+}
+
+/**
+ * davinci_emac_remove: EMAC device remove
+ * @pdev: The DaVinci EMAC device that we are removing
+ *
+ * Called when removing the device driver. We disable clock usage and release
+ * the resources taken up by the driver and unregister the network device
+ */
+static int __devexit davinci_emac_remove(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
+
+	platform_set_drvdata(pdev, NULL);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	/* take the interface down before releasing its resources */
+	unregister_netdev(ndev);
+
+	if (priv->txchan)
+		cpdma_chan_destroy(priv->txchan);
+	if (priv->rxchan)
+		cpdma_chan_destroy(priv->rxchan);
+	cpdma_ctlr_destroy(priv->dma);
+
+	release_mem_region(res->start, resource_size(res));
+	iounmap(priv->remap_addr);
+	free_netdev(ndev);
+
+ clk_disable(emac_clk);
+ clk_put(emac_clk);
+
+ return 0;
+}
+
+static int davinci_emac_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ if (netif_running(ndev))
+ emac_dev_stop(ndev);
+
+ clk_disable(emac_clk);
+
+ return 0;
+}
+
+static int davinci_emac_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ clk_enable(emac_clk);
+
+ if (netif_running(ndev))
+ emac_dev_open(ndev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_emac_pm_ops = {
+ .suspend = davinci_emac_suspend,
+ .resume = davinci_emac_resume,
+};
+
+/**
+ * davinci_emac_driver: EMAC platform driver structure
+ */
+static struct platform_driver davinci_emac_driver = {
+ .driver = {
+ .name = "davinci_emac",
+ .owner = THIS_MODULE,
+ .pm = &davinci_emac_pm_ops,
+ },
+ .probe = davinci_emac_probe,
+ .remove = __devexit_p(davinci_emac_remove),
+};
+
+/**
+ * davinci_emac_init: EMAC driver module init
+ *
+ * Called when initializing the driver. We register the driver with
+ * the platform.
+ */
+static int __init davinci_emac_init(void)
+{
+ return platform_driver_register(&davinci_emac_driver);
+}
+late_initcall(davinci_emac_init);
+
+/**
+ * davinci_emac_exit: EMAC driver module exit
+ *
+ * Called when exiting the driver completely. We unregister the driver with
+ * the platform and exit
+ */
+static void __exit davinci_emac_exit(void)
+{
+ platform_driver_unregister(&davinci_emac_driver);
+}
+module_exit(davinci_emac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Anant Gole <anantgole@ti.com>");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Chaithrika U S <chaithrika@ti.com>");
+MODULE_DESCRIPTION("DaVinci EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
new file mode 100644
index 000000000000..7615040df756
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -0,0 +1,475 @@
+/*
+ * DaVinci MDIO Module driver
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/davinci_emac.h>
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define MDIO_TIMEOUT 100 /* msecs */
+
+#define PHY_REG_MASK 0x1f
+#define PHY_ID_MASK 0x1f
+
+#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+
+struct davinci_mdio_regs {
+ u32 version;
+ u32 control;
+#define CONTROL_IDLE BIT(31)
+#define CONTROL_ENABLE BIT(30)
+#define CONTROL_MAX_DIV (0xff)
+
+ u32 alive;
+ u32 link;
+ u32 linkintraw;
+ u32 linkintmasked;
+ u32 __reserved_0[2];
+ u32 userintraw;
+ u32 userintmasked;
+ u32 userintmaskset;
+ u32 userintmaskclr;
+ u32 __reserved_1[20];
+
+ struct {
+ u32 access;
+#define USERACCESS_GO BIT(31)
+#define USERACCESS_WRITE BIT(30)
+#define USERACCESS_ACK BIT(29)
+#define USERACCESS_READ (0)
+#define USERACCESS_DATA (0xffff)
+
+ u32 physel;
+ } user[0];
+};
+
+static struct mdio_platform_data default_pdata = {
+ .bus_freq = DEF_OUT_FREQ,
+};
+
+struct davinci_mdio_data {
+ struct mdio_platform_data pdata;
+ struct davinci_mdio_regs __iomem *regs;
+ spinlock_t lock;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *bus;
+ bool suspended;
+ unsigned long access_time; /* jiffies */
+};
+
+static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+{
+ u32 mdio_in, div, mdio_out_khz, access_time;
+
+ mdio_in = clk_get_rate(data->clk);
+ div = (mdio_in / data->pdata.bus_freq) - 1;
+ if (div > CONTROL_MAX_DIV)
+ div = CONTROL_MAX_DIV;
+
+ /* set enable and clock divider */
+ __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
+
+ /*
+ * One mdio transaction consists of:
+ * 32 bits of preamble
+ * 32 bits of transferred data
+ * 24 bits of bus yield (not needed unless shared?)
+ */
+ mdio_out_khz = mdio_in / (1000 * (div + 1));
+ access_time = (88 * 1000) / mdio_out_khz;
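+	/*
+	 * Example (assuming a 100 MHz input clock and the default 2.2 MHz
+	 * bus_freq): div = 44, mdio_out_khz = 2222, so one 88-bit
+	 * transaction takes roughly 39 usecs.
+	 */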
+
+ /*
+ * In the worst case, we could be kicking off a user-access immediately
+ * after the mdio bus scan state-machine triggered its own read. If
+ * so, our request could get deferred by one access cycle. We
+ * defensively allow for 4 access cycles.
+ */
+ data->access_time = usecs_to_jiffies(access_time * 4);
+ if (!data->access_time)
+ data->access_time = 1;
+}
+
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 phy_mask, ver;
+
+ __davinci_mdio_reset(data);
+
+ /* wait for scan logic to settle */
+ msleep(PHY_MAX_ADDR * data->access_time);
+
+ /* dump hardware version info */
+ ver = __raw_readl(&data->regs->version);
+ dev_info(data->dev, "davinci mdio revision %d.%d\n",
+ (ver >> 8) & 0xff, ver & 0xff);
+
+ /* get phy mask from the alive register */
+ phy_mask = __raw_readl(&data->regs->alive);
+ if (phy_mask) {
+ /* restrict mdio bus to live phys only */
+ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
+ phy_mask = ~phy_mask;
+ } else {
+ /* desperately scan all phys */
+ dev_warn(data->dev, "no live phy, scanning all\n");
+ phy_mask = 0;
+ }
+ data->bus->phy_mask = phy_mask;
+
+ return 0;
+}
+
+/* wait until hardware is ready for another user access */
+static inline int wait_for_user_access(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+ u32 reg;
+
+ while (time_after(timeout, jiffies)) {
+ reg = __raw_readl(&regs->user[0].access);
+ if ((reg & USERACCESS_GO) == 0)
+ return 0;
+
+ reg = __raw_readl(&regs->control);
+ if ((reg & CONTROL_IDLE) == 0)
+ continue;
+
+ /*
+ * An emac soft_reset may have clobbered the mdio controller's
+ * state machine. We need to reset and retry the current
+ * operation
+ */
+ dev_warn(data->dev, "resetting idled controller\n");
+ __davinci_mdio_reset(data);
+ return -EAGAIN;
+ }
+ dev_err(data->dev, "timed out waiting for user access\n");
+ return -ETIMEDOUT;
+}
+
+/* wait until hardware state machine is idle */
+static inline int wait_for_idle(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+
+ while (time_after(timeout, jiffies)) {
+ if (__raw_readl(&regs->control) & CONTROL_IDLE)
+ return 0;
+ }
+ dev_err(data->dev, "timed out waiting for idle\n");
+ return -ETIMEDOUT;
+}
+
+static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
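+	/*
+	 * Build the USERACCESS word: GO in bit 31, READ, the register
+	 * address in bits 25:21 and the phy address in bits 20:16, as
+	 * implied by the shifts below.
+	 */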
+ reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
+ (phy_id << 16));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ reg = __raw_readl(&data->regs->user[0].access);
+ ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+ return ret;
+}
+
+static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
+ int phy_reg, u16 phy_data)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
+ reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
+ (phy_id << 16) | (phy_data & USERACCESS_DATA));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+	return ret;
+}
+
+static int __devinit davinci_mdio_probe(struct platform_device *pdev)
+{
+ struct mdio_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data;
+ struct resource *res;
+ struct phy_device *phy;
+ int ret, addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "failed to alloc device data\n");
+ return -ENOMEM;
+ }
+
+ data->pdata = pdata ? (*pdata) : default_pdata;
+
+ data->bus = mdiobus_alloc();
+ if (!data->bus) {
+ dev_err(dev, "failed to alloc mii bus\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ data->bus->name = dev_name(dev);
+	data->bus->read = davinci_mdio_read;
+	data->bus->write = davinci_mdio_write;
+	data->bus->reset = davinci_mdio_reset;
+ data->bus->parent = dev;
+ data->bus->priv = data;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+	data->clk = clk_get(dev, NULL);
+	if (IS_ERR(data->clk)) {
+		dev_err(dev, "failed to get device clock\n");
+		ret = PTR_ERR(data->clk);
+		data->clk = NULL;
+		goto bail_out;
+	}
+
+ clk_enable(data->clk);
+
+ dev_set_drvdata(dev, data);
+ data->dev = dev;
+ spin_lock_init(&data->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "could not find register map resource\n");
+ ret = -ENOENT;
+ goto bail_out;
+ }
+
+ res = devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev));
+ if (!res) {
+ dev_err(dev, "could not allocate register map resource\n");
+ ret = -ENXIO;
+ goto bail_out;
+ }
+
+ data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!data->regs) {
+ dev_err(dev, "could not map mdio registers\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ /* register the mii bus */
+ ret = mdiobus_register(data->bus);
+ if (ret)
+ goto bail_out;
+
+ /* scan and dump the bus */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phy = data->bus->phy_map[addr];
+ if (phy) {
+ dev_info(dev, "phy[%d]: device %s, driver %s\n",
+ phy->addr, dev_name(&phy->dev),
+ phy->drv ? phy->drv->name : "unknown");
+ }
+ }
+
+ return 0;
+
+bail_out:
+ if (data->bus)
+ mdiobus_free(data->bus);
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit davinci_mdio_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+	if (data->bus) {
+		mdiobus_unregister(data->bus);
+		mdiobus_free(data->bus);
+	}
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ dev_set_drvdata(dev, NULL);
+
+ kfree(data);
+
+ return 0;
+}
+
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+
+ /* shutdown the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl &= ~CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+ wait_for_idle(data);
+
+ if (data->clk)
+ clk_disable(data->clk);
+
+ data->suspended = true;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static int davinci_mdio_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+ if (data->clk)
+ clk_enable(data->clk);
+
+ /* restart the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl |= CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+
+ data->suspended = false;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ .suspend = davinci_mdio_suspend,
+ .resume = davinci_mdio_resume,
+};
+
+static struct platform_driver davinci_mdio_driver = {
+ .driver = {
+ .name = "davinci_mdio",
+ .owner = THIS_MODULE,
+ .pm = &davinci_mdio_pm_ops,
+ },
+ .probe = davinci_mdio_probe,
+ .remove = __devexit_p(davinci_mdio_remove),
+};
+
+static int __init davinci_mdio_init(void)
+{
+ return platform_driver_register(&davinci_mdio_driver);
+}
+device_initcall(davinci_mdio_init);
+
+static void __exit davinci_mdio_exit(void)
+{
+ platform_driver_unregister(&davinci_mdio_driver);
+}
+module_exit(davinci_mdio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci MDIO driver");
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
new file mode 100644
index 000000000000..145871b3130b
--- /dev/null
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -0,0 +1,3258 @@
+/*******************************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.c
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1998 James Banks
+ * (C) 1999-2001 Torben Mathiasen
+ * (C) 2002 Samuel Chessman
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ ** Useful (if not required) reading:
+ *
+ * Texas Instruments, ThunderLAN Programmer's Guide,
+ * TI Literature Number SPWU013A
+ * available in PDF format from www.ti.com
+ * Level One, LXT901 and LXT970 Data Sheets
+ * available in PDF format from www.level1.com
+ * National Semiconductor, DP83840A Data Sheet
+ * available in PDF format from www.national.com
+ * Microchip Technology, 24C01A/02A/04A Data Sheet
+ * available in PDF format from www.microchip.com
+ *
+ ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+
+#include "tlan.h"
+
+
+/* For removing EISA devices */
+static struct net_device *tlan_eisa_devices;
+
+static int tlan_devices_installed;
+
+/* Set speed, duplex and aui settings */
+static int aui[MAX_TLAN_BOARDS];
+static int duplex[MAX_TLAN_BOARDS];
+static int speed[MAX_TLAN_BOARDS];
+static int boards_found;
+module_param_array(aui, int, NULL, 0);
+module_param_array(duplex, int, NULL, 0);
+module_param_array(speed, int, NULL, 0);
+MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
+MODULE_PARM_DESC(duplex,
+ "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
+
+MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
+MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
+MODULE_LICENSE("GPL");
+
+
+/* Define this to enable Link beat monitoring */
+#undef MONITOR
+
+/* Turn on debugging. See Documentation/networking/tlan.txt for details */
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
+
+static const char tlan_signature[] = "TLAN";
+static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
+static int tlan_have_pci;
+static int tlan_have_eisa;
+
+static const char * const media[] = {
+ "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+ "100BaseTx-FD", "100BaseT4", NULL
+};
+
+static struct board {
+ const char *device_label;
+ u32 flags;
+ u16 addr_ofs;
+} board_info[] = {
+ { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq NetFlex-3/P",
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+ { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent Integrated 10/100 TX UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
+ { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+ { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
+ { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
+
+static void tlan_eisa_probe(void);
+static void tlan_eisa_cleanup(void);
+static int tlan_init(struct net_device *);
+static int tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int tlan_close(struct net_device *);
+static struct net_device_stats *tlan_get_stats(struct net_device *);
+static void tlan_set_multicast_list(struct net_device *);
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
+ int irq, int rev, const struct pci_device_id *ent);
+static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout_work(struct work_struct *work);
+static int tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static u32 tlan_handle_tx_eof(struct net_device *, u16);
+static u32 tlan_handle_stat_overflow(struct net_device *, u16);
+static u32 tlan_handle_rx_eof(struct net_device *, u16);
+static u32 tlan_handle_dummy(struct net_device *, u16);
+static u32 tlan_handle_tx_eoc(struct net_device *, u16);
+static u32 tlan_handle_status_check(struct net_device *, u16);
+static u32 tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void tlan_timer(unsigned long);
+
+static void tlan_reset_lists(struct net_device *);
+static void tlan_free_lists(struct net_device *);
+static void tlan_print_dio(u16);
+static void tlan_print_list(struct tlan_list *, char *, int);
+static void tlan_read_and_clear_stats(struct net_device *, int);
+static void tlan_reset_adapter(struct net_device *);
+static void tlan_finish_reset(struct net_device *);
+static void tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void tlan_phy_print(struct net_device *);
+static void tlan_phy_detect(struct net_device *);
+static void tlan_phy_power_down(struct net_device *);
+static void tlan_phy_power_up(struct net_device *);
+static void tlan_phy_reset(struct net_device *);
+static void tlan_phy_start_link(struct net_device *);
+static void tlan_phy_finish_auto_neg(struct net_device *);
+#ifdef MONITOR
+static void tlan_phy_monitor(struct net_device *);
+#endif
+
+/*
+ static int tlan_phy_nop(struct net_device *);
+ static int tlan_phy_internal_check(struct net_device *);
+ static int tlan_phy_internal_service(struct net_device *);
+ static int tlan_phy_dp83840a_check(struct net_device *);
+*/
+
+static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_send_data(u16, u32, unsigned);
+static void tlan_mii_sync(u16);
+static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
+
+static void tlan_ee_send_start(u16);
+static int tlan_ee_send_byte(u16, u8, int);
+static void tlan_ee_receive_byte(u16, u8 *, int);
+static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
+
+
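+/*
+ * The skb pointer is stashed across two 32-bit list-entry words so the
+ * driver also works on 64-bit platforms; the double 16-bit shift in
+ * tlan_get_skb() avoids an undefined shift-by-32 on 32-bit builds.
+ */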
+static inline void
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
+{
+ unsigned long addr = (unsigned long)skb;
+ tag->buffer[9].address = addr;
+ tag->buffer[8].address = upper_32_bits(addr);
+}
+
+static inline struct sk_buff *
+tlan_get_skb(const struct tlan_list *tag)
+{
+ unsigned long addr;
+
+ addr = tag->buffer[9].address;
+ addr |= (tag->buffer[8].address << 16) << 16;
+ return (struct sk_buff *) addr;
+}
+
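+/*
+ * Interrupt dispatch table: the interrupt type field of TLAN_HOST_INT
+ * (host_int >> 2 in tlan_handle_interrupt()) selects the handler.
+ */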
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
+ NULL,
+ tlan_handle_tx_eof,
+ tlan_handle_stat_overflow,
+ tlan_handle_rx_eof,
+ tlan_handle_dummy,
+ tlan_handle_tx_eoc,
+ tlan_handle_status_check,
+ tlan_handle_rx_eoc
+};
+
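+/*
+ * Note: callers in interrupt context already hold priv->lock (taken in
+ * tlan_handle_interrupt()), hence the in_irq() checks below.
+ */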
+static inline void
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function != NULL &&
+ priv->timer_type != TLAN_TIMER_ACTIVITY) {
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+ priv->timer.function = tlan_timer;
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ priv->timer.data = (unsigned long) dev;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = type;
+ mod_timer(&priv->timer, jiffies + ticks);
+
+}
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver primary functions
+
+These functions are more or less common to all Linux network drivers.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+
+/***************************************************************
+ * tlan_remove_one
+ *
+ * Returns:
+ * Nothing
+ *	Parms:
+ *		pdev	The PCI device being removed
+ *
+ *	Goes through the TLanDevices list and frees the device
+ *	structs and memory associated with each device (lists
+ *	and buffers).  It also unreserves the IO port regions
+ * associated with this device.
+ *
+ **************************************************************/
+
+
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev,
+ priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
+ }
+
+#ifdef CONFIG_PCI
+ pci_release_regions(pdev);
+#endif
+
+ free_netdev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void tlan_start(struct net_device *dev)
+{
+ tlan_reset_lists(dev);
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+ /* Reset and power down phy */
+ tlan_reset_adapter(dev);
+ if (priv->timer.function != NULL) {
+ del_timer_sync(&priv->timer);
+ priv->timer.function = NULL;
+ }
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev))
+ tlan_stop(dev);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int tlan_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, 0, 0);
+ netif_device_attach(dev);
+
+ if (netif_running(dev))
+ tlan_start(dev);
+
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend NULL
+#define tlan_resume NULL
+
+#endif /* CONFIG_PM */
+
+
+static struct pci_driver tlan_driver = {
+ .name = "tlan",
+ .id_table = tlan_pci_tbl,
+ .probe = tlan_init_one,
+ .remove = __devexit_p(tlan_remove_one),
+ .suspend = tlan_suspend,
+ .resume = tlan_resume,
+};
+
+static int __init tlan_probe(void)
+{
+ int rc = -ENODEV;
+
+ pr_info("%s", tlan_banner);
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
+
+ /* Use new style PCI probing. Now the kernel will
+ do most of this for us */
+ rc = pci_register_driver(&tlan_driver);
+
+ if (rc != 0) {
+ pr_err("Could not register pci driver\n");
+ goto err_out_pci_free;
+ }
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
+ tlan_eisa_probe();
+
+ pr_info("%d device%s installed, PCI: %d EISA: %d\n",
+ tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+ tlan_have_pci, tlan_have_eisa);
+
+ if (tlan_devices_installed == 0) {
+ rc = -ENODEV;
+ goto err_out_pci_unreg;
+ }
+ return 0;
+
+err_out_pci_unreg:
+ pci_unregister_driver(&tlan_driver);
+err_out_pci_free:
+ return rc;
+}
+
+
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ return tlan_probe1(pdev, -1, -1, 0, ent);
+}
+
+
+/*
+***************************************************************
+* tlan_probe1
+*
+* Returns:
+* 0 on success, error code on error
+* Parms:
+*	pdev	PCI device to set up (NULL for EISA adapters)
+*
+* The name is lower case to fit in with all the rest of
+* the netcard_probe names. This function looks for
+* another TLan based adapter, setting it up with the
+* allocated device struct if one is found.
+* tlan_probe has been ported to the new net API and
+* now allocates its own device structure. This function
+* is also used by modules.
+*
+**************************************************************/
+
+static int __devinit tlan_probe1(struct pci_dev *pdev,
+ long ioaddr, int irq, int rev,
+ const struct pci_device_id *ent)
+{
+
+ struct net_device *dev;
+ struct tlan_priv *priv;
+ u16 device_id;
+ int reg, rc = -ENODEV;
+
+#ifdef CONFIG_PCI
+ if (pdev) {
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, tlan_signature);
+ if (rc) {
+ pr_err("Could not reserve IO regions\n");
+ goto err_out;
+ }
+ }
+#endif /* CONFIG_PCI */
+
+ dev = alloc_etherdev(sizeof(struct tlan_priv));
+ if (dev == NULL) {
+ pr_err("Could not allocate memory for device\n");
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+
+ priv->pci_dev = pdev;
+ priv->dev = dev;
+
+ /* Is this a PCI device? */
+ if (pdev) {
+ u32 pci_io_base = 0;
+
+ priv->adapter = &board_info[ent->driver_data];
+
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc) {
+ pr_err("No suitable PCI mapping available\n");
+ goto err_out_free_dev;
+ }
+
+ for (reg = 0; reg <= 5; reg++) {
+ if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
+ pci_io_base = pci_resource_start(pdev, reg);
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "IO mapping is available at %x.\n",
+ pci_io_base);
+ break;
+ }
+ }
+ if (!pci_io_base) {
+ pr_err("No IO mappings available\n");
+ rc = -EIO;
+ goto err_out_free_dev;
+ }
+
+ dev->base_addr = pci_io_base;
+ dev->irq = pdev->irq;
+ priv->adapter_rev = pdev->revision;
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, dev);
+
+ } else { /* EISA card */
+ /* This is a hack. We need to know which board structure
+ * is suited for this adapter */
+ device_id = inw(ioaddr + EISA_ID2);
+ priv->is_eisa = 1;
+ if (device_id == 0x20F1) {
+ priv->adapter = &board_info[13]; /* NetFlex-3/E */
+ priv->adapter_rev = 23; /* TLAN 2.3 */
+ } else {
+ priv->adapter = &board_info[14];
+ priv->adapter_rev = 10; /* TLAN 1.0 */
+ }
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ }
+
+ /* Kernel parameters */
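+	/* dev->mem_start, when set, encodes aui in bit 0, duplex in bits
+	 * 2:1 and speed in bits 4:3; dev->mem_end carries the debug mask */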
+ if (dev->mem_start) {
+ priv->aui = dev->mem_start & 0x01;
+ priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
+ : (dev->mem_start & 0x06) >> 1;
+ priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
+ : (dev->mem_start & 0x18) >> 3;
+
+ if (priv->speed == 0x1)
+ priv->speed = TLAN_SPEED_10;
+ else if (priv->speed == 0x2)
+ priv->speed = TLAN_SPEED_100;
+
+ debug = priv->debug = dev->mem_end;
+ } else {
+ priv->aui = aui[boards_found];
+ priv->speed = speed[boards_found];
+ priv->duplex = duplex[boards_found];
+ priv->debug = debug;
+ }
+
+ /* This will be used when we get an adapter error from
+ * within our irq handler */
+ INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
+
+ spin_lock_init(&priv->lock);
+
+ rc = tlan_init(dev);
+ if (rc) {
+ pr_err("Could not set up device\n");
+ goto err_out_free_dev;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ pr_err("Could not register device\n");
+ goto err_out_uninit;
+ }
+
+
+ tlan_devices_installed++;
+ boards_found++;
+
+ /* pdev is NULL if this is an EISA device */
+ if (pdev)
+ tlan_have_pci++;
+ else {
+ priv->next_device = tlan_eisa_devices;
+ tlan_eisa_devices = dev;
+ tlan_have_eisa++;
+ }
+
+ netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
+ (int)dev->irq,
+ (int)dev->base_addr,
+ priv->adapter->device_label,
+ priv->adapter_rev);
+ return 0;
+
+err_out_uninit:
+ pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
+err_out_free_dev:
+ free_netdev(dev);
+err_out_regions:
+#ifdef CONFIG_PCI
+ if (pdev)
+ pci_release_regions(pdev);
+#endif
+err_out:
+ if (pdev)
+ pci_disable_device(pdev);
+ return rc;
+}
+
+
+static void tlan_eisa_cleanup(void)
+{
+ struct net_device *dev;
+ struct tlan_priv *priv;
+
+ while (tlan_have_eisa) {
+ dev = tlan_eisa_devices;
+ priv = netdev_priv(dev);
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
+ }
+ release_region(dev->base_addr, 0x10);
+ unregister_netdev(dev);
+ tlan_eisa_devices = priv->next_device;
+ free_netdev(dev);
+ tlan_have_eisa--;
+ }
+}
+
+
+static void __exit tlan_exit(void)
+{
+ pci_unregister_driver(&tlan_driver);
+
+ if (tlan_have_eisa)
+ tlan_eisa_cleanup();
+
+}
+
+
+/* Module loading/unloading */
+module_init(tlan_probe);
+module_exit(tlan_exit);
+
+
+
+/**************************************************************
+ * tlan_eisa_probe
+ *
+ * Returns: Nothing
+ *
+ * Parms: None
+ *
+ *
+ * This function probes for EISA devices and calls
+ * tlan_probe1 when one is found.
+ *
+ *************************************************************/
+
+static void __init tlan_eisa_probe(void)
+{
+ long ioaddr;
+ int rc = -ENODEV;
+ int irq;
+ u16 device_id;
+
+ if (!EISA_bus) {
+ TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
+ return;
+ }
+
+ /* Loop through all slots of the EISA bus */
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
+
+
+ TLAN_DBG(TLAN_DEBUG_PROBE,
+ "Probing for EISA adapter at IO: 0x%4x : ",
+ (int) ioaddr);
+ if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
+ goto out;
+
+ if (inw(ioaddr + EISA_ID) != 0x110E) {
+ release_region(ioaddr, 0x10);
+ goto out;
+ }
+
+ device_id = inw(ioaddr + EISA_ID2);
+ if (device_id != 0x20F1 && device_id != 0x40F1) {
+ release_region(ioaddr, 0x10);
+ goto out;
+ }
+
+ /* check if adapter is enabled */
+ if (inb(ioaddr + EISA_CR) != 0x1) {
+ release_region(ioaddr, 0x10);
+ goto out2;
+ }
+
+ if (debug == 0x10)
+ pr_info("Found one\n");
+
+
+ /* Get irq from board */
+ switch (inb(ioaddr + 0xcc0)) {
+ case(0x10):
+ irq = 5;
+ break;
+ case(0x20):
+ irq = 9;
+ break;
+ case(0x40):
+ irq = 10;
+ break;
+ case(0x80):
+ irq = 11;
+ break;
+		default:
+			release_region(ioaddr, 0x10);
+			goto out;
+ }
+
+
+ /* Setup the newly found eisa adapter */
+		rc = tlan_probe1(NULL, ioaddr, irq, 12, NULL);
+ continue;
+
+out:
+ if (debug == 0x10)
+ pr_info("None found\n");
+ continue;
+
+out2:
+ if (debug == 0x10)
+ pr_info("Card found but it is not enabled, skipping\n");
+ continue;
+
+ }
+
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tlan_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ tlan_handle_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static const struct net_device_ops tlan_netdev_ops = {
+ .ndo_open = tlan_open,
+ .ndo_stop = tlan_close,
+ .ndo_start_xmit = tlan_start_tx,
+ .ndo_tx_timeout = tlan_tx_timeout,
+ .ndo_get_stats = tlan_get_stats,
+ .ndo_set_multicast_list = tlan_set_multicast_list,
+ .ndo_do_ioctl = tlan_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tlan_poll,
+#endif
+};
+
+
+
+/***************************************************************
+ * tlan_init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ *	and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
+{
+ int dma_size;
+ int err;
+ int i;
+ struct tlan_priv *priv;
+
+ priv = netdev_priv(dev);
+
+ dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+ * (sizeof(struct tlan_list));
+ priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+ dma_size,
+ &priv->dma_storage_dma);
+ priv->dma_size = dma_size;
+
+ if (priv->dma_storage == NULL) {
+ pr_err("Could not allocate lists and buffers for %s\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ memset(priv->dma_storage, 0, dma_size);
+ priv->rx_list = (struct tlan_list *)
+ ALIGN((unsigned long)priv->dma_storage, 8);
+ priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+ priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+ priv->tx_list_dma =
+ priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
+
+ err = 0;
+ for (i = 0; i < 6 ; i++)
+ err |= tlan_ee_read_byte(dev,
+ (u8) priv->adapter->addr_ofs + i,
+ (u8 *) &dev->dev_addr[i]);
+ if (err) {
+ pr_err("%s: Error reading MAC from eeprom: %d\n",
+ dev->name, err);
+ }
+ dev->addr_len = 6;
+
+ netif_carrier_off(dev);
+
+ /* Device methods */
+ dev->netdev_ops = &tlan_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return 0;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
+
+static int tlan_open(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int err;
+
+ priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+ err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+ dev->name, dev);
+
+ if (err) {
+ netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
+ dev->irq);
+ return err;
+ }
+
+ init_timer(&priv->timer);
+
+ tlan_start(dev);
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
+ dev->name, priv->tlan_rev);
+
+ return 0;
+
+}
+
+
+
+/**************************************************************
+ * tlan_ioctl
+ *
+ * Returns:
+ * 0 on success, error code otherwise
+ * Params:
+ * dev structure of device to receive ioctl.
+ *
+ * rq ifreq structure to hold userspace data.
+ *
+ * cmd ioctl command.
+ *
+ *
+ *************************************************************/
+
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ u32 phy = priv->phy[priv->phy_num];
+
+ if (!priv->phy_online)
+ return -EAGAIN;
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* get address of MII PHY in use. */
+ data->phy_id = phy;
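+		/* fall through: also read back the requested register */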
+
+
+ case SIOCGMIIREG: /* read MII PHY register. */
+ tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &data->val_out);
+ return 0;
+
+
+ case SIOCSMIIREG: /* write MII PHY register. */
+ tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+/***************************************************************
+ * tlan_tx_timeout
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * dev structure of device which timed out
+ * during transmit.
+ *
+ **************************************************************/
+
+static void tlan_tx_timeout(struct net_device *dev)
+{
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+
+	/* Ok so we timed out, let's see what we can do about it... */
+ tlan_free_lists(dev);
+ tlan_reset_lists(dev);
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+
+}
+
+
+/***************************************************************
+ * tlan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work work item of device which timed out
+ *
+ **************************************************************/
+
+static void tlan_tx_timeout_work(struct work_struct *work)
+{
+ struct tlan_priv *priv =
+ container_of(work, struct tlan_priv, tlan_tqueue);
+
+ tlan_tx_timeout(priv->dev);
+}
+
+
+
+/***************************************************************
+ * tlan_start_tx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ *	This function adds a frame to the Tx list to be sent
+ *	ASAP. First it verifies that the adapter is ready and
+ *	there is room in the queue. Then it sets up the next
+ *	available list and DMA-maps the frame buffer into it.
+ *	If the adapter Tx channel is idle, it gives
+ *	the adapter a Tx Go command on the list, otherwise it
+ *	sets the forward address of the previous list to point
+ *	to this one. The sk_buff is freed later, once the
+ *	adapter signals that transmission has completed.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ dma_addr_t tail_list_phys;
+ struct tlan_list *tail_list;
+ unsigned long flags;
+ unsigned int txlen;
+
+ if (!priv->phy_online) {
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
+ dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
+ return NETDEV_TX_OK;
+ txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
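+	/* skb_padto() zero-pads the buffer but leaves skb->len unchanged,
+	 * hence the explicit max() for the DMA-mapped length */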
+
+ tail_list = priv->tx_list + priv->tx_tail;
+ tail_list_phys =
+ priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
+
+ if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
+ dev->name, priv->tx_head, priv->tx_tail);
+ netif_stop_queue(dev);
+ priv->tx_busy_count++;
+ return NETDEV_TX_BUSY;
+ }
+
+ tail_list->forward = 0;
+
+ tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
+ skb->data, txlen,
+ PCI_DMA_TODEVICE);
+ tlan_store_skb(tail_list, skb);
+
+ tail_list->frame_size = (u16) txlen;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
+ tail_list->buffer[1].count = 0;
+ tail_list->buffer[1].address = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ tail_list->c_stat = TLAN_CSTAT_READY;
+ if (!priv->tx_in_progress) {
+ priv->tx_in_progress = 1;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Starting TX on buffer %d\n",
+ priv->tx_tail);
+ outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
+ } else {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Adding buffer %d to TX channel\n",
+ priv->tx_tail);
+ if (priv->tx_tail == 0) {
+ (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
+ = tail_list_phys;
+ } else {
+ (priv->tx_list + (priv->tx_tail - 1))->forward
+ = tail_list_phys;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
+
+ return NETDEV_TX_OK;
+
+}
+
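+/* The Tx path above treats tx_list as a ring.  CIRC_INC() comes from
+ * tlan.h and, roughly, advances an index with wraparound:
+ *
+ *	#define CIRC_INC(a, b)	if (++(a) >= (b)) (a) = 0
+ *
+ * tx_tail thus walks through TLAN_NUM_TX_LISTS entries.  When the Tx
+ * channel is already busy, new work is chained in by pointing the
+ * previous list's forward field at the new list instead of issuing
+ * another Tx Go command.
+ */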
+
+
+
+/***************************************************************
+ * tlan_handle_interrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+ * acknowledges the interrupt to the adapter (thus
+ * re-enabling adapter interrupts).
+ *
+ **************************************************************/
+
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 host_int;
+ u16 type;
+
+ spin_lock(&priv->lock);
+
+ host_int = inw(dev->base_addr + TLAN_HOST_INT);
+ type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ if (type) {
+ u32 ack;
+ u32 host_cmd;
+
+ outw(host_int, dev->base_addr + TLAN_HOST_INT);
+ ack = tlan_int_vector[type](dev, host_int);
+
+ if (ack) {
+ host_cmd = TLAN_HC_ACK | ack | (type << 18);
+ outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
+ }
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_RETVAL(type);
+}
+
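+/* A worked example of the dispatch above, assuming TLAN_HI_IT_MASK
+ * selects the interrupt-type field in bits 2..4 of HOST_INT:
+ *
+ *	host_int = 0x000c  =>  type = (0x000c & TLAN_HI_IT_MASK) >> 2 = 3
+ *
+ * tlan_int_vector[3] is then invoked, and any non-zero return value is
+ * folded into TLAN_HC_ACK | ack | (type << 18), which acknowledges the
+ * interrupt source and re-enables it.
+ */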
+
+
+
+/***************************************************************
+ * tlan_close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+ * its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
+
+static int tlan_close(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ priv->neg_be_verbose = 0;
+ tlan_stop(dev);
+
+ free_irq(dev->irq, dev);
+ tlan_free_lists(dev);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
+
+ return 0;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_get_stats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+ * This function updates the devices statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
+
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+
+	/* Should we only read stats if the device is open? */
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
+ priv->rx_eoc_count);
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
+ priv->tx_busy_count);
+ if (debug & TLAN_DEBUG_GNRL) {
+ tlan_print_dio(dev->base_addr);
+ tlan_phy_print(dev);
+ }
+ if (debug & TLAN_DEBUG_LIST) {
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+ tlan_print_list(priv->rx_list + i, "RX", i);
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+ tlan_print_list(priv->tx_list + i, "TX", i);
+ }
+
+ return &dev->stats;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_set_multicast_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adaptor to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+ * mode is activated.  Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
+
+static void tlan_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ int i;
+ u32 offset;
+ u8 tmp;
+
+ if (dev->flags & IFF_PROMISC) {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
+ } else {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+ 0xffffffff);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+ 0xffffffff);
+ } else {
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i < 3) {
+ tlan_set_mac(dev, i + 1,
+ (char *) &ha->addr);
+ } else {
+ offset =
+ tlan_hash_func((u8 *)&ha->addr);
+ if (offset < 32)
+ hash1 |= (1 << offset);
+ else
+ hash2 |= (1 << (offset - 32));
+ }
+ i++;
+ }
+ for ( ; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
+ }
+ }
+
+}
+
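+/* Multicast filtering sketch: tlan_hash_func() reduces a MAC address
+ * to a 6-bit offset into the 64-bit hash filter.  Offsets 0-31 select
+ * a bit in HASH_1 and offsets 32-63 a bit in HASH_2, so for an
+ * illustrative offset of 42:
+ *
+ *	hash2 |= 1 << (42 - 32);
+ *
+ * Only the fourth and later multicast addresses go through the hash;
+ * the first three get exact-match slots in AREG_1..AREG_3.
+ */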
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver interrupt vectors and table
+
+Please see Chap. 4, "Interrupt Handling", of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN-based adapters.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+/***************************************************************
+ * tlan_handle_tx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+ * contents of a buffer.  It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int eoc = 0;
+ struct tlan_list *head_list;
+ dma_addr_t head_list_phys;
+ u32 ack = 0;
+ u16 tmp_c_stat;
+
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ struct sk_buff *skb = tlan_get_skb(head_list);
+
+ ack++;
+ pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ head_list->buffer[8].address = 0;
+ head_list->buffer[9].address = 0;
+
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
+ eoc = 1;
+
+ dev->stats.tx_bytes += head_list->frame_size;
+
+ head_list->c_stat = TLAN_CSTAT_UNUSED;
+ netif_start_queue(dev);
+ CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+ head_list = priv->tx_list + priv->tx_head;
+ }
+
+ if (!ack)
+ netdev_info(dev,
+ "Received interrupt for uncompleted TX frame\n");
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->tx_in_progress = 0;
+ }
+ }
+
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
+ }
+ }
+
+ return ack;
+
+}
+
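+/* tlan_store_skb()/tlan_get_skb() (in tlan.h) stash the sk_buff
+ * pointer in the otherwise unused buffer[8]/buffer[9] slots of a list
+ * entry, splitting it across two 32-bit address fields so the scheme
+ * also works on 64-bit hosts -- which is why the completion handlers
+ * clear both slots.  Roughly:
+ *
+ *	tag->buffer[9].address = (u32)(unsigned long)skb;
+ *	tag->buffer[8].address = (u32)((u64)(unsigned long)skb >> 32);
+ */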
+
+
+
+/***************************************************************
+ *	tlan_handle_stat_overflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
+{
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+
+ return 1;
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_handle_rx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function determines which list entry the frame has
+ * been loaded into, allocates a new sk_buff to take that
+ * entry's place in the ring, and hands the filled sk_buff
+ * to the protocol stack.  It then resets the used list
+ * element and appends it to the end of the Rx chain.  If
+ * the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u32 ack = 0;
+ int eoc = 0;
+ struct tlan_list *head_list;
+ struct sk_buff *skb;
+ struct tlan_list *tail_list;
+ u16 tmp_c_stat;
+ dma_addr_t head_list_phys;
+
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys =
+ priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
+
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ dma_addr_t frame_dma = head_list->buffer[0].address;
+ u32 frame_size = head_list->frame_size;
+ struct sk_buff *new_skb;
+
+ ack++;
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
+ eoc = 1;
+
+ new_skb = netdev_alloc_skb_ip_align(dev,
+ TLAN_MAX_FRAME_SIZE + 5);
+ if (!new_skb)
+ goto drop_and_reuse;
+
+ skb = tlan_get_skb(head_list);
+ pci_unmap_single(priv->pci_dev, frame_dma,
+ TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ skb_put(skb, frame_size);
+
+ dev->stats.rx_bytes += frame_size;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ head_list->buffer[0].address =
+ pci_map_single(priv->pci_dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+
+ tlan_store_skb(head_list, new_skb);
+drop_and_reuse:
+ head_list->forward = 0;
+ head_list->c_stat = 0;
+ tail_list = priv->rx_list + priv->rx_tail;
+ tail_list->forward = head_list_phys;
+
+ CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+ CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ }
+
+ if (!ack)
+ netdev_info(dev,
+ "Received interrupt for uncompleted RX frame\n");
+
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rx_eoc_count++;
+ }
+
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_dummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
+{
+ netdev_info(dev, "Test interrupt\n");
+ return 1;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_tx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so EOC events are processed here for
+ * those chips.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_list *head_list;
+ dma_addr_t head_list_phys;
+ u32 ack = 1;
+
+ host_int = 0;
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ netif_stop_queue(dev);
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->tx_in_progress = 0;
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_status_check
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u32 ack;
+ u32 error;
+ u8 net_sts;
+ u32 phy;
+ u16 tlphy_ctl;
+ u16 tlphy_sts;
+
+ ack = 1;
+ if (host_int & TLAN_HI_IV_MASK) {
+ netif_stop_queue(dev);
+ error = inl(dev->base_addr + TLAN_CH_PARM);
+ netdev_info(dev, "Adaptor Error = 0x%x\n", error);
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+
+ schedule_work(&priv->tlan_tqueue);
+
+ netif_wake_queue(dev);
+ ack = 0;
+ } else {
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+ phy = priv->phy[priv->phy_num];
+
+ net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+ if (net_sts) {
+ tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
+ dev->name, (unsigned) net_sts);
+ }
+ if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ if (!(tlphy_sts & TLAN_TS_POLOK) &&
+ !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+ (tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ }
+
+ if (debug)
+ tlan_phy_print(dev);
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_rx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * CSTAT member or an INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ dma_addr_t head_list_phys;
+ u32 ack = 1;
+
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+ priv->rx_head, priv->rx_tail);
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rx_eoc_count++;
+ }
+
+ return ack;
+
+}
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver timer function
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * data A value given to add timer when
+ * add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+ * delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 100 ms produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *) data;
+ struct tlan_priv *priv = netdev_priv(dev);
+ u32 elapsed;
+ unsigned long flags = 0;
+
+ priv->timer.function = NULL;
+
+ switch (priv->timer_type) {
+#ifdef MONITOR
+ case TLAN_TIMER_LINK_BEAT:
+ tlan_phy_monitor(dev);
+ break;
+#endif
+ case TLAN_TIMER_PHY_PDOWN:
+ tlan_phy_power_down(dev);
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ tlan_phy_power_up(dev);
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ tlan_phy_reset(dev);
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ tlan_phy_start_link(dev);
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ tlan_phy_finish_auto_neg(dev);
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ tlan_finish_reset(dev);
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function == NULL) {
+ elapsed = jiffies - priv->timer_set_at;
+ if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK);
+ } else {
+ priv->timer.function = tlan_timer;
+ priv->timer.expires = priv->timer_set_at
+ + TLAN_TIMER_ACT_DELAY;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ add_timer(&priv->timer);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ break;
+ }
+
+}
+
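+/* Note on the scheme above: the driver multiplexes one struct
+ * timer_list over all of its delayed actions.  priv->timer_type
+ * selects the action, and priv->timer.function doubles as an "armed"
+ * flag (NULL means the timer is free for reuse), which is why it is
+ * cleared on entry here and checked before the activity timer is
+ * re-armed in the Tx/Rx EOF handlers.
+ */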
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver adapter related routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_reset_lists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+ *				structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ struct tlan_list *list;
+ dma_addr_t list_phys;
+ struct sk_buff *skb;
+
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ list->c_stat = TLAN_CSTAT_UNUSED;
+ list->buffer[0].address = 0;
+ list->buffer[2].count = 0;
+ list->buffer[2].address = 0;
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+
+ priv->rx_head = 0;
+ priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+ list->c_stat = TLAN_CSTAT_READY;
+ list->frame_size = TLAN_MAX_FRAME_SIZE;
+ list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
+ if (!skb) {
+ netdev_err(dev, "Out of memory for received data\n");
+ break;
+ }
+
+ list->buffer[0].address = pci_map_single(priv->pci_dev,
+ skb->data,
+ TLAN_MAX_FRAME_SIZE,
+ PCI_DMA_FROMDEVICE);
+ tlan_store_skb(list, skb);
+ list->buffer[1].count = 0;
+ list->buffer[1].address = 0;
+ list->forward = list_phys + sizeof(struct tlan_list);
+ }
+
+ /* in case ran out of memory early, clear bits */
+ while (i < TLAN_NUM_RX_LISTS) {
+ tlan_store_skb(priv->rx_list + i, NULL);
+ ++i;
+ }
+ list->forward = 0;
+
+}
+
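+/* After tlan_reset_lists() the Rx side forms a singly linked DMA chain
+ * terminated by a zero forward pointer; illustrated with four lists:
+ *
+ *	rx_list[0] -> rx_list[1] -> rx_list[2] -> rx_list[3] -> 0
+ *	rx_head = 0, rx_tail = 3
+ *
+ * tlan_handle_rx_eof() re-links each consumed entry onto the tail so
+ * the adapter never runs off the end of the chain while buffers are
+ * being replenished.
+ */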
+
+static void tlan_free_lists(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ struct tlan_list *list;
+ struct sk_buff *skb;
+
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ pci_unmap_single(
+ priv->pci_dev,
+ list->buffer[0].address,
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+ }
+
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ pci_unmap_single(priv->pci_dev,
+ list->buffer[0].address,
+ TLAN_MAX_FRAME_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+ }
+}
+
+
+
+
+/***************************************************************
+ * tlan_print_dio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_print_dio(u16 io_base)
+{
+ u32 data0, data1;
+ int i;
+
+ pr_info("Contents of internal registers for io base 0x%04hx\n",
+ io_base);
+ pr_info("Off. +0 +4\n");
+ for (i = 0; i < 0x4C; i += 8) {
+ data0 = tlan_dio_read32(io_base, i);
+ data1 = tlan_dio_read32(io_base, i + 0x4);
+ pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_print_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the struct tlan_list structure to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
+
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
+{
+ int i;
+
+ pr_info("%s List %d at %p\n", type, num, list);
+ pr_info(" Forward = 0x%08x\n", list->forward);
+ pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
+ pr_info(" Frame Size = 0x%04hx\n", list->frame_size);
+ /* for (i = 0; i < 10; i++) { */
+ for (i = 0; i < 2; i++) {
+ pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+ i, list->buffer[i].count, list->buffer[i].address);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_read_and_clear_stats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ *				from which to read stats.
+ *			record	Flag indicating whether to record
+ *				the values read.
+ *
+ *	This function reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
+
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
+{
+ u32 tx_good, tx_under;
+ u32 rx_good, rx_over;
+ u32 def_tx, crc, code;
+ u32 multi_col, single_col;
+ u32 excess_col, late_col, loss;
+
+ outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+ def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
+ def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+ outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+ loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+ if (record) {
+ dev->stats.rx_packets += rx_good;
+ dev->stats.rx_errors += rx_over + crc + code;
+ dev->stats.tx_packets += tx_good;
+ dev->stats.tx_errors += tx_under + loss;
+ dev->stats.collisions += multi_col
+ + single_col + excess_col + late_col;
+
+ dev->stats.rx_over_errors += rx_over;
+ dev->stats.rx_crc_errors += crc;
+ dev->stats.rx_frame_errors += code;
+
+ dev->stats.tx_aborted_errors += tx_under;
+ dev->stats.tx_carrier_errors += loss;
+ }
+
+}
+
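+/* The byte-wise reads above reflect how the DIO statistics registers
+ * pack several counters into one 32-bit word.  For example, the
+ * Good_TX_Frms register is laid out as:
+ *
+ *	bits  0-23	good Tx frames
+ *	bits 24-31	Tx underruns
+ *
+ * Reading a statistics register clears it, so the values must either
+ * be recorded (TLAN_RECORD) or deliberately discarded (TLAN_IGNORE).
+ */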
+
+
+
+/***************************************************************
+ *	tlan_reset_adapter
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+ * This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
+
+static void
+tlan_reset_adapter(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ u32 addr;
+ u32 data;
+ u8 data8;
+
+ priv->tlan_full_duplex = false;
+ priv->phy_online = 0;
+ netif_carrier_off(dev);
+
+/* 1. Assert reset bit. */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_AD_RST;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+ udelay(1000);
+
+/* 2. Turn off interrupts. (Probably isn't necessary) */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_INT_OFF;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+/* 3. Clear AREGs and HASHs. */
+
+ for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+ tlan_dio_write32(dev->base_addr, (u16) i, 0);
+
+/* 4. Setup NetConfig register. */
+
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
+
+/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
+
+ outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+ outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
+
+/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+ tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
+
+/* 7. Setup the remaining registers. */
+
+ if (priv->tlan_rev >= 0x30) {
+ data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
+ tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
+ }
+ tlan_phy_detect(dev);
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
+
+ if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
+ data |= TLAN_NET_CFG_BIT;
+ if (priv->aui == 1) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+ } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+ priv->tlan_full_duplex = true;
+ } else {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
+ }
+ }
+
+ if (priv->phy_num == 0)
+ data |= TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+ tlan_finish_reset(dev);
+ else
+ tlan_phy_power_down(dev);
+
+}
+
+
+
+
+static void
+tlan_finish_reset(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u8 data;
+ u32 phy;
+ u8 sio;
+ u16 status;
+ u16 partner;
+ u16 tlphy_ctl;
+ u16 tlphy_par;
+ u16 tlphy_id1, tlphy_id2;
+ int i;
+
+ phy = priv->phy[priv->phy_num];
+
+ data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
+ if (priv->tlan_full_duplex)
+ data |= TLAN_NET_CMD_DUPLEX;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
+ data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
+ if (priv->phy_num == 0)
+ data |= TLAN_NET_MASK_MASK7;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+ tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
+
+ if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+ (priv->aui)) {
+ status = MII_GS_LINK;
+ netdev_info(dev, "Link forced\n");
+ } else {
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ if ((status & MII_GS_LINK) &&
+ /* We only support link info on Nat.Sem. PHY's */
+ (tlphy_id1 == NAT_SEM_ID1) &&
+ (tlphy_id2 == NAT_SEM_ID2)) {
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
+
+ netdev_info(dev,
+ "Link active with %s %uMbps %s-Duplex\n",
+ !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+ ? "forced" : "Autonegotiation enabled,",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? 100 : 10,
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
+
+ if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+ netdev_info(dev, "Partner capability:");
+ for (i = 5; i < 10; i++)
+ if (partner & (1 << i))
+ pr_cont(" %s", media[i-5]);
+ pr_cont("\n");
+ }
+
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
+#ifdef MONITOR
+ /* We have link beat..for now anyway */
+ priv->link = 1;
+ /*Enabling link beat monitoring */
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
+#endif
+ } else if (status & MII_GS_LINK) {
+ netdev_info(dev, "Link active\n");
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
+ }
+ }
+
+ if (priv->phy_num == 0) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ tlphy_ctl |= TLAN_TC_INTEN;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+ sio |= TLAN_NET_SIO_MINTEN;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+ }
+
+ if (status & MII_GS_LINK) {
+ tlan_set_mac(dev, 0, dev->dev_addr);
+ priv->phy_online = 1;
+ outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+ if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+ outb((TLAN_HC_REQ_INT >> 8),
+ dev->base_addr + TLAN_HOST_CMD + 1);
+ outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
+ netif_carrier_on(dev);
+ } else {
+ netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
+ return;
+ }
+ tlan_set_multicast_list(dev);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_set_mac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+ *				i.e., it isn't in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
+
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
+{
+ int i;
+
+ areg *= 6;
+
+ if (mac != NULL) {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, mac[i]);
+ } else {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, 0);
+ }
+
+}
+
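+/* AREG layout sketch: the address registers occupy consecutive 6-byte
+ * blocks starting at TLAN_AREG_0, so for example:
+ *
+ *	tlan_set_mac(dev, 2, mac);
+ *
+ * writes mac[0..5] to DIO offsets TLAN_AREG_0 + 12 through
+ * TLAN_AREG_0 + 17.
+ */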
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver PHY layer routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+/*********************************************************************
+ * tlan_phy_print
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+ * This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
+
+static void tlan_phy_print(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 i, data0, data1, data2, data3, phy;
+
+ phy = priv->phy[priv->phy_num];
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ netdev_info(dev, "Unmanaged PHY\n");
+ } else if (phy <= TLAN_PHY_MAX_ADDR) {
+ netdev_info(dev, "PHY 0x%02x\n", phy);
+ pr_info(" Off. +0 +1 +2 +3\n");
+ for (i = 0; i < 0x20; i += 4) {
+ tlan_mii_read_reg(dev, phy, i, &data0);
+ tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
+ i, data0, data1, data2, data3);
+ }
+ } else {
+ netdev_info(dev, "Invalid PHY\n");
+ }
+
+}
+
+
+
+
+/*********************************************************************
+ * tlan_phy_detect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+ *			for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality
+ * (e.g., AUI/Thinnet).  This function finds out if this TLAN
+ * chip has an internal PHY, and then finds the first external
+ * PHY (starting from address 0), if one exists.
+ *
+ ********************************************************************/
+
+static void tlan_phy_detect(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 control;
+ u16 hi;
+ u16 lo;
+ u32 phy;
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ priv->phy_num = 0xffff;
+ return;
+ }
+
+ tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
+
+ if (hi != 0xffff)
+ priv->phy[0] = TLAN_PHY_MAX_ADDR;
+ else
+ priv->phy[0] = TLAN_PHY_NONE;
+
+ priv->phy[1] = TLAN_PHY_NONE;
+ for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+ if ((control != 0xffff) ||
+ (hi != 0xffff) || (lo != 0xffff)) {
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "PHY found at %02x %04x %04x %04x\n",
+ phy, control, hi, lo);
+ if ((priv->phy[1] == TLAN_PHY_NONE) &&
+ (phy != TLAN_PHY_MAX_ADDR)) {
+ priv->phy[1] = phy;
+ }
+ }
+ }
+
+ if (priv->phy[1] != TLAN_PHY_NONE)
+ priv->phy_num = 1;
+ else if (priv->phy[0] != TLAN_PHY_NONE)
+ priv->phy_num = 0;
+ else
+ netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
+
+}
+
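+/* Convention established above: priv->phy[0] holds the internal PHY
+ * (fixed at TLAN_PHY_MAX_ADDR, if present) and priv->phy[1] the first
+ * external PHY found on the bus.  priv->phy_num then selects which of
+ * the two the rest of the driver talks to, preferring the external
+ * part when both exist.
+ */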
+
+
+
+static void tlan_phy_power_down(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 value;
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
+ value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ if ((priv->phy_num == 0) &&
+ (priv->phy[1] != TLAN_PHY_NONE) &&
+ (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
+ }
+
+	/* Wait for 50 ms and power up.
+	 * This is arbitrary.  It is intended to make sure the
+ * transceiver settles.
+ */
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
+
+}
+
+
+
+
+static void tlan_phy_power_up(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 value;
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
+ value = MII_GC_LOOPBK;
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ tlan_mii_sync(dev->base_addr);
+	/* Wait for 50 ms (HZ/20) and reset the
+	 * transceiver.  The TLAN docs say both 50 ms and
+	 * 500 ms; the shorter delay is used here.
+ */
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
+
+}
+
+
+
+
+static void tlan_phy_reset(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 phy;
+ u16 value;
+
+ phy = priv->phy[priv->phy_num];
+
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
+ value = MII_GC_LOOPBK | MII_GC_RESET;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+ while (value & MII_GC_RESET)
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+
+	/* Wait for 50 ms and initialize.
+	 * The original delay was 500 ms; it was shortened to
+	 * 50 ms, which seems long enough.
+ */
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
+
+}
+
+
+
+
+static void tlan_phy_start_link(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 ability;
+ u16 control;
+ u16 data;
+ u16 phy;
+ u16 status;
+ u16 tctl;
+
+ phy = priv->phy[priv->phy_num];
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
+
+ if ((status & MII_GS_AUTONEG) &&
+ (!priv->aui)) {
+ ability = status >> 11;
+ if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+ } else if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
+ } else {
+
+ /* Set Auto-Neg advertisement */
+ tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+ (ability << 5) | 1);
+			/* Enable Auto-Neg */
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
+ /* Restart Auto-Neg */
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
+			/* Wait 2 sec for autonegotiation
+			 * to complete.  The max spec time is less than this,
+			 * but the card needs additional time to start AN;
+			 * .5 sec should be plenty extra.
+ */
+ netdev_info(dev, "Starting autonegotiation\n");
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
+ return;
+ }
+
+ }
+
+ if ((priv->aui) && (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+ return;
+ } else if (priv->phy_num == 0) {
+ control = 0;
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+ if (priv->aui) {
+ tctl |= TLAN_TC_AUISEL;
+ } else {
+ tctl &= ~TLAN_TC_AUISEL;
+ if (priv->duplex == TLAN_DUPLEX_FULL) {
+ control |= MII_GC_DUPLEX;
+ priv->tlan_full_duplex = true;
+ }
+ if (priv->speed == TLAN_SPEED_100)
+ control |= MII_GC_SPEEDSEL;
+ }
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
+ }
+
+	/* Wait for 4 sec to give the transceiver time
+ * to establish link.
+ */
+ tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
+
+}
+
+
+
+
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 an_adv;
+ u16 an_lpa;
+ u16 data;
+ u16 mode;
+ u16 phy;
+ u16 status;
+
+ phy = priv->phy[priv->phy_num];
+
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+
+ if (!(status & MII_GS_AUTOCMPLT)) {
+ /* Wait for 8 sec to give the process
+ * more time. Perhaps we should fail after a while.
+ */
+ if (!priv->neg_be_verbose++) {
+ pr_info("Giving autonegotiation more time.\n");
+ pr_info("Please check that your adapter has\n");
+ pr_info("been properly connected to a HUB or Switch.\n");
+ pr_info("Trying to establish link in the background...\n");
+ }
+ tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
+ return;
+ }
+
+ netdev_info(dev, "Autonegotiation complete\n");
+ tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
+ mode = an_adv & an_lpa & 0x03E0;
+ if (mode & 0x0100)
+ priv->tlan_full_duplex = true;
+ else if (!(mode & 0x0080) && (mode & 0x0040))
+ priv->tlan_full_duplex = true;
+
+ if ((!(mode & 0x0180)) &&
+ (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+ (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
+ return;
+ }
+
+ if (priv->phy_num == 0) {
+ if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+ (an_adv & an_lpa & 0x0040)) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB | MII_GC_DUPLEX);
+ netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
+ } else {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB);
+ netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
+ }
+ }
+
+	/* Wait for 100 ms.  No reason in particular.
+ */
+ tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
+
+}
+
+#ifdef MONITOR
+
+/*********************************************************************
+ *
+ * tlan_phy_monitor
+ *
+ * Returns:
+ * None
+ *
+ * Params:
+ * dev The device structure of this device.
+ *
+ *
+ * This function monitors PHY condition by reading the status
+ * register via the MII bus. This can be used to give info
+ * about link changes (up/down), and possible switch to alternate
+ * media.
+ *
+ *******************************************************************/
+
+void tlan_phy_monitor(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 phy;
+ u16 phy_status;
+
+ phy = priv->phy[priv->phy_num];
+
+ /* Get PHY status register */
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
+
+ /* Check if link has been lost */
+ if (!(phy_status & MII_GS_LINK)) {
+ if (priv->link) {
+ priv->link = 0;
+ printk(KERN_DEBUG "TLAN: %s has lost link\n",
+ dev->name);
+ netif_carrier_off(dev);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+ return;
+ }
+ }
+
+	/* Link re-established? */
+ if ((phy_status & MII_GS_LINK) && !priv->link) {
+ priv->link = 1;
+ printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+ dev->name);
+ netif_carrier_on(dev);
+ }
+
+ /* Setup a new monitor */
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+}
+
+#endif /* MONITOR */
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver MII routines
+
+These routines are based on the information in Chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_mii_read_reg
+ *
+ * Returns:
+ * false if ack received ok
+ * true if no ack received or other error
+ *
+ * Parms:
+ * dev The device structure containing
+ *				the IO address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+ * retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+ * This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u8 nack;
+ u16 sio, tmp;
+ u32 i;
+ bool err;
+ int minten;
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+
+ err = false;
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tlan_mii_sync(dev->base_addr);
+
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
+
+
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
+
+ nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
+ if (nack) { /* no ACK, so fake it */
+ for (i = 0; i < 16; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+ tmp = 0xffff;
+ err = true;
+ } else { /* ACK, so read data */
+ for (tmp = 0, i = 0x8000; i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
+ tmp |= i;
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+ }
+
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ *val = tmp;
+
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err;
+
+}
+
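+/* The bit-banged transaction above follows the standard MII management
+ * frame format (cf. the "ThunderLAN Programmer's Guide", Chap. 2):
+ *
+ *	<sync: 32 clocks> <start: 01> <op: 10 = read> <5-bit PHY addr>
+ *	<5-bit reg addr> <turnaround> <16 data bits from the PHY>
+ *
+ * A NACK during the turnaround cycle produces a faked 0xffff result
+ * and a true (error) return value.
+ */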
+
+
+
+/***************************************************************
+ * tlan_mii_send_data
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+ * This function sends a sequence of bits on the MII
+ * configuration bus.
+ *
+ **************************************************************/
+
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
+{
+ u16 sio;
+ u32 i;
+
+ if (num_bits == 0)
+ return;
+
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+ tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
+
+ for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ if (data & i)
+ tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
+ else
+ tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_mii_sync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+ * This function syncs all PHYs in terms of the MII configuration
+ * bus.
+ *
+ **************************************************************/
+
+static void tlan_mii_sync(u16 base_port)
+{
+ int i;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+ for (i = 0; i < 32; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_mii_write_reg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value from the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
+
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u16 sio;
+ int minten;
+ unsigned long flags = 0;
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ if (!in_irq())
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tlan_mii_sync(dev->base_addr);
+
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
+
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
+ tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ if (!in_irq())
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+}
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver eeprom routines
+
+The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+EEPROM.  These functions are based on information in Microchip's
+data sheet.  I don't know how well these functions will work with
+other EEPROMs.
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_ee_send_start
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
+{
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_send_byte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ *				byte is sent and the ack is
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
+{
+ int err;
+ u8 place;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* Assume clock is low, tx is enabled; */
+ for (place = 0x80; place != 0; place >>= 1) {
+ if (place & data)
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ else
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ }
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+
+ if ((!err) && stop) {
+ /* STOP, raise data while clock is high */
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+ return err;
+
+}
+
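+/*
+ * Note: the ack above is sampled after the data line is released
+ * (ETXEN cleared), so a return of 0 means the EEPROM pulled the line
+ * low (acknowledge) and 1 means no device answered.
+ */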
+
+
+
+/***************************************************************
+ * tlan_ee_receive_byte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link.  It then sends an ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
+{
+ u8 place;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+ *data = 0;
+
+ /* Assume clock is low, tx is enabled; */
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ for (place = 0x80; place; place >>= 1) {
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
+ *data |= place;
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ }
+
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ if (!stop) {
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ } else {
+		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);	/* no ack = 1 (NAK) */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ /* STOP, raise data while clock is high */
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_read_byte
+ *
+ * Returns:
+ *	0 if no error occurred, else the stage (1-3) at
+ *	which the error occurred.
+ * Parms:
+ *	dev		The net_device for the TLAN
+ *			adapter whose EEPROM is to be
+ *			used.
+ *	ee_addr		The address of the byte in the
+ *			EEPROM whose contents are to be
+ *			retrieved.
+ *	data		An address to a char to hold the
+ *			data obtained from the EEPROM.
+ *
+ * This function reads one byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
+{
+ int err;
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 1;
+ goto fail;
+ }
+ err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 2;
+ goto fail;
+ }
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 3;
+ goto fail;
+ }
+ tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
+fail:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+
+}
+
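+/*
+ * Illustrative sketch (hypothetical helper, not part of this patch):
+ * tlan_ee_read_byte() performs a 24C02-style "random read" -- START,
+ * device address 0xa0 (write), byte address, repeated START, device
+ * address 0xa1 (read), data byte, STOP -- so fetching the six station
+ * address bytes from the adapter's address offset looks like this:
+ */
+#if 0	/* example only, not compiled */
+static int tlan_example_read_mac(struct net_device *dev, u8 addr_ofs,
+				 u8 mac[6])
+{
+	int i, err;
+
+	for (i = 0; i < 6; i++) {
+		err = tlan_ee_read_byte(dev, addr_ofs + i, &mac[i]);
+		if (err)
+			return err;	/* 1-3 = stage that failed */
+	}
+	return 0;
+}
+#endif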
+
+
diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h
new file mode 100644
index 000000000000..5fc98a8e4889
--- /dev/null
+++ b/drivers/net/ethernet/ti/tlan.h
@@ -0,0 +1,546 @@
+#ifndef TLAN_H
+#define TLAN_H
+/********************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.h
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1999-2001 Torben Mathiasen
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ *
+ * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com>
+ * New Maintainer
+ *
+ ********************************************************************/
+
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+
+
+ /*****************************************************************
+ * TLan Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_MIN_FRAME_SIZE 64
+#define TLAN_MAX_FRAME_SIZE 1600
+
+#define TLAN_NUM_RX_LISTS 32
+#define TLAN_NUM_TX_LISTS 64
+
+#define TLAN_IGNORE 0
+#define TLAN_RECORD 1
+
+#define TLAN_DBG(lvl, format, args...) \
+ do { \
+ if (debug&lvl) \
+ printk(KERN_DEBUG "TLAN: " format, ##args); \
+ } while (0)
+
+#define TLAN_DEBUG_GNRL 0x0001
+#define TLAN_DEBUG_TX 0x0002
+#define TLAN_DEBUG_RX 0x0004
+#define TLAN_DEBUG_LIST 0x0008
+#define TLAN_DEBUG_PROBE 0x0010
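+
+/*
+ * Example (sketch): TLAN_DBG(TLAN_DEBUG_PROBE, "found PHY at %02x\n",
+ * phy) prints only when the TLAN_DEBUG_PROBE bit is set in the
+ * driver's "debug" flags.
+ */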
+
+#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
+#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
+ at a time */
+
+
+ /*****************************************************************
+ * Device Identification Definitions
+ *
+ ****************************************************************/
+
+#define PCI_DEVICE_ID_NETELLIGENT_10_T2 0xB012
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100 0xB030
+#ifndef PCI_DEVICE_ID_OLICOM_OC2183
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2325
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2326
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#endif
+
+struct tlan_adapter_entry {
+ u16 vendor_id;
+ u16 device_id;
+ char *device_label;
+ u32 flags;
+ u16 addr_ofs;
+};
+
+#define TLAN_ADAPTER_NONE 0x00000000
+#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
+#define TLAN_ADAPTER_BIT_RATE_PHY 0x00000002
+#define TLAN_ADAPTER_USE_INTERN_10 0x00000004
+#define TLAN_ADAPTER_ACTIVITY_LED 0x00000008
+
+#define TLAN_SPEED_DEFAULT 0
+#define TLAN_SPEED_10 10
+#define TLAN_SPEED_100 100
+
+#define TLAN_DUPLEX_DEFAULT 0
+#define TLAN_DUPLEX_HALF 1
+#define TLAN_DUPLEX_FULL 2
+
+
+
+ /*****************************************************************
+ * EISA Definitions
+ *
+ ****************************************************************/
+
+#define EISA_ID 0xc80 /* EISA ID Registers */
+#define EISA_ID0 0xc80 /* EISA ID Register 0 */
+#define EISA_ID1 0xc81 /* EISA ID Register 1 */
+#define EISA_ID2 0xc82 /* EISA ID Register 2 */
+#define EISA_ID3 0xc83 /* EISA ID Register 3 */
+#define EISA_CR 0xc84 /* EISA Control Register */
+#define EISA_REG0 0xc88 /* EISA Configuration Register 0 */
+#define EISA_REG1 0xc89 /* EISA Configuration Register 1 */
+#define EISA_REG2 0xc8a /* EISA Configuration Register 2 */
+#define EISA_REG3 0xc8f /* EISA Configuration Register 3 */
+#define EISA_APROM 0xc90 /* Ethernet Address PROM */
+
+
+
+ /*****************************************************************
+ * Rx/Tx List Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_BUFFERS_PER_LIST 10
+#define TLAN_LAST_BUFFER 0x80000000
+#define TLAN_CSTAT_UNUSED 0x8000
+#define TLAN_CSTAT_FRM_CMP 0x4000
+#define TLAN_CSTAT_READY 0x3000
+#define TLAN_CSTAT_EOC 0x0800
+#define TLAN_CSTAT_RX_ERROR 0x0400
+#define TLAN_CSTAT_PASS_CRC 0x0200
+#define TLAN_CSTAT_DP_PR 0x0100
+
+
+struct tlan_buffer {
+ u32 count;
+ u32 address;
+};
+
+
+struct tlan_list {
+ u32 forward;
+ u16 c_stat;
+ u16 frame_size;
+ struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
+
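+/*
+ * Illustrative sketch (not part of the driver): a tlan_list is one DMA
+ * descriptor -- "forward" carries the bus address of the next
+ * descriptor (0 ends the chain) and the final fragment's count has
+ * TLAN_LAST_BUFFER or'ed in.  A single-fragment receive descriptor
+ * could be primed roughly like this:
+ */
+#if 0	/* example only, not compiled */
+static void tlan_example_init_rx_list(struct tlan_list *list, u32 buf_dma)
+{
+	list->forward = 0;			/* end of chain */
+	list->c_stat = TLAN_CSTAT_READY;	/* owned by the adapter */
+	list->frame_size = TLAN_MAX_FRAME_SIZE;
+	list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+	list->buffer[0].address = buf_dma;
+	list->buffer[1].count = 0;		/* no further fragments */
+	list->buffer[1].address = 0;
+}
+#endif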
+
+typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
+
+
+
+
+ /*****************************************************************
+ * PHY definitions
+ *
+ ****************************************************************/
+
+#define TLAN_PHY_MAX_ADDR 0x1F
+#define TLAN_PHY_NONE 0x20
+
+
+
+
+ /*****************************************************************
+ * TLAN Private Information Structure
+ *
+ ****************************************************************/
+
+struct tlan_priv {
+ struct net_device *next_device;
+ struct pci_dev *pci_dev;
+ struct net_device *dev;
+ void *dma_storage;
+ dma_addr_t dma_storage_dma;
+ unsigned int dma_size;
+ u8 *pad_buffer;
+ struct tlan_list *rx_list;
+ dma_addr_t rx_list_dma;
+ u8 *rx_buffer;
+ dma_addr_t rx_buffer_dma;
+ u32 rx_head;
+ u32 rx_tail;
+ u32 rx_eoc_count;
+ struct tlan_list *tx_list;
+ dma_addr_t tx_list_dma;
+ u8 *tx_buffer;
+ dma_addr_t tx_buffer_dma;
+ u32 tx_head;
+ u32 tx_in_progress;
+ u32 tx_tail;
+ u32 tx_busy_count;
+ u32 phy_online;
+ u32 timer_set_at;
+ u32 timer_type;
+ struct timer_list timer;
+ struct board *adapter;
+ u32 adapter_rev;
+ u32 aui;
+ u32 debug;
+ u32 duplex;
+ u32 phy[2];
+ u32 phy_num;
+ u32 speed;
+ u8 tlan_rev;
+ u8 tlan_full_duplex;
+ spinlock_t lock;
+ u8 link;
+ u8 is_eisa;
+ struct work_struct tlan_tqueue;
+ u8 neg_be_verbose;
+};
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Timer Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_TIMER_LINK_BEAT 1
+#define TLAN_TIMER_ACTIVITY 2
+#define TLAN_TIMER_PHY_PDOWN 3
+#define TLAN_TIMER_PHY_PUP 4
+#define TLAN_TIMER_PHY_RESET 5
+#define TLAN_TIMER_PHY_START_LINK 6
+#define TLAN_TIMER_PHY_FINISH_AN 7
+#define TLAN_TIMER_FINISH_RESET 8
+
+#define TLAN_TIMER_ACT_DELAY (HZ/10)
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Eeprom Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_EEPROM_ACK 0
+#define TLAN_EEPROM_STOP 1
+
+
+
+
+ /*****************************************************************
+ * Host Register Offsets and Contents
+ *
+ ****************************************************************/
+
+#define TLAN_HOST_CMD 0x00
+#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_STOP 0x40000000
+#define TLAN_HC_ACK 0x20000000
+#define TLAN_HC_CS_MASK 0x1FE00000
+#define TLAN_HC_EOC 0x00100000
+#define TLAN_HC_RT 0x00080000
+#define TLAN_HC_NES 0x00040000
+#define TLAN_HC_AD_RST 0x00008000
+#define TLAN_HC_LD_TMR 0x00004000
+#define TLAN_HC_LD_THR 0x00002000
+#define TLAN_HC_REQ_INT 0x00001000
+#define TLAN_HC_INT_OFF 0x00000800
+#define TLAN_HC_INT_ON 0x00000400
+#define TLAN_HC_AC_MASK 0x000000FF
+#define TLAN_CH_PARM 0x04
+#define TLAN_DIO_ADR 0x08
+#define TLAN_DA_ADR_INC 0x8000
+#define TLAN_DA_RAM_ADR 0x4000
+#define TLAN_HOST_INT 0x0A
+#define TLAN_HI_IV_MASK 0x1FE0
+#define TLAN_HI_IT_MASK 0x001C
+#define TLAN_DIO_DATA 0x0C
+
+
+/* ThunderLAN Internal Register DIO Offsets */
+
+#define TLAN_NET_CMD 0x00
+#define TLAN_NET_CMD_NRESET 0x80
+#define TLAN_NET_CMD_NWRAP 0x40
+#define TLAN_NET_CMD_CSF 0x20
+#define TLAN_NET_CMD_CAF 0x10
+#define TLAN_NET_CMD_NOBRX 0x08
+#define TLAN_NET_CMD_DUPLEX 0x04
+#define TLAN_NET_CMD_TRFRAM 0x02
+#define TLAN_NET_CMD_TXPACE 0x01
+#define TLAN_NET_SIO 0x01
+#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_ECLOK 0x40
+#define TLAN_NET_SIO_ETXEN 0x20
+#define TLAN_NET_SIO_EDATA 0x10
+#define TLAN_NET_SIO_NMRST 0x08
+#define TLAN_NET_SIO_MCLK 0x04
+#define TLAN_NET_SIO_MTXEN 0x02
+#define TLAN_NET_SIO_MDATA 0x01
+#define TLAN_NET_STS 0x02
+#define TLAN_NET_STS_MIRQ 0x80
+#define TLAN_NET_STS_HBEAT 0x40
+#define TLAN_NET_STS_TXSTOP 0x20
+#define TLAN_NET_STS_RXSTOP 0x10
+#define TLAN_NET_STS_RSRVD 0x0F
+#define TLAN_NET_MASK 0x03
+#define TLAN_NET_MASK_MASK7 0x80
+#define TLAN_NET_MASK_MASK6 0x40
+#define TLAN_NET_MASK_MASK5 0x20
+#define TLAN_NET_MASK_MASK4 0x10
+#define TLAN_NET_MASK_RSRVD 0x0F
+#define TLAN_NET_CONFIG 0x04
+#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_TCLK 0x4000
+#define TLAN_NET_CFG_BIT 0x2000
+#define TLAN_NET_CFG_RXCRC 0x1000
+#define TLAN_NET_CFG_PEF 0x0800
+#define TLAN_NET_CFG_1FRAG 0x0400
+#define TLAN_NET_CFG_1CHAN 0x0200
+#define TLAN_NET_CFG_MTEST 0x0100
+#define TLAN_NET_CFG_PHY_EN 0x0080
+#define TLAN_NET_CFG_MSMASK 0x007F
+#define TLAN_MAN_TEST 0x06
+#define TLAN_DEF_VENDOR_ID 0x08
+#define TLAN_DEF_DEVICE_ID 0x0A
+#define TLAN_DEF_REVISION 0x0C
+#define TLAN_DEF_SUBCLASS 0x0D
+#define TLAN_DEF_MIN_LAT 0x0E
+#define TLAN_DEF_MAX_LAT 0x0F
+#define TLAN_AREG_0 0x10
+#define TLAN_AREG_1 0x16
+#define TLAN_AREG_2 0x1C
+#define TLAN_AREG_3 0x22
+#define TLAN_HASH_1 0x28
+#define TLAN_HASH_2 0x2C
+#define TLAN_GOOD_TX_FRMS 0x30
+#define TLAN_TX_UNDERUNS 0x33
+#define TLAN_GOOD_RX_FRMS 0x34
+#define TLAN_RX_OVERRUNS 0x37
+#define TLAN_DEFERRED_TX 0x38
+#define TLAN_CRC_ERRORS 0x3A
+#define TLAN_CODE_ERRORS 0x3B
+#define TLAN_MULTICOL_FRMS 0x3C
+#define TLAN_SINGLECOL_FRMS 0x3E
+#define TLAN_EXCESSCOL_FRMS 0x40
+#define TLAN_LATE_COLS 0x41
+#define TLAN_CARRIER_LOSS 0x42
+#define TLAN_ACOMMIT 0x43
+#define TLAN_LED_REG 0x44
+#define TLAN_LED_ACT 0x10
+#define TLAN_LED_LINK 0x01
+#define TLAN_BSIZE_REG 0x45
+#define TLAN_MAX_RX 0x46
+#define TLAN_INT_DIS 0x48
+#define TLAN_ID_TX_EOC 0x04
+#define TLAN_ID_RX_EOF 0x02
+#define TLAN_ID_RX_EOC 0x01
+
+
+
+/* ThunderLAN Interrupt Codes */
+
+#define TLAN_INT_NUMBER_OF_INTS 8
+
+#define TLAN_INT_NONE 0x0000
+#define TLAN_INT_TX_EOF 0x0001
+#define TLAN_INT_STAT_OVERFLOW 0x0002
+#define TLAN_INT_RX_EOF 0x0003
+#define TLAN_INT_DUMMY 0x0004
+#define TLAN_INT_TX_EOC 0x0005
+#define TLAN_INT_STATUS_CHECK 0x0006
+#define TLAN_INT_RX_EOC 0x0007
+
+
+
+/* ThunderLAN MII Registers */
+
+/* Generic MII/PHY Registers */
+
+#define MII_GEN_CTL 0x00
+#define MII_GC_RESET 0x8000
+#define MII_GC_LOOPBK 0x4000
+#define MII_GC_SPEEDSEL 0x2000
+#define MII_GC_AUTOENB 0x1000
+#define MII_GC_PDOWN 0x0800
+#define MII_GC_ISOLATE 0x0400
+#define MII_GC_AUTORSRT 0x0200
+#define MII_GC_DUPLEX 0x0100
+#define MII_GC_COLTEST 0x0080
+#define MII_GC_RESERVED 0x007F
+#define MII_GEN_STS 0x01
+#define MII_GS_100BT4 0x8000
+#define MII_GS_100BTXFD 0x4000
+#define MII_GS_100BTXHD 0x2000
+#define MII_GS_10BTFD 0x1000
+#define MII_GS_10BTHD 0x0800
+#define MII_GS_RESERVED 0x07C0
+#define MII_GS_AUTOCMPLT 0x0020
+#define MII_GS_RFLT 0x0010
+#define MII_GS_AUTONEG 0x0008
+#define MII_GS_LINK 0x0004
+#define MII_GS_JABBER 0x0002
+#define MII_GS_EXTCAP 0x0001
+#define MII_GEN_ID_HI 0x02
+#define MII_GEN_ID_LO 0x03
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
+#define MII_AN_ADV 0x04
+#define MII_AN_LPA 0x05
+#define MII_AN_EXP 0x06
+
+/* ThunderLAN Specific MII/PHY Registers */
+
+#define TLAN_TLPHY_ID 0x10
+#define TLAN_TLPHY_CTL 0x11
+#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_SWAPOL 0x4000
+#define TLAN_TC_AUISEL 0x2000
+#define TLAN_TC_SQEEN 0x1000
+#define TLAN_TC_MTEST 0x0800
+#define TLAN_TC_RESERVED 0x07F8
+#define TLAN_TC_NFEW 0x0004
+#define TLAN_TC_INTEN 0x0002
+#define TLAN_TC_TINT 0x0001
+#define TLAN_TLPHY_STS 0x12
+#define TLAN_TS_MINT 0x8000
+#define TLAN_TS_PHOK 0x4000
+#define TLAN_TS_POLOK 0x2000
+#define TLAN_TS_TPENERGY 0x1000
+#define TLAN_TS_RESERVED 0x0FFF
+#define TLAN_TLPHY_PAR 0x19
+#define TLAN_PHY_CIM_STAT 0x0020
+#define TLAN_PHY_SPEED_100 0x0040
+#define TLAN_PHY_DUPLEX_FULL 0x0080
+#define TLAN_PHY_AN_EN_STAT 0x0400
+
+/* National Semiconductor and Level One PHY IDs */
+#define NAT_SEM_ID1 0x2000
+#define NAT_SEM_ID2 0x5C01
+#define LEVEL1_ID1 0x7810
+#define LEVEL1_ID2 0x0000
+
+#define CIRC_INC(a, b) if (++a >= b) a = 0
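+/*
+ * e.g. CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS) advances a ring
+ * index and wraps it back to zero at the end of the ring.
+ */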
+
+/* Routines to access internal registers. */
+
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
+
+}
+
+
+
+
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
+
+}
+
+
+
+
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inl(base_addr + TLAN_DIO_DATA);
+
+}
+
+
+
+
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
+
+}
+
+
+
+
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+
+
+
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
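+
+/*
+ * Illustrative usage (sketch): every DIO access is two-step -- the
+ * internal register offset is written to TLAN_DIO_ADR, then data moves
+ * through the TLAN_DIO_DATA window, e.g.:
+ *
+ *	tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
+ *
+ * The bit helpers above do a read-modify-write on an already-selected
+ * I/O port.
+ */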
+
+/*
+ * Given 6 bytes, view them as 8 6-bit numbers and return the XOR of
+ * those.  The code below is about seven times as fast as the original
+ * code.
+ *
+ * The original code was:
+ *
+ * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); }
+ *
+ * #define XOR8(a, b, c, d, e, f, g, h) \
+ * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
+ * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
+ *
+ * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ * DA(a,30), DA(a,36), DA(a,42));
+ * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ * DA(a,31), DA(a,37), DA(a,43)) << 1;
+ * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ * DA(a,32), DA(a,38), DA(a,44)) << 2;
+ * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ * DA(a,33), DA(a,39), DA(a,45)) << 3;
+ * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ * DA(a,34), DA(a,40), DA(a,46)) << 4;
+ * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ * DA(a,35), DA(a,41), DA(a,47)) << 5;
+ *
+ */
+static inline u32 tlan_hash_func(const u8 *a)
+{
+ u8 hash;
+
+ hash = (a[0]^a[3]); /* & 077 */
+ hash ^= ((a[0]^a[3])>>6); /* & 003 */
+ hash ^= ((a[1]^a[4])<<2); /* & 074 */
+ hash ^= ((a[1]^a[4])>>4); /* & 017 */
+ hash ^= ((a[2]^a[5])<<4); /* & 060 */
+ hash ^= ((a[2]^a[5])>>2); /* & 077 */
+
+ return hash & 077;
+}
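+
+/*
+ * Illustrative cross-check (sketch, not used by the driver): the same
+ * hash computed the slow way, one address bit at a time; bit i of the
+ * 48-bit address folds into hash bit i % 6.
+ */
+#if 0	/* example only, not compiled */
+static inline u32 tlan_hash_func_slow(const u8 *a)
+{
+	u32 hash = 0;
+	int bit;
+
+	for (bit = 0; bit < 48; bit++)
+		hash ^= ((a[bit / 8] >> (bit % 8)) & 1) << (bit % 6);
+
+	return hash & 077;
+}
+#endif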
+#endif